diff --git a/.cirrus.yml b/.cirrus.yml
index 54458bfd38..1323ce8e95 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -166,19 +166,19 @@ env:
 # Linux EOL timelines: https://linuxlifecycle.com/
 # Fedora (~13 months): https://fedoraproject.org/wiki/Fedora_Release_Life_Cycle
+fedora40_task:
+  container:
+    # Fedora 40 EOL: Around May 2025
+    dockerfile: ci/fedora-40/Dockerfile
+    << : *RESOURCES_TEMPLATE
+  << : *CI_TEMPLATE
+
 fedora39_task:
   container:
     # Fedora 39 EOL: Around Nov 2024
     dockerfile: ci/fedora-39/Dockerfile
     << : *RESOURCES_TEMPLATE
   << : *CI_TEMPLATE
-
-fedora38_task:
-  container:
-    # Fedora 38 EOL: Around May 2024
-    dockerfile: ci/fedora-38/Dockerfile
-    << : *RESOURCES_TEMPLATE
-  << : *CI_TEMPLATE
   << : *SKIP_TASK_ON_PR
 
 centosstream9_task:
@@ -188,14 +188,6 @@ centosstream9_task:
     << : *RESOURCES_TEMPLATE
   << : *CI_TEMPLATE
 
-centosstream8_task:
-  container:
-    # Stream 8 EOL: May 31, 2024
-    dockerfile: ci/centos-stream-8/Dockerfile
-    << : *RESOURCES_TEMPLATE
-  << : *CI_TEMPLATE
-  << : *SKIP_TASK_ON_PR
-
 debian12_task:
   container:
     # Debian 12 (bookworm) EOL: TBD
@@ -346,18 +338,15 @@ alpine_task:
     << : *RESOURCES_TEMPLATE
   << : *CI_TEMPLATE
 
-# Apple doesn't publish official long-term support timelines.
-# We aim to support both the current and previous macOS release.
+# Cirrus currently supports only the following macOS runner; selecting
+# anything else automatically upgrades to this one.
+#
+# ghcr.io/cirruslabs/macos-runner:sonoma
+#
+# See also: https://cirrus-ci.org/guide/macOS/
 macos_sonoma_task:
   macos_instance:
-    image: ghcr.io/cirruslabs/macos-sonoma-xcode:latest
-  prepare_script: ./ci/macos/prepare.sh
-  << : *CI_TEMPLATE
-  << : *MACOS_ENVIRONMENT
-
-macos_ventura_task:
-  macos_instance:
-    image: ghcr.io/cirruslabs/macos-ventura-base:latest
+    image: ghcr.io/cirruslabs/macos-runner:sonoma
   prepare_script: ./ci/macos/prepare.sh
   << : *CI_TEMPLATE
   << : *MACOS_ENVIRONMENT
@@ -397,6 +386,8 @@ asan_sanitizer_task:
       CXXFLAGS: -DZEEK_DICT_DEBUG
       ZEEK_CI_CONFIGURE_FLAGS: *ASAN_SANITIZER_CONFIG
       ASAN_OPTIONS: detect_leaks=1:detect_odr_violation=0
+      # Use absolute paths for coverage files.
+      CCACHE_BASEDIR:
 
 ubsan_sanitizer_task:
   container:
diff --git a/.gitmodules b/.gitmodules
index ddb2651a18..38e0606337 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -75,4 +75,4 @@ url = https://github.com/microsoft/vcpkg
 [submodule "auxil/prometheus-cpp"]
 	path = auxil/prometheus-cpp
-	url = https://github.com/jupp0r/prometheus-cpp
+	url = https://github.com/zeek/prometheus-cpp
diff --git a/CHANGES b/CHANGES
index d073a31b86..6c209a27f4 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,420 @@
+7.0.0-dev.461 | 2024-07-10 18:45:36 +0200
+
+  * Extend btest for logging of disabled analyzers (Jan Grashoefer, Corelight)
+
+  * Add logging of disabled analyzers to analyzer.log (Jan Grashoefer, Corelight)
+
+7.0.0-dev.458 | 2024-07-10 11:05:19 +0200
+
+  * testing/btest: Default to HILTI_JIT_PARALLELISM=1 (Arne Welzel, Corelight)
+
+    This is a rework of b59bed9d06f2257a6b4f6c6408b8363a277eebac, moving
+    HILTI_JIT_PARALLELISM=1 into btest.cfg so that it applies by default
+    to btest -j users (and CI).
+
+    The background for this change is that spicyz may spawn up to nproc compiler
+    instances by default. Combined with btest -j, this may mean nproc x nproc
+    instances in the worst case. Particularly with gcc, this easily overloads CI or
+    local systems, putting them into hard-to-recover-from thrashing/OOM states.
+
+    Exporting HILTI_JIT_PARALLELISM in the shell allows overriding.
+
+  * Revert "CI: Use ccache and a single CPU when building spicy analyzers for btests" (Arne Welzel, Corelight)
+
+    This reverts commit b59bed9d06f2257a6b4f6c6408b8363a277eebac.
+
+7.0.0-dev.454 | 2024-07-09 15:12:19 -0700
+
+  * CI: Use ccache and a single CPU when building spicy analyzers for btests (Tim Wojtulewicz, Corelight)
+
+7.0.0-dev.452 | 2024-07-09 14:53:41 -0700
+
+  * Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight)
+
+7.0.0-dev.450 | 2024-07-09 10:15:13 -0700
+
+  * minor script optimization updates to reflect recent changes, Coverity findings (Vern Paxson, Corelight)
+
+7.0.0-dev.447 | 2024-07-09 17:10:16 +0200
+
+  * Fix for --display-cmake in configure (cknill)
+
+    Moved build directory creation further down in the script so that --display-cmake
+    has a chance to happen before build tree setup.
+
+7.0.0-dev.445 | 2024-07-09 00:07:25 -0700
+
+  * Management framework: bump cluster testsuite to pull in telemetry tests (Christian Kreibich, Corelight)
+
+  * Management framework: bump zeek-client (Christian Kreibich, Corelight)
+
+  * Management framework: augment deployed configs with instance IP addresses (Christian Kreibich, Corelight)
+
+  * Management framework: add auto-enumeration of metrics ports (Christian Kreibich, Corelight)
+
+  * Management framework: propagate metrics port from agent (Christian Kreibich, Corelight)
+
+  * Management framework: add metrics port in management & Supervisor node records (Christian Kreibich, Corelight)
+
+  * Harden the telemetry manager against unset Telemetry::metrics_address (Christian Kreibich, Corelight)
+
+  * Comment-only tweaks for telemetry-related settings. (Christian Kreibich, Corelight)
+
+  * Update submodules [nomail] (Tim Wojtulewicz, Corelight)
+
+7.0.0-dev.434 | 2024-07-04 17:34:23 +0200
+
+  * CMakeLists: Ensure Threads::Threads target exists (Arne Welzel, Corelight)
+
+    Fix failure on OSX with CMake 3.30 complaining about missing
+    Threads::Threads target.
+
+  * ci/macos: Only use sonoma image (Arne Welzel, Corelight)
+
+7.0.0-dev.431 | 2024-07-04 17:22:42 +0200
+
+  * Bump auxil/zeek-aux (Benjamin Bannier, Corelight)
+
+7.0.0-dev.429 | 2024-07-04 15:15:56 +0200
+
+  * Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight)
+
+7.0.0-dev.427 | 2024-07-04 13:35:25 +0200
+
+  * GH-3789: HTTP: Implement FlipRoles() (Arne Welzel, Corelight)
+
+    When Zeek flips roles of an HTTP connection subsequent to the HTTP analyzer
+    being attached, that analyzer would not update its own ContentLine analyzer
+    state, resulting in the wrong ContentLine analyzer being switched into
+    plain delivery mode.
+
+    In debug builds, this would result in assertion failures; in production
+    builds, the HTTP analyzer would receive HTTP bodies as individual header
+    lines, or conversely, individual header lines would be delivered as a
+    large chunk from the ContentLine analyzer.
+
+    PCAPs were generated locally using tcprewrite to select well-known-http ports
+    for both endpoints, then editcap to drop the first SYN packet.
+
+    Kudos to @JordanBarnartt for keeping at it.
+
+    Closes #3789
+
+  * ContentLineAnalyzer: Add getter for skip_partial (Arne Welzel, Corelight)
+
+7.0.0-dev.423 | 2024-07-02 15:35:36 -0700
+
+  * Remove the Supervisor's internal ClusterEndpoint struct. (Christian Kreibich, Corelight)
+
+  * Provide a script-layer equivalent to Supervisor::__init_cluster().
+    (Christian Kreibich, Corelight)
+
+7.0.0-dev.420 | 2024-07-02 14:46:29 -0700
+
+  * Update NEWS file to cover JSON enhancements (Christian Kreibich, Corelight)
+
+  * Support JSON roundtripping via to_json()/from_json() for patterns (Christian Kreibich, Corelight)
+
+  * Support table deserialization in from_json() (Christian Kreibich, Corelight)
+
+  * Support map-based definition of ports in from_json() (Christian Kreibich, Corelight)
+
+  * Document the field_escape_pattern in the to_json() BiF (Christian Kreibich, Corelight)
+
+7.0.0-dev.414 | 2024-07-02 19:42:10 +0200
+
+  * Bump cmake submodule [nomail] (Arne Welzel, Corelight)
+
+7.0.0-dev.413 | 2024-07-02 14:41:27 +0200
+
+  * threading/Manager: Warn if threads are added after termination (Arne Welzel, Corelight)
+
+    The core.file-analyzer-violation test showed that it's possible to
+    create new threads (log writers) when Zeek is in the process of
+    terminating. This can result in the IO manager's destructor
+    deleting IO sources for threads that are still running.
+
+    This is sort of a scripting issue, so for now we log a reporter warning
+    when it happens, leaving a bread-crumb for what might be going on.
+    In the future it might make sense to guard APIs with
+    zeek_is_terminating() (a short sketch follows below).
+
+  * iosource/Manager: Reap dry sources while computing timeout (Arne Welzel, Corelight)
+
+    Avoids looping over the sources vector twice and should result
+    in the same behavior.
+
+  * GH-3682: threading/MsgThread: Decouple IO source and thread lifetimes (Arne Welzel, Corelight)
+
+    MsgThread acting as an IO source can result in the situation where the
+    threading manager's heartbeat timer deletes a finished MsgThread instance,
+    but at the same time this thread is in the list of ready IO sources the
+    main loop is currently processing.
+
+    Fix this by decoupling the lifetime of the IO source part and properly
+    registering as lifetime-managed IO sources with the IO manager.
+
+    Fixes #3682
+
+  * iosource/Manager: Do not manage lifetime of pkt_src (Arne Welzel, Corelight)
+
+    Now that dry sources are properly reaped and freed, an offline packet
+    source would be deleted once dry, resulting in GetPktSrc() returning
+    a wild pointer. Don't manage the packet source lifetime and instead
+    free it during Manager destruction.
+
+  * iosource/Manager: Honor manage_lifetime and dont_count for short-lived IO sources (Arne Welzel, Corelight)
+
+    If an IO source is registered and becomes dry at runtime, the IO
+    manager would not honor its manage_lifetime or dont_count attribute
+    during collection, resulting in memory leaks.
+
+    This probably hasn't mattered so far as there are no IO sources registered
+    in-tree at runtime using manage_lifetime=true.
+
+7.0.0-dev.407 | 2024-06-26 13:16:10 +0200
+
+  * coverage/lcov_html: Allow missing coveralls token (Arne Welzel, Corelight)
+
+    This is a fixup for 0cd023b83919fca7bfac55e75a41f724e820fd26, which
+    currently causes ASAN coverage builds to fail for non-master branches
+    due to a missing COVERALLS_REPO_TOKEN.
+
+    Instead of bailing out for non-master branches, pass `--dry-run` to the
+    coveralls-lcov invocation to test more of the script.
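A short sketch of the zeek_is_terminating() guard suggested under
7.0.0-dev.413 above. Everything here besides the BiF itself -- the module,
the stream, and the choice of event -- is hypothetical illustration, not
part of the change:

    module Example;

    export {
        redef enum Log::ID += { LOG };
        type Info: record {
            ts: time &log;
        };
    }

    event zeek_done()
        {
        # zeek_is_terminating() is T once shutdown has begun; bail out
        # rather than spawn a new log-writer thread at this point.
        if ( zeek_is_terminating() )
            return;

        Log::create_stream(LOG, [$columns=Info, $path="example"]);
        }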
+
+7.0.0-dev.405 | 2024-06-25 13:35:41 +0200
+
+  * Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight)
+
+7.0.0-dev.403 | 2024-06-25 11:29:58 +0200
+
+  * NEWS: Add entry about FileExtractDir (Arne Welzel, Corelight)
+
+  * Update zeekctl submodule (Arne Welzel, Corelight)
+
+  * zeek-testing-private: Update baseline, after merge (Arne Welzel, Corelight)
+
+7.0.0-dev.399 | 2024-06-24 11:19:14 +0200
+
+  * script optimization for record operations sourced (in part) from other records (Vern Paxson, Corelight)
+
+7.0.0-dev.397 | 2024-06-20 18:16:51 -0700
+
+  * zeek-testing-private: Update baseline (Arne Welzel, Corelight)
+
+  * ssh: Revert half-duplex robustness (Arne Welzel, Corelight)
+
+    This reverts part of commit a0888b7e36308d241f4c62b42715a94d499aab23 due
+    to inhibiting analyzer violations when parsing non-SSH traffic when
+    the &restofdata path is entered.
+
+    See additional discussion here:
+    https://github.com/zeek/zeek/pull/3781
+    https://github.com/zeek/zeek/pull/3720/files#r1636314375
+
+7.0.0-dev.394 | 2024-06-20 17:28:11 -0700
+
+  * Add "zeek/" prefixes to includes in spicy/runtime-support.h (Michael Dopheide)
+
+7.0.0-dev.392 | 2024-06-20 15:51:42 +0200
+
+  * Spicy: Provide runtime API to access Zeek-side globals. (Robin Sommer, Corelight)
+
+    This allows reading Zeek global variables from inside Spicy code. The
+    main challenge here is supporting all of Zeek's data types in a
+    type-safe manner.
+
+    The most straight-forward API is a set of functions
+    `get_<type>(<id>)`, where `<type>` is the Zeek-side type
+    name (e.g., `count`, `string`, `bool`) and `<id>` is the fully scoped
+    name of the Zeek-side global (e.g., `MyModule::Boolean`). These
+    functions then return the corresponding Zeek value, converted into an
+    appropriate Spicy type. Example:
+
+    Zeek:
+        module Foo;
+
+        const x: count = 42;
+        const y: string = "xxx";
+
+    Spicy:
+        import zeek;
+
+        assert zeek::get_count("Foo::x") == 42;
+        assert zeek::get_string("Foo::y") == b"xxx"; # returns bytes(!)
+
+    For container types, the `get_*` functions return an opaque type that
+    can be used to access the containers' values. An additional set of
+    functions `as_<type>` allows converting opaque values of atomic
+    types to Spicy equivalents. Example:
+
+    Zeek:
+        module Foo;
+
+        const s: set[count] = { 1, 2 };
+        const t: table[count] of string = { [1] = "One", [2] = "Two" };
+
+    Spicy:
+        # Check set membership.
+        local set_ = zeek::get_set("Foo::s");
+        assert zeek::set_contains(set_, 1) == True;
+
+        # Look up table element.
+        local table_ = zeek::get_table("Foo::t");
+        local value = zeek::table_lookup(table_, 1);
+        assert zeek::as_string(value) == b"One";
+
+    There are also functions for accessing elements of Zeek-side vectors
+    and records.
+
+    If any of these `zeek::*` conversion functions fails (e.g., due to a
+    global of that name not existing), it will throw an exception.
+
+    The documentation has more information on all of this.
+
+    Design considerations:
+
+    - We support only reading Zeek variables, not writing. This is
+      both to simplify the API, and also conceptually to avoid
+      offering backdoors into Zeek state that could end up with a very
+      tight coupling of Spicy and Zeek code.
+
+    - We accept that a single access might be relatively slow due to
+      name lookup and data conversion. This is primarily meant for
+      configuration-style data, not for transferring lots of dynamic
+      state over.
+
+    - In that spirit, we don't support deep-copying complex data types
+      from Zeek over to Spicy.
+      This is (1) to avoid performance
+      problems when accidentally copying large containers over,
+      potentially even at every access; and (2) to avoid the two sides
+      getting out of sync if one ends up modifying a container without
+      the other being able to see it.
+
+  * Spicy: Reformat `zeek.spicy` with `spicy-format`. (Robin Sommer, Corelight)
+
+  * Spicy: Extend exception hierarchy. (Robin Sommer, Corelight)
+
+    We move the current `TypeMismatch` into a new `ParameterMismatch`
+    exception derived from a more general `TypeMismatch`, which can now
+    also be used for other, non-parameter mismatches.
+
+7.0.0-dev.387 | 2024-06-18 10:52:25 +0200
+
+  * CMakeLists: Disable -Werror for 3rdparty/sqlite3.c (Arne Welzel, Corelight)
+
+    We package vanilla sqlite from upstream, and on Fedora 40 with sqlite 3.46
+    there's the following compiler warning:
+
+        In function 'sqlite3Strlen30',
+            inlined from 'sqlite3ColumnSetColl' at
+            ../../src/3rdparty/sqlite3.c:122105:10:
+        ../../src/3rdparty/sqlite3.c:35003:28: error: 'strlen' reading 1 or more bytes from a region of size 0 [-Werror=stringop-overread]
+        35003 |   return 0x3fffffff & (int)strlen(z);
+              |                            ^~~~~~~~~
+        In function 'sqlite3ColumnSetColl':
+
+    Disabling -Werror on sqlite3.c seems sensible given we have little
+    control over that code.
+
+  * Bump zeek-3rdparty to pull in sqlite move to 3.46 (Christian Kreibich, Corelight)
+
+    This avoids a compiler warning/error on Fedora 40.
+
+  * CI: drop Fedora 38, add 40 (Christian Kreibich, Corelight)
+
+7.0.0-dev.383 | 2024-06-14 13:50:05 +0200
+
+  * GH-3783: Spicy: Disallow repeating replacements of the same analyzer. (Robin Sommer, Corelight)
+
+    We now reject EVT files that attempt to replace the same built-in
+    analyzer multiple times, as doing so would be ill-defined and it is
+    not intuitive what exactly it would mean.
+
+  * Bump Spicy. (Robin Sommer, Corelight)
+
+7.0.0-dev.380 | 2024-06-11 15:11:43 +0200
+
+  * Drop EOL centos8-stream in CI (Benjamin Bannier, Corelight)
+
+7.0.0-dev.378 | 2024-06-11 12:01:03 +0200
+
+  * Suppress a known data race during civetweb shutdown (Tim Wojtulewicz)
+
+7.0.0-dev.376 | 2024-06-11 11:00:46 +0200
+
+  * Bump cmake for -fprofile-update=atomic usage (Arne Welzel, Corelight)
+
+  * cirrus: Unset CCACHE_BASEDIR for asan/coverage build (Arne Welzel, Corelight)
+
+    When CCACHE_BASEDIR is set, ccache will rewrite absolute paths to
+    relative paths in order to allow compilation in different source
+    directories. We do not need this feature on Cirrus (the checkout
+    is always in /zeek), and using absolute paths avoids
+    confusion/normalization needs for the gcov -p results.
+
+    We could consider removing the global CCACHE_BASEDIR, but it'd
+    bust the ccache of every other task, too.
+
+7.0.0-dev.372 | 2024-06-07 09:28:52 -0700
+
+  * Add Telemetry::metrics_address option (Tim Wojtulewicz, Corelight)
+
+7.0.0-dev.368 | 2024-06-06 18:08:29 -0700
+
+  * Change prometheus test to check for required jq (Tim Wojtulewicz, Corelight)
+
+7.0.0-dev.366 | 2024-06-06 18:06:42 -0700
+
+  * Check for 'zeekctl check' before trying to start up prometheus (Tim Wojtulewicz, Corelight)
+
+7.0.0-dev.362 | 2024-06-04 14:16:54 -0700
+
+  * Switch to zeek fork of prometheus-cpp (Tim Wojtulewicz, Corelight)
+
+  * Remove unnecessary shared_from_this on instrument classes (Tim Wojtulewicz, Corelight)
+
+  * Restore label_names field in MetricOpts record (Tim Wojtulewicz)
+
+  * Change how we count FDs on Linux to fix zeekctl stop issues (Tim Wojtulewicz)
+
+  * Update zeekctl tests for telemetry rework (Tim Wojtulewicz, Corelight)
+
+  * Use forward declarations of prometheus-cpp types in telemetry::Manager (Tim Wojtulewicz, Corelight)
+
+  * Add prometheus-cpp files to install set for plugins to use (Tim Wojtulewicz, Corelight)
+
+  * Fix a memory leak with the CivetWeb callbacks in telemetry (Tim Wojtulewicz, Corelight)
+
+  * Fix a bunch of copy-instead-of-move findings from Coverity (Tim Wojtulewicz, Corelight)
+
+  * Move telemetry label names out of opts records, into main metric records (Tim Wojtulewicz, Corelight)
+
+  * Ensure the order of label values matches the label names (Tim Wojtulewicz, Corelight)
+
+  * Remove prefix column from telemetry.log (Tim Wojtulewicz, Corelight)
+
+  * Fix race condition by pre-building the cluster json data for services.json (Tim Wojtulewicz)
+
+  * Set running_under_test for scripts.base.frameworks.logging.telemetry test (Tim Wojtulewicz, Corelight)
+
+7.0.0-dev.347 | 2024-06-04 11:36:13 -0700
+
+  * Update reporter.bif to describe special case of errors in init (Smoot)
+
+    Originally proposed in zeek/zeek-docs#257, but reverted via
+    9f9ebde62380a3012a1471d9ff1c1c91c7aa69da.
+
+7.0.0-dev.345 | 2024-06-04 10:36:46 -0700
+
+  * script optimization baseline tweaks due to recent minor changes (Vern Paxson, Corelight)
+
+  * updated list of BiFs for script optimization (Vern Paxson, Corelight)
+
+  * addressed some Coverity nits (Vern Paxson, Corelight)
+
+  * improved error cascade for invalid attributes (Vern Paxson, Corelight)
+
 7.0.0-dev.338 | 2024-05-31 14:18:15 -0700
 
   * Add type aliases for instrument and family shared_ptrs (Tim Wojtulewicz, Corelight)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 60de8b05fc..c4c7aa9990 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -306,6 +306,9 @@ function (zeek_add_dependencies dep)
     endforeach ()
 endfunction ()
 
+# Used by the zeek_dynamic_plugin_base library and for sanitizer builds.
+find_package(Threads REQUIRED)
+
 # Interface library for propagating extra flags and include paths to dynamically
 # loaded plugins. Also propagates include paths and C++17 mode on the install
 # interface.
@@ -338,15 +341,20 @@ add_zeek_dynamic_plugin_build_interface_include_directories(
     ${PROJECT_SOURCE_DIR}/auxil/broker/libbroker
     ${PROJECT_SOURCE_DIR}/auxil/paraglob/include
     ${PROJECT_SOURCE_DIR}/auxil/rapidjson/include
+    ${PROJECT_SOURCE_DIR}/auxil/prometheus-cpp/core/include
    ${CMAKE_BINARY_DIR}/src
     ${CMAKE_BINARY_DIR}/src/include
     ${CMAKE_BINARY_DIR}/auxil/binpac/lib
-    ${CMAKE_BINARY_DIR}/auxil/broker/libbroker)
+    ${CMAKE_BINARY_DIR}/auxil/broker/libbroker
+    ${CMAKE_BINARY_DIR}/auxil/prometheus-cpp/core/include)
 
 # threading/formatters/JSON.h includes rapidjson headers and may be used
 # by external plugins; extend the include path.
 target_include_directories(zeek_dynamic_plugin_base SYSTEM
                            INTERFACE $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/auxil/rapidjson/include>)
+target_include_directories(
+    zeek_dynamic_plugin_base SYSTEM
+    INTERFACE $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/auxil/prometheus-cpp/core/include>)
 
 # Convenience function for adding an OBJECT library that feeds directly into the
 # main target(s).
@@ -635,11 +643,6 @@ if (NOT BINARY_PACKAGING_MODE)
 endif ()
 
 if (ZEEK_SANITIZERS)
-    # Check the thread library info early as setting compiler flags seems to
-    # interfere with the detection and cause CMAKE_THREAD_LIBS_INIT to not include
-    # -lpthread when it should.
-    find_package(Threads)
-
     string(REPLACE "," " " _sanitizer_args "${ZEEK_SANITIZERS}")
     separate_arguments(_sanitizer_args)
     set(ZEEK_SANITIZERS "")
@@ -1013,6 +1016,12 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/rapidjson/include/rapidjson
 install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/filesystem/include/ghc
         DESTINATION include/zeek/3rdparty/)
 
+install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/prometheus-cpp/core/include/prometheus
+        DESTINATION include/zeek/3rdparty/prometheus-cpp/include)
+
+install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/auxil/prometheus-cpp/core/include/prometheus
+        DESTINATION include/zeek/3rdparty/prometheus-cpp/include)
+
 # Create 3rdparty/ghc within the build directory so that the include for
 # "zeek/3rdparty/ghc/filesystem.hpp" works within the build tree.
 execute_process(COMMAND "${CMAKE_COMMAND}" -E make_directory
diff --git a/NEWS b/NEWS
index f5ae0c5de3..e526919b09 100644
--- a/NEWS
+++ b/NEWS
@@ -51,6 +51,12 @@ Breaking Changes
   instruments are not. ``Histogram`` instruments don't have the concept of
   summing.
 
+- Zeekctl now sets `FileExtract::prefix` to `spool/extract_files/` to avoid
+  deletion of extracted files when stopping worker nodes. To revert to the
+  previous behavior, set `FileExtractDir` to an empty string in `zeekctl.cfg`.
+
+  If you never enabled Zeek's file extraction functionality, there's no impact.
+
 New Functionality
 -----------------
 
@@ -101,6 +107,14 @@ New Functionality
   Use ``Analyzer::get_tag()`` if you need to obtain an analyzer's tag from its
   name (such as "HTTP").
 
+- The ``from_json()`` function now supports ingesting JSON representations of
+  tables as produced by the ``to_json()`` function. It now also supports reading
+  the object-based representation of ports that ``to_json()`` generates for that
+  Zeek type.
+
+- The ``analyzer.log`` now optionally supports logging of disabled analyzers
+  through the new option ``Analyzer::Logging::include_disabling``.
+
 Changed Functionality
 ---------------------
 
@@ -140,6 +154,10 @@ Changed Functionality
   it aligns with the same requirement for traditional analyzers and enables
   customizing file handles for protocol-specific semantics.
 
+- The Supervisor's API now returns NodeConfig records with a cluster table whose
+  ClusterEndpoints have a port value of 0/unknown, rather than 0/tcp, to
+  indicate that the node in question has no listening port.
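To make the ``from_json()`` addition above concrete, here is a minimal
sketch of the table round-trip. The ``StrCounts`` alias and its contents are
hypothetical; ``from_json()`` returns a ``from_json_result`` whose ``v``
field needs a cast back to the target type::

    type StrCounts: table[string] of count;

    event zeek_init()
        {
        local t: StrCounts = table(["a"] = 1, ["b"] = 2);

        # Round-trip: serialize with to_json(), then ingest again. The
        # second argument names the type to deserialize into.
        local res = from_json(to_json(t), StrCounts);

        if ( res$valid )
            print res$v as StrCounts;
        }

Ports serialized by ``to_json()`` use an object form (roughly
``{"port": 80, "proto": "tcp"}``), which ``from_json()`` can now read back
the same way.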
+ Removed Functionality --------------------- diff --git a/VERSION b/VERSION index d6ac92c7a9..e7572f9d9b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -7.0.0-dev.338 +7.0.0-dev.461 diff --git a/auxil/bifcl b/auxil/bifcl index 22c0317628..fd83a78984 160000 --- a/auxil/bifcl +++ b/auxil/bifcl @@ -1 +1 @@ -Subproject commit 22c031762832b72c2f7b4ac8bbe8102d66b09ccc +Subproject commit fd83a789848b485c81f28b8a6af23d28eca7b3c7 diff --git a/auxil/binpac b/auxil/binpac index 822cdb551b..7db629d4e2 160000 --- a/auxil/binpac +++ b/auxil/binpac @@ -1 +1 @@ -Subproject commit 822cdb551bf6c2e7c18f16c7f088c61675ae588b +Subproject commit 7db629d4e2f8128e3e27aa28200106fa6d553be0 diff --git a/auxil/broker b/auxil/broker index c529c38de3..c47de11e4b 160000 --- a/auxil/broker +++ b/auxil/broker @@ -1 +1 @@ -Subproject commit c529c38de3d540953e799a83c44683a3413a1a14 +Subproject commit c47de11e4b84f24e8b501c3b1a446ad808e4964a diff --git a/auxil/btest b/auxil/btest index 46f982cd6f..989c7513c3 160000 --- a/auxil/btest +++ b/auxil/btest @@ -1 +1 @@ -Subproject commit 46f982cd6fafd34639c2f97628a57f1457f7e56a +Subproject commit 989c7513c3b6056a429a5d48dacdc9a2c1b216a7 diff --git a/auxil/gen-zam b/auxil/gen-zam index 376de10133..396723c04b 160000 --- a/auxil/gen-zam +++ b/auxil/gen-zam @@ -1 +1 @@ -Subproject commit 376de10133c100948a2875258d11ab97b361467c +Subproject commit 396723c04ba1f8f2f75555745a503b8edf353ff6 diff --git a/auxil/package-manager b/auxil/package-manager index ba0354c84f..bdc15fab95 160000 --- a/auxil/package-manager +++ b/auxil/package-manager @@ -1 +1 @@ -Subproject commit ba0354c84f8afb7804afe7d673081edfa712ad5c +Subproject commit bdc15fab95b1ca2bd370fa25d91f7879b5da35fc diff --git a/auxil/prometheus-cpp b/auxil/prometheus-cpp index cdb357ad55..2fec7205d1 160000 --- a/auxil/prometheus-cpp +++ b/auxil/prometheus-cpp @@ -1 +1 @@ -Subproject commit cdb357ad556c9ba96cbfa90fed2940fedf101673 +Subproject commit 2fec7205d1a9cb4829b86c943d599696d53de85c diff --git a/auxil/spicy b/auxil/spicy index 83bc845b8d..6581b1855a 160000 --- a/auxil/spicy +++ b/auxil/spicy @@ -1 +1 @@ -Subproject commit 83bc845b8daf82fa22b783261d4c339627d55c09 +Subproject commit 6581b1855a5ea8cc102c66b4ac6a431fc67484a0 diff --git a/auxil/zeek-aux b/auxil/zeek-aux index 338ece9314..1478f2ee55 160000 --- a/auxil/zeek-aux +++ b/auxil/zeek-aux @@ -1 +1 @@ -Subproject commit 338ece93146c594f497f5fd370fd5dde23186d2e +Subproject commit 1478f2ee550a0f99f5b93975c17ae814ebe515b7 diff --git a/auxil/zeek-client b/auxil/zeek-client index 6c8cb3e1c4..5bcc140851 160000 --- a/auxil/zeek-client +++ b/auxil/zeek-client @@ -1 +1 @@ -Subproject commit 6c8cb3e1c475424880eae968f812805fdbd95cea +Subproject commit 5bcc14085178ed4ddfa9ad972b441c36e8bc0787 diff --git a/auxil/zeekctl b/auxil/zeekctl index 4dad935e9c..7671450f34 160000 --- a/auxil/zeekctl +++ b/auxil/zeekctl @@ -1 +1 @@ -Subproject commit 4dad935e9c995b7ae2f0a4e7677892fcfb988cf0 +Subproject commit 7671450f34c65259463b4fd651a18df3935f235c diff --git a/ci/centos-stream-8/Dockerfile b/ci/centos-stream-8/Dockerfile deleted file mode 100644 index d8e7322c11..0000000000 --- a/ci/centos-stream-8/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -FROM quay.io/centos/centos:stream8 - -# A version field to invalidate Cirrus's build cache when needed, as suggested in -# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822 -ENV DOCKERFILE_VERSION 20230801 - -RUN dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -RUN dnf config-manager --set-enabled 
powertools - -RUN dnf -y install \ - bison \ - ccache \ - cmake \ - diffutils \ - flex \ - gcc \ - gcc-c++ \ - git \ - jq \ - libpcap-devel \ - make \ - openssl \ - openssl-devel \ - procps-ng \ - python38 \ - python38-devel \ - python38-pip\ - sqlite \ - swig \ - which \ - zlib-devel \ - && dnf clean all && rm -rf /var/cache/dnf - -RUN pip3 install websockets junit2html diff --git a/ci/fedora-38/Dockerfile b/ci/fedora-40/Dockerfile similarity index 88% rename from ci/fedora-38/Dockerfile rename to ci/fedora-40/Dockerfile index 5ed4573ac6..f292e11ece 100644 --- a/ci/fedora-38/Dockerfile +++ b/ci/fedora-40/Dockerfile @@ -1,15 +1,14 @@ -FROM fedora:38 +FROM fedora:40 # A version field to invalidate Cirrus's build cache when needed, as suggested in # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822 -ENV DOCKERFILE_VERSION 20230801 +ENV DOCKERFILE_VERSION 20240617 RUN dnf -y install \ bison \ ccache \ cmake \ diffutils \ - findutils \ flex \ gcc \ gcc-c++ \ @@ -21,7 +20,6 @@ RUN dnf -y install \ openssl \ openssl-devel \ procps-ng \ - python3 \ python3-devel \ python3-pip\ sqlite \ diff --git a/ci/tsan_suppressions.txt b/ci/tsan_suppressions.txt index ded78d13ac..7490adda94 100644 --- a/ci/tsan_suppressions.txt +++ b/ci/tsan_suppressions.txt @@ -42,3 +42,7 @@ race:zeek::threading::InputMessage::Object mutex:zeek::threading::Queue::Put mutex:zeek::threading::Queue::LocksForAllQueues deadlock:zeek::threading::Queue::LocksForAllQueues + +# This only happens at shutdown. It was supposedly fixed in civetweb, but has cropped +# up again. See https://github.com/civetweb/civetweb/issues/861 for details. +race:mg_stop diff --git a/cmake b/cmake index 34cf738d60..db0d52761f 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 34cf738d60a95d5ecf001de2c6e259578be4fc56 +Subproject commit db0d52761f38f3602060da36adc1afff608730c1 diff --git a/configure b/configure index 3130214e08..c38acf5980 100755 --- a/configure +++ b/configure @@ -462,6 +462,19 @@ if [ -z "$CMakeCommand" ]; then fi fi +echo "Using $(cmake --version | head -1)" +echo +if [ -n "$CMakeGenerator" ]; then + cmake="${CMakeCommand} -G ${CMakeGenerator} ${CMakeCacheEntries} ${sourcedir}" +else + cmake="${CMakeCommand} ${CMakeCacheEntries} ${sourcedir}" +fi + +if [ "${display_cmake}" = 1 ]; then + echo "${cmake}" + exit 0 +fi + if [ -d $builddir ]; then # If build directory exists, check if it has a CMake cache if [ -f $builddir/CMakeCache.txt ]; then @@ -478,19 +491,6 @@ echo "Build Directory : $builddir" echo "Source Directory: $sourcedir" cd $builddir -echo "Using $(cmake --version | head -1)" -echo -if [ -n "$CMakeGenerator" ]; then - cmake="${CMakeCommand} -G ${CMakeGenerator} ${CMakeCacheEntries} ${sourcedir}" -else - cmake="${CMakeCommand} ${CMakeCacheEntries} ${sourcedir}" -fi - -if [ "${display_cmake}" = 1 ]; then - echo "${cmake}" - exit 0 -fi - eval ${cmake} 2>&1 echo "# This is the command used to configure this build" >config.status diff --git a/doc b/doc index a369cee890..f65820ff0f 160000 --- a/doc +++ b/doc @@ -1 +1 @@ -Subproject commit a369cee890d88a106216915c0202ddb581e39974 +Subproject commit f65820ff0faf2887799fe691a443b5db39eeed54 diff --git a/scripts/base/frameworks/analyzer/logging.zeek b/scripts/base/frameworks/analyzer/logging.zeek index dc3a611f4d..cde62315c2 100644 --- a/scripts/base/frameworks/analyzer/logging.zeek +++ b/scripts/base/frameworks/analyzer/logging.zeek @@ -53,6 +53,12 @@ export { ## service field. 
 	option include_confirmations = F;
+
+	## Enable tracking of analyzers getting disabled. This is mostly
+	## interesting for troubleshooting of analyzers in DPD scenarios.
+	## Setting this option may also generate multiple log entries per
+	## connection.
+	option include_disabling = F;
+
 	## If a violation contains information about the data causing it,
 	## include at most this many bytes of it in the log.
 	option failure_data_max_size = 40;
@@ -88,11 +94,24 @@ event zeek_init() &priority=5
 	Option::set_change_handler("Analyzer::Logging::include_confirmations",
 	    include_confirmations_handler);
 
+	local include_disabling_handler = function(id: string, new_value: bool): bool {
+		if ( new_value )
+			enable_event_group("Analyzer::Logging::include_disabling");
+		else
+			disable_event_group("Analyzer::Logging::include_disabling");
+
+		return new_value;
+	};
+	Option::set_change_handler("Analyzer::Logging::include_disabling",
+	    include_disabling_handler);
+
 	# Call the handlers directly with the current values to avoid config
 	# framework interactions like creating entries in config.log.
 	enable_handler("Analyzer::Logging::enable", Analyzer::Logging::enable);
 	include_confirmations_handler("Analyzer::Logging::include_confirmations",
 	    Analyzer::Logging::include_confirmations);
+	include_disabling_handler("Analyzer::Logging::include_disabling",
+	    Analyzer::Logging::include_disabling);
 	}
 
@@ -119,7 +138,7 @@ function populate_from_file(rec: Info, f: fa_file)
 	rec$fuid = f$id;
 
 	# If the confirmation didn't have a connection, but the
-	# fa_file object has has exactly one, use it.
+	# fa_file object has exactly one, use it.
 	if ( ! rec?$uid && f?$conns && |f$conns| == 1 )
 		{
 		for ( _, c in f$conns )
@@ -151,7 +170,7 @@ event analyzer_confirmation_info(atype: AllAnalyzers::Tag, info: AnalyzerConfirm
 	Log::write(LOG, rec);
 	}
 
-event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo)
+event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo) &priority=6
 	{
 	if ( atype in ignore_analyzers )
 		return;
@@ -180,3 +199,25 @@ event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationI
 
 	Log::write(LOG, rec);
 	}
+
+hook Analyzer::disabling_analyzer(c: connection, atype: AllAnalyzers::Tag, aid: count) &priority=-1000 &group="Analyzer::Logging::include_disabling"
+	{
+	if ( atype in ignore_analyzers )
+		return;
+
+	local rec = Info(
+		$ts=network_time(),
+		$cause="disabled",
+		$analyzer_kind=analyzer_kind(atype),
+		$analyzer_name=Analyzer::name(atype),
+	);
+
+	populate_from_conn(rec, c);
+
+	if ( c?$dpd_state && aid in c$dpd_state$violations )
+		{
+		rec$failure_data = fmt("Disabled after %d violations", c$dpd_state$violations[aid]);
+		}
+
+	Log::write(LOG, rec);
+	}
diff --git a/scripts/base/frameworks/cluster/__load__.zeek b/scripts/base/frameworks/cluster/__load__.zeek
index 47918e7d0d..a854302636 100644
--- a/scripts/base/frameworks/cluster/__load__.zeek
+++ b/scripts/base/frameworks/cluster/__load__.zeek
@@ -14,14 +14,21 @@ redef Broker::log_topic = Cluster::rr_log_topic;
 # Add a cluster prefix.
 @prefixes += cluster
 
-# If this script isn't found anywhere, the cluster bombs out.
-# Loading the cluster framework requires that a script by this name exists
-# somewhere in the ZEEKPATH. The only thing in the file should be the
-# cluster definition in the :zeek:id:`Cluster::nodes` variable.
+@if ( Supervisor::is_supervised() )
+# When running a supervised cluster, populate Cluster::nodes from the node table
+# the Supervisor provides to new Zeek nodes.
The management framework configures +# the cluster this way. +@load ./supervisor +@if ( Cluster::Supervisor::__init_cluster_nodes() && Cluster::get_node_count(Cluster::LOGGER) > 0 ) +redef Cluster::manager_is_logger = F; +@endif +@endif -@if ( ! Supervisor::__init_cluster() ) -# When running a supervised cluster, Cluster::nodes is instead populated -# from the internal C++-layer directly via the above BIF. +@if ( |Cluster::nodes| == 0 ) +# Fall back to loading a cluster topology from cluster-layout.zeek. If Zeek +# cannot find this script in your ZEEKPATH, it will exit. The script should only +# contain the cluster definition in the :zeek:id:`Cluster::nodes` variable. +# The zeekctl tool manages this file for you. @load cluster-layout @endif diff --git a/scripts/base/frameworks/cluster/supervisor.zeek b/scripts/base/frameworks/cluster/supervisor.zeek new file mode 100644 index 0000000000..ba0d676c6a --- /dev/null +++ b/scripts/base/frameworks/cluster/supervisor.zeek @@ -0,0 +1,59 @@ +##! Cluster-related functionality specific to running under the Supervisor +##! framework. + +@load base/frameworks/supervisor/api + +module Cluster::Supervisor; + +export { + ## Populates the current node's :zeek:id:`Cluster::nodes` table from the + ## supervisor's node configuration in :zeek:id:`Supervisor::NodeConfig`. + ## + ## Returns: true if initialization completed, false otherwise. + global __init_cluster_nodes: function(): bool; +} + +function __init_cluster_nodes(): bool + { + local config = Supervisor::node(); + + if ( |config$cluster| == 0 ) + return F; + + local rolemap: table[Supervisor::ClusterRole] of Cluster::NodeType = { + [Supervisor::LOGGER] = Cluster::LOGGER, + [Supervisor::MANAGER] = Cluster::MANAGER, + [Supervisor::PROXY] = Cluster::PROXY, + [Supervisor::WORKER] = Cluster::WORKER, + }; + + local manager_name = ""; + local cnode: Cluster::Node; + local typ: Cluster::NodeType = Cluster::NONE; + + for ( node_name, endp in config$cluster ) + { + if ( endp$role == Supervisor::MANAGER ) + manager_name = node_name; + } + + for ( node_name, endp in config$cluster ) + { + if ( endp$role in rolemap ) + typ = rolemap[endp$role]; + + cnode = [$node_type=typ, $ip=endp$host, $p=endp$p]; +@pragma push ignore-deprecations + if ( endp?$interface ) + cnode$interface = endp$interface; +@pragma pop ignore-deprecations + if ( |manager_name| > 0 && cnode$node_type != Cluster::MANAGER ) + cnode$manager = manager_name; + if ( endp?$metrics_port ) + cnode$metrics_port = endp$metrics_port; + + Cluster::nodes[node_name] = cnode; + } + + return T; + } diff --git a/scripts/base/frameworks/supervisor/api.zeek b/scripts/base/frameworks/supervisor/api.zeek index 97a286f8c9..e3a6d06c9b 100644 --- a/scripts/base/frameworks/supervisor/api.zeek +++ b/scripts/base/frameworks/supervisor/api.zeek @@ -30,6 +30,8 @@ export { ## The PCAP file name from which the node will read/analyze packets. ## Typically used by worker nodes. pcap_file: string &optional; + ## The TCP port at which the cluster node exposes metrics for Prometheus. + metrics_port: port &optional; }; ## Configuration options that influence behavior of a supervised Zeek node. 
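A rough sketch of how a supervised node could consume the new field. The
iteration and printing here are illustrative only; ``Supervisor::node()``,
the ``cluster`` table, and ``metrics_port`` are the pieces defined above:

    event zeek_init()
        {
        local cfg = Supervisor::node();

        for ( name, ep in cfg$cluster )
            {
            # metrics_port is &optional, so guard the access.
            if ( ep?$metrics_port )
                print fmt("%s serves Prometheus metrics on %s",
                          name, ep$metrics_port);
            }
        }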
diff --git a/scripts/base/frameworks/telemetry/main.zeek b/scripts/base/frameworks/telemetry/main.zeek
index 59e480a125..d71a7d9783 100644
--- a/scripts/base/frameworks/telemetry/main.zeek
+++ b/scripts/base/frameworks/telemetry/main.zeek
@@ -295,11 +295,11 @@ function register_counter_family(opts: MetricOpts): CounterFamily
 	local f = Telemetry::__counter_family(
 		opts$prefix,
 		opts$name,
-		opts$labels,
+		opts$label_names,
 		opts$help_text,
 		opts$unit
 	);
-	return CounterFamily($__family=f, $__labels=opts$labels);
+	return CounterFamily($__family=f, $__labels=opts$label_names);
 	}
 
 # Fallback Counter returned when there are issues with the labels.
@@ -354,11 +354,11 @@ function register_gauge_family(opts: MetricOpts): GaugeFamily
 	local f = Telemetry::__gauge_family(
 		opts$prefix,
 		opts$name,
-		opts$labels,
+		opts$label_names,
 		opts$help_text,
 		opts$unit
 	);
-	return GaugeFamily($__family=f, $__labels=opts$labels);
+	return GaugeFamily($__family=f, $__labels=opts$label_names);
 	}
 
 # Fallback Gauge returned when there are issues with the label usage.
@@ -422,12 +422,12 @@ function register_histogram_family(opts: MetricOpts): HistogramFamily
 	local f = Telemetry::__histogram_family(
 		opts$prefix,
 		opts$name,
-		opts$labels,
+		opts$label_names,
 		opts$bounds,
 		opts$help_text,
 		opts$unit
 	);
-	return HistogramFamily($__family=f, $__labels=opts$labels);
+	return HistogramFamily($__family=f, $__labels=opts$label_names);
 	}
 
 # Fallback Histogram when there are issues with the labels.
@@ -484,8 +484,8 @@ global version_gauge_family = Telemetry::register_gauge_family([
 	$name="version_info",
 	$unit="",
 	$help_text="The Zeek version",
-	$labels=vector("version_number", "major", "minor", "patch", "commit",
-	               "beta", "debug","version_string")
+	$label_names=vector("version_number", "major", "minor", "patch", "commit",
+	                    "beta", "debug","version_string")
 ]);
 
 event zeek_init()
diff --git a/scripts/base/frameworks/telemetry/options.zeek b/scripts/base/frameworks/telemetry/options.zeek
index f26d8a0ef0..6aa05f5f9b 100644
--- a/scripts/base/frameworks/telemetry/options.zeek
+++ b/scripts/base/frameworks/telemetry/options.zeek
@@ -6,8 +6,12 @@ module Telemetry;
 # to see the options without needing the rest.
 
 export {
-	## Port used to make metric data available to Prometheus scrapers via
+	## Address used to make metric data available to Prometheus scrapers via
 	## HTTP.
+	const metrics_address = getenv("ZEEK_DEFAULT_LISTEN_ADDRESS") &redef;
+
+	## Port used to make metric data available to Prometheus scrapers via
+	## HTTP. The default value means Zeek won't expose the port.
 	const metrics_port = 0/unknown &redef;
 
 	## ID for the metrics exporter. This is used as the 'endpoint' label
diff --git a/scripts/base/init-bare.zeek b/scripts/base/init-bare.zeek
index dd47c55aad..30b49def26 100644
--- a/scripts/base/init-bare.zeek
+++ b/scripts/base/init-bare.zeek
@@ -5807,8 +5807,12 @@ export {
 		## label values have to be provided. Examples of a label might
 		## be the protocol a general observation applies to, the
 		## directionality in a traffic flow, or protocol-specific
-		## context like a particular message type.
-		labels: vector of string &default=vector();
+		## context like a particular message type. This field is only
+		## used in the construction of new metrics and will not be
+		## filled in when returned from
+		## :zeek:see:`Telemetry::collect_metrics` or
+		## :zeek:see:`Telemetry::collect_histogram_metrics`.
+		label_names: vector of string &default=vector();
 
 		## Whether the metric represents something that is accumulating.
## Defaults to ``T`` for counters and ``F`` for gauges and @@ -5832,8 +5836,16 @@ export { ## A :zeek:see:`Telemetry::MetricOpts` record describing this metric. opts: MetricOpts; + ## The label names (also called dimensions) of the metric. When + ## instantiating or working with concrete metrics, corresponding + ## label values have to be provided. Examples of a label might + ## be the protocol a general observation applies to, the + ## directionality in a traffic flow, or protocol-specific + ## context like a particular message type. + label_names: vector of string &default=vector(); + ## The label values associated with this metric, if any. - labels: vector of string; + label_values: vector of string &optional; ## The value of gauge or counter cast to a double ## independent of the underlying data type. @@ -5847,8 +5859,16 @@ export { ## A :zeek:see:`Telemetry::MetricOpts` record describing this histogram. opts: MetricOpts; - ## The label values associated with this histogram, if any. - labels: vector of string; + ## The label names (also called dimensions) of the metric. When + ## instantiating or working with concrete metrics, corresponding + ## label values have to be provided. Examples of a label might + ## be the protocol a general observation applies to, the + ## directionality in a traffic flow, or protocol-specific + ## context like a particular message type. + label_names: vector of string &default=vector(); + + ## The label values associated with this metric, if any. + label_values: vector of string &optional; ## Individual counters for each of the buckets as ## described by the *bounds* field in *opts*; diff --git a/scripts/policy/frameworks/management/agent/main.zeek b/scripts/policy/frameworks/management/agent/main.zeek index 6397313eab..81ffe97252 100644 --- a/scripts/policy/frameworks/management/agent/main.zeek +++ b/scripts/policy/frameworks/management/agent/main.zeek @@ -520,6 +520,8 @@ function deploy_request_finish(areq: Management::Request::Request) if ( node?$interface ) cep$interface = node$interface; + if ( node?$metrics_port ) + cep$metrics_port = node$metrics_port; g_cluster[node$name] = cep; } @@ -618,18 +620,21 @@ function get_nodes_request_finish(areq: Management::Request::Request) # the respective boot.zeek scripts). if ( node in sns$node$cluster ) { - cns$cluster_role = sns$node$cluster[node]$role; + local cep: Supervisor::ClusterEndpoint = sns$node$cluster[node]; + cns$cluster_role = cep$role; # For cluster nodes, copy run state from g_nodes, our # live node status table. if ( node in g_nodes ) cns$state = g_nodes[node]$state; - # The supervisor's responses use 0/tcp (not 0/unknown) - # when indicating an unused port because its internal - # serialization always assumes TCP. - if ( sns$node$cluster[node]$p != 0/tcp ) - cns$p = sns$node$cluster[node]$p; + # The supervisor's responses use 0/unknown to indicate + # unused ports. (Prior to Zeek 7 this used to be 0/tcp.) + if ( cep$p != 0/unknown ) + cns$p = cep$p; + + if ( cep?$metrics_port ) + cns$metrics_port = cep$metrics_port; } else { diff --git a/scripts/policy/frameworks/management/controller/config.zeek b/scripts/policy/frameworks/management/controller/config.zeek index 9f96155d9f..0b3d50b4ab 100644 --- a/scripts/policy/frameworks/management/controller/config.zeek +++ b/scripts/policy/frameworks/management/controller/config.zeek @@ -61,16 +61,27 @@ export { ## for websocket clients. 
const default_port_websocket = 2149/tcp &redef; - ## Whether the controller should auto-assign listening ports to cluster - ## nodes that need them and don't have them explicitly specified in - ## cluster configurations. - const auto_assign_ports = T &redef; + ## Whether the controller should auto-assign Broker listening ports to + ## cluster nodes that need them and don't have them explicitly specified + ## in cluster configurations. + const auto_assign_broker_ports = T &redef; + const auto_assign_ports = T &redef &deprecated="Remove in v7.1: replaced by auto_assign_broker_ports."; ## The TCP start port to use for auto-assigning cluster node listening - ## ports, if :zeek:see:`Management::Controller::auto_assign_ports` is - ## enabled (the default) and the provided configurations don't have - ## ports assigned. - const auto_assign_start_port = 2200/tcp &redef; + ## ports, if :zeek:see:`Management::Controller::auto_assign_broker_ports` is + ## enabled (the default) and nodes don't come with those ports assigned. + const auto_assign_broker_start_port = 2200/tcp &redef; + const auto_assign_start_port = 2200/tcp &redef &deprecated="Remove in v7.1: replaced by auto_assign_broker_start_port."; + + ## Whether the controller should auto-assign metrics ports for Prometheus + ## to nodes that need them and don't have them explicitly specified in + ## their cluster configurations. + const auto_assign_metrics_ports = T &redef; + + ## The TCP start port to use for auto-assigning metrics exposition ports + ## for Prometheus, if :zeek:see:`Management::Controller::auto_assign_metrics_ports` + ## is enabled (the default). + const auto_assign_metrics_start_port = 9000/tcp &redef; ## The controller's Broker topic. Clients send requests to this topic. const topic = "zeek/management/controller" &redef; diff --git a/scripts/policy/frameworks/management/controller/main.zeek b/scripts/policy/frameworks/management/controller/main.zeek index 1cfd5e6880..fd7df8343d 100644 --- a/scripts/policy/frameworks/management/controller/main.zeek +++ b/scripts/policy/frameworks/management/controller/main.zeek @@ -116,14 +116,18 @@ global config_deploy_to_agents: function(config: Management::Configuration, req: Management::Request::Request); # Returns list of names of nodes in the given configuration that require a -# listening port. Returns empty list if the config has no such nodes. -global config_nodes_lacking_ports: function(config: Management::Configuration): vector of string; +# Broker listening port. Returns empty list if the config has no such nodes. +global config_nodes_lacking_broker_ports: function(config: Management::Configuration): vector of string; # Assign node listening ports in the given configuration by counting up from -# Management::Controller::auto_assign_start_port. Scans the included nodes and -# fills in ports for any non-worker cluster node that doesn't have an existing -# port. This assumes those ports are actually available on the instances. -global config_assign_ports: function(config: Management::Configuration); +# Management::Controller::auto_assign_broker_start_port. Scans the included +# nodes and fills in ports for any non-worker cluster node that doesn't have an +# existing port. This assumes those ports are actually available on the +# instances. +global config_assign_broker_ports: function(config: Management::Configuration); + +# Assign node metrics ports, similar to config_assign_broker_ports above. 
+global config_assign_metrics_ports: function(config: Management::Configuration);
 
 # Validate the given configuration in terms of missing, incorrect, or
 # conflicting content. Returns T if validation succeeds, F otherwise. The
@@ -182,6 +186,24 @@ global g_configs: table[ConfigState] of Management::Configuration
 
 function config_deploy_to_agents(config: Management::Configuration, req: Management::Request::Request)
 	{
+	# Make any final changes to the configuration we send off.
+
+	# If needed, fill in agent IP address info as learned from their peerings.
+	# XXX this will need revisiting when we support host names.
+	local instances: set[Management::Instance];
+
+	for ( inst in config$instances )
+		{
+		if ( inst$name in g_instances_known
+		     && inst$host == 0.0.0.0
+		     && g_instances_known[inst$name]$host != 0.0.0.0 )
+			inst$host = g_instances_known[inst$name]$host;
+
+		add instances[inst];
+		}
+
+	config$instances = instances;
+
 	for ( name in g_instances )
 		{
 		if ( name !in g_instances_ready )
@@ -265,7 +287,7 @@ function drop_instance(inst: Management::Instance)
 	Management::Log::info(fmt("dropped instance %s", inst$name));
 	}
 
-function config_nodes_lacking_ports(config: Management::Configuration): vector of string
+function config_nodes_lacking_broker_ports(config: Management::Configuration): vector of string
 	{
 	local res: vector of string;
 	local roles = { Supervisor::MANAGER, Supervisor::LOGGER, Supervisor::PROXY };
@@ -279,7 +301,23 @@ function config_nodes_lacking_ports(config: Management::Configuration): vector o
 	return sort(res, strcmp);
 	}
 
-function config_assign_ports(config: Management::Configuration)
+# A comparison function for nodes. This first compares according to the node's
+# agent/instance, then by role priority, and finally by node name. This yields
+# an ordering in which port assignments remain sequential per instance, and per
+# role within an instance.
+function config_nodes_compare(n1: Management::Node, n2: Management::Node, roles: table[Supervisor::ClusterRole] of count): int
+	{
+	local instcmp = strcmp(n1$instance, n2$instance);
+	if ( instcmp != 0 )
+		return instcmp;
+	if ( roles[n1$role] < roles[n2$role] )
+		return -1;
+	if ( roles[n1$role] > roles[n2$role] )
+		return 1;
+	return strcmp(n1$name, n2$name);
+	}
+
+function config_assign_broker_ports(config: Management::Configuration)
 	{
 	# We're changing nodes in the configuration's set, so need to rebuild it:
 	local new_nodes: set[Management::Node];
@@ -295,7 +333,15 @@ function config_assign_ports(config: Management::Configuration)
 	# not per-instance: if the user wants auto-assignment, it seems better
 	# to avoid confusion with the same port being used on multiple
 	# instances.
-	local p = port_to_count(Management::Controller::auto_assign_start_port);
+	local start_port = Management::Controller::auto_assign_broker_start_port;
+
+@pragma push ignore-deprecations
+	# Keep deprecated config setting working until 7.1:
+	if ( Management::Controller::auto_assign_start_port != 2200/tcp )
+		start_port = Management::Controller::auto_assign_start_port;
+@pragma pop ignore-deprecations
+
+	local p = port_to_count(start_port);
 
 	# A set that tracks the ports we've used so far. Helpful for avoiding
 	# collisions between manually specified and auto-enumerated ports.
@@ -327,11 +373,9 @@ function config_assign_ports(config: Management::Configuration)
 		add new_nodes[node];
 		}
 
-	# Now process the ones that may need ports, in order. We first sort by
-	# roles; we go manager -> logger -> proxy.
-	# Next are instance names, to get locally sequential ports among the
-	# same roles, and finally by name.
+	# Now process the ones that need ports, in order.
 	local nodes: vector of Management::Node;
+
 	for ( node in config$nodes )
 		{
 		if ( node?$p )
 			next;
 		if ( node$role !in roles )
 			next;
 		nodes += node;
 		}
 
 	sort(nodes, function [roles] (n1: Management::Node, n2: Management::Node): int
-		{
-		if ( roles[n1$role] < roles[n2$role] )
-			return -1;
-		if ( roles[n1$role] > roles[n2$role] )
-			return 1;
-		local instcmp = strcmp(n1$instance, n2$instance);
-		if ( instcmp != 0 )
-			return instcmp;
-		return strcmp(n1$name, n2$name);
-		});
+		{ return config_nodes_compare(n1, n2, roles); });
 
 	for ( i in nodes )
 		{
@@ -372,6 +407,87 @@ function config_assign_ports(config: Management::Configuration)
 	config$nodes = new_nodes;
 	}
 
+function config_assign_metrics_ports(config: Management::Configuration)
+	{
+	# We're changing nodes in the configuration's set, so need to rebuild it:
+	local new_nodes: set[Management::Node];
+
+	# An ordering of nodes by role:
+	local roles: table[Supervisor::ClusterRole] of count = {
+		[Supervisor::MANAGER] = 0,
+		[Supervisor::LOGGER] = 1,
+		[Supervisor::PROXY] = 2,
+		[Supervisor::WORKER] = 3,
+	};
+
+	local p = port_to_count(Management::Controller::auto_assign_metrics_start_port);
+	local ports_set: set[count];
+	local node: Management::Node;
+
+	# Pre-populate agent ports, if we have them:
+	for ( inst in config$instances )
+		{
+		if ( inst?$listen_port )
+			add ports_set[port_to_count(inst$listen_port)];
+		}
+
+	# Pre-populate nodes with pre-defined metrics ports, as well
+	# as their Broker ports:
+	for ( node in config$nodes )
+		{
+		if ( node?$p )
+			add ports_set[port_to_count(node$p)];
+
+		if ( node?$metrics_port )
+			{
+			add ports_set[port_to_count(node$metrics_port)];
+			add new_nodes[node];
+			}
+		}
+
+	# Copy any nodes to the new set that have roles we don't care about.
+	# (This should be none right now given that every cluster node can have
+	# a metrics port.)
+	for ( node in config$nodes )
+		{
+		if ( node$role !in roles )
+			add new_nodes[node];
+		}
+
+	# Now process the ones that need ports, in order.
+	local nodes: vector of Management::Node;
+
+	for ( node in config$nodes )
+		{
+		if ( node?$metrics_port )
+			next;
+		if ( node$role !in roles )
+			next;
+		nodes += node;
+		}
+
+	sort(nodes, function [roles] (n1: Management::Node, n2: Management::Node): int
+		{ return config_nodes_compare(n1, n2, roles); });
+
+	for ( i in nodes )
+		{
+		node = nodes[i];
+
+		# Find next available port ...
+		while ( p in ports_set )
+			++p;
+
+		node$metrics_port = count_to_port(p, tcp);
+		add new_nodes[node];
+		add ports_set[p];
+
+		# ... and consume it.
+		++p;
+		}
+
+	config$nodes = new_nodes;
+	}
+
 function config_validate(config: Management::Configuration,
     req: Management::Request::Request): bool
 	{
@@ -493,6 +609,36 @@ function config_validate(config: Management::Configuration,
 			}
 		}
 
+	# If port auto-configuration is disabled, the user needs to define the
+	# ports. Verify this both for Broker's ports and the metrics export
+	# ones.
+
+@pragma push ignore-deprecations
+	# Keep deprecated config setting working until 7.1:
+	local auto_broker_ports = Management::Controller::auto_assign_broker_ports;
+	if ( ! Management::Controller::auto_assign_ports )
+		auto_broker_ports = F;
+@pragma pop ignore-deprecations
+
+	local nodes: vector of string;
+	local nodes_str: string;
+
+	if ( ! auto_broker_ports )
+		{
+		nodes = config_nodes_lacking_broker_ports(config);
+
+		if ( |nodes| > 0 )
+			{
+			nodes_str = join_string_vec(nodes, ", ");
+			errors += make_error(req$id, fmt("Broker port auto-assignment disabled but nodes %s lack ports", nodes_str));
+			}
+		}
+
+	# For metrics ports, it is not an error if auto-assignment is disabled
+	# but not all nodes feature a port. The user might intentionally want
+	# telemetry only from select nodes, and the discovery feature supports
+	# this.
+
 	# Possibilities for the future:
 	# - Are node options understood?
 	# - Do provided scripts exist/load?
@@ -893,32 +1039,20 @@ event Management::Controller::API::stage_configuration_request(reqid: string, co
 		return;
 		}
 
-	if ( ! Management::Controller::auto_assign_ports )
-		{
-		local nodes = config_nodes_lacking_ports(config);
-
-		if ( |nodes| > 0 )
-			{
-			local nodes_str = join_string_vec(nodes, ", ");
-
-			res$success = F;
-			res$error = fmt("port auto-assignment disabled but nodes %s lack ports", nodes_str);
-			req$results += res;
-
-			Management::Log::info(fmt("tx Management::Controller::API::stage_configuration_response %s",
-			    Management::Request::to_string(req)));
-			Broker::publish(Management::Controller::topic,
-			    Management::Controller::API::stage_configuration_response, req$id, req$results);
-			Management::Request::finish(req$id);
-			return;
-			}
-		}
-
 	g_configs[STAGED] = config;
 	config_copy = copy(config);
 
-	if ( Management::Controller::auto_assign_ports )
-		config_assign_ports(config_copy);
+@pragma push ignore-deprecations
+	# Keep deprecated config setting working until 7.1:
+	local auto_broker_ports = Management::Controller::auto_assign_broker_ports;
+	if ( ! Management::Controller::auto_assign_ports )
+		auto_broker_ports = F;
+
+	if ( auto_broker_ports )
+		config_assign_broker_ports(config_copy);
+	if ( Management::Controller::auto_assign_metrics_ports )
+		config_assign_metrics_ports(config_copy);
+@pragma pop ignore-deprecations
 
 	g_configs[READY] = config_copy;
 
diff --git a/scripts/policy/frameworks/management/types.zeek b/scripts/policy/frameworks/management/types.zeek
index 796c943754..f12cc1d9e5 100644
--- a/scripts/policy/frameworks/management/types.zeek
+++ b/scripts/policy/frameworks/management/types.zeek
@@ -60,6 +60,7 @@ export {
 		interface: string &optional;	##< Interface to sniff
 		cpu_affinity: int &optional;	##< CPU/core number to pin to
 		env: table[string] of string &default=table();	##< Custom environment vars
+		metrics_port: port &optional;	##< Metrics exposure port, for Prometheus
 	};
 
 	## Data structure capturing a cluster's complete configuration.
@@ -88,6 +89,8 @@ export {
 		pid: int &optional;
 		## The node's Broker peering listening port, if any.
 		p: port &optional;
+		## The node's metrics port for Prometheus, if any.
+		metrics_port: port &optional;
 	};
 
 	type NodeStatusVec: vector of NodeStatus;
diff --git a/scripts/policy/frameworks/telemetry/log.zeek b/scripts/policy/frameworks/telemetry/log.zeek
index 935b92cefa..8ee376eee4 100644
--- a/scripts/policy/frameworks/telemetry/log.zeek
+++ b/scripts/policy/frameworks/telemetry/log.zeek
@@ -33,9 +33,6 @@ export {
 		## the underlying metric type.
 		metric_type: string &log;
 
-		## The prefix (namespace) of the metric.
-		prefix: string &log;
-
 		## The name of the metric.
 		name: string &log;
 
@@ -57,9 +54,6 @@ export {
 		## Peer that generated this log.
 		peer: string &log;
 
-		## The prefix (namespace) of the metric.
-		prefix: string &log;
-
 		## The name of the metric.
name: string &log; @@ -137,10 +131,9 @@ function do_log() local rec = Info($ts=ts, $peer=peer_description, $metric_type=metric_type, - $prefix=m$opts$prefix, $name=m$opts$name, - $labels=m$opts$labels, - $label_values=m$labels, + $labels=m$label_names, + $label_values=m$label_values, $value=m$value); Log::write(LOG, rec); @@ -168,10 +161,9 @@ function do_log() local hrec = HistogramInfo($ts=ts, $peer=peer_description, - $prefix=hm$opts$prefix, $name=hm$opts$name, - $labels=hm$opts$labels, - $label_values=hm$labels, + $labels=hm$label_names, + $label_values=hm$label_values, $bounds=hm$opts$bounds, $values=hm$values, $sum=hm$sum, diff --git a/scripts/policy/frameworks/telemetry/prometheus.zeek b/scripts/policy/frameworks/telemetry/prometheus.zeek index b1d9374e8b..2b2ac4d255 100644 --- a/scripts/policy/frameworks/telemetry/prometheus.zeek +++ b/scripts/policy/frameworks/telemetry/prometheus.zeek @@ -2,10 +2,6 @@ ##! from the cluster node configuration for exporting data to ##! Prometheus. ##! -##! For customization or disabling, redef the involved Telemetry options -##! again. Specifically, to disable listening on port 9911, set -##! :zeek:see:`Telemetry::metrics_port` to `0/unknown` again. -##! ##! The manager node will also provide a ``/services.json`` endpoint ##! for the HTTP Service Discovery system in Prometheus to use for ##! configuration. This endpoint will include information for all of diff --git a/scripts/site/local.zeek b/scripts/site/local.zeek index 328c823975..71251c0cb1 100644 --- a/scripts/site/local.zeek +++ b/scripts/site/local.zeek @@ -94,8 +94,8 @@ redef digest_salt = "Please change this value."; # telemetry_histogram.log. @load frameworks/telemetry/log -# Enable metrics centralization on the manager. This opens port 9911/tcp -# on the manager node that can be readily scraped by Prometheus. +# Enable Prometheus metrics scraping in the cluster: each Zeek node will listen +# on the metrics port defined in its Cluster::nodes entry. # @load frameworks/telemetry/prometheus # Uncomment the following line to enable detection of the heartbleed attack. Enabling diff --git a/scripts/spicy/zeek.spicy b/scripts/spicy/zeek.spicy index a6f3b1f344..cc24f96117 100644 --- a/scripts/spicy/zeek.spicy +++ b/scripts/spicy/zeek.spicy @@ -12,13 +12,13 @@ import spicy; ## ## This function has been deprecated and will be removed. Use ``spicy::accept_input`` ## instead, which will have the same effect with Zeek. -public function confirm_protocol() : void &cxxname="zeek::spicy::rt::confirm_protocol"; +public function confirm_protocol(): void &cxxname="zeek::spicy::rt::confirm_protocol"; ## [Deprecated] Triggers a DPD protocol violation for the current connection. ## ## This function has been deprecated and will be removed. Use ``spicy::decline_input`` ## instead, which will have the same effect with Zeek. -public function reject_protocol(reason: string) : void &cxxname="zeek::spicy::rt::reject_protocol"; +public function reject_protocol(reason: string): void &cxxname="zeek::spicy::rt::reject_protocol"; ## Reports a "weird" to Zeek. This should be used with similar semantics as in ## Zeek: something quite unexpected happening at the protocol level, which however @@ -31,19 +31,19 @@ public function reject_protocol(reason: string) : void &cxxname="zeek::spicy::rt public function weird(id: string, addl: string = "") &cxxname="zeek::spicy::rt::weird"; ## Returns true if we're currently parsing the originator side of a connection. 
-public function is_orig() : bool &cxxname="zeek::spicy::rt::is_orig";
+public function is_orig(): bool &cxxname="zeek::spicy::rt::is_orig";
 
 ## Returns the current connection's UID.
-public function uid() : string &cxxname="zeek::spicy::rt::uid";
+public function uid(): string &cxxname="zeek::spicy::rt::uid";
 
 ## Returns the current connection's 4-tuple ID to make IP address and port information available.
-public function conn_id() : tuple<addr, port, addr, port> &cxxname="zeek::spicy::rt::conn_id";
+public function conn_id(): tuple<addr, port, addr, port> &cxxname="zeek::spicy::rt::conn_id";
 
 ## Instructs Zeek to flip the directionality of the current connection.
-public function flip_roles() : void &cxxname="zeek::spicy::rt::flip_roles";
+public function flip_roles(): void &cxxname="zeek::spicy::rt::flip_roles";
 
 ## Returns the number of packets seen so far on the current side of the current connection.
-public function number_packets() : uint64 &cxxname="zeek::spicy::rt::number_packets";
+public function number_packets(): uint64 &cxxname="zeek::spicy::rt::number_packets";
 
 ## Opaque handle to a protocol analyzer.
 public type ProtocolHandle = __library_type("zeek::spicy::rt::ProtocolHandle");
@@ -65,7 +65,7 @@ public type ProtocolHandle = __library_type("zeek::spicy::rt::ProtocolHandle");
 ## Note: For backwards compatibility, the analyzer argument can be left unset to add
 ## a DPD analyzer. This use is deprecated, though; use the single-argument version of
 ## `protocol_begin` for that instead.
-public function protocol_begin(analyzer: optional<string>, protocol: spicy::Protocol = spicy::Protocol::TCP) : void &cxxname="zeek::spicy::rt::protocol_begin";
+public function protocol_begin(analyzer: optional<string>, protocol: spicy::Protocol = spicy::Protocol::TCP): void &cxxname="zeek::spicy::rt::protocol_begin";
 
 ## Adds a Zeek-side DPD child protocol analyzer performing dynamic protocol detection
 ## on subsequently provided data.
@@ -78,7 +78,7 @@ public function protocol_begin(analyzer: optional<string>, protocol: spicy::Prot
 ##
 ## protocol: the transport-layer protocol on which to perform protocol detection;
 ## only TCP is currently supported here
-public function protocol_begin(protocol: spicy::Protocol = spicy::Protocol::TCP) : void &cxxname="zeek::spicy::rt::protocol_begin";
+public function protocol_begin(protocol: spicy::Protocol = spicy::Protocol::TCP): void &cxxname="zeek::spicy::rt::protocol_begin";
 
 ## Gets a handle to a Zeek-side child protocol analyzer for the current connection.
 ##
@@ -98,7 +98,7 @@ public function protocol_begin(protocol: spicy::Protocol = spicy::Protocol::TCP)
 ## protocol: the transport-layer protocol that the analyser uses; only TCP is
 ## currently supported here
 ##
-public function protocol_handle_get_or_create(analyzer: string, protocol: spicy::Protocol = spicy::Protocol::TCP) : ProtocolHandle &cxxname="zeek::spicy::rt::protocol_handle_get_or_create";
+public function protocol_handle_get_or_create(analyzer: string, protocol: spicy::Protocol = spicy::Protocol::TCP): ProtocolHandle &cxxname="zeek::spicy::rt::protocol_handle_get_or_create";
 
 ## Forwards protocol data to all previously instantiated Zeek-side child protocol analyzers of a given transport-layer.
##
@@ -107,7 +107,7 @@ public function protocol_handle_get_or_create(analyzer: string, protocol: spicy:
 ## data: chunk of data to forward to child analyzer
 ##
 ## protocol: the transport-layer protocol of the children to forward to; only TCP is currently supported here
-public function protocol_data_in(is_orig: bool, data: bytes, protocol: spicy::Protocol = spicy::Protocol::TCP) : void &cxxname="zeek::spicy::rt::protocol_data_in";
+public function protocol_data_in(is_orig: bool, data: bytes, protocol: spicy::Protocol = spicy::Protocol::TCP): void &cxxname="zeek::spicy::rt::protocol_data_in";
 
 ## Forwards protocol data to a specific previously instantiated Zeek-side child analyzer.
 ##
@@ -116,7 +116,7 @@ public function protocol_data_in(is_orig: bool, data: bytes, protocol: spicy::Pr
 ## data: chunk of data to forward to child analyzer
 ##
 ## h: handle to the child analyzer to forward data into
-public function protocol_data_in(is_orig: bool, data: bytes, h: ProtocolHandle) : void &cxxname="zeek::spicy::rt::protocol_data_in";
+public function protocol_data_in(is_orig: bool, data: bytes, h: ProtocolHandle): void &cxxname="zeek::spicy::rt::protocol_data_in";
 
 ## Signals a gap in input data to all previously instantiated Zeek-side child protocol analyzers.
 ##
@@ -127,11 +127,11 @@ public function protocol_data_in(is_orig: bool, data: bytes, h: ProtocolHandle)
 ## len: size of gap
 ##
 ## h: optional handle to the child analyzer signal a gap to, else signal to all child analyzers
-public function protocol_gap(is_orig: bool, offset: uint64, len: uint64, h: optional<ProtocolHandle> = Null) : void &cxxname="zeek::spicy::rt::protocol_gap";
+public function protocol_gap(is_orig: bool, offset: uint64, len: uint64, h: optional<ProtocolHandle> = Null): void &cxxname="zeek::spicy::rt::protocol_gap";
 
 ## Signals end-of-data to all previously instantiated Zeek-side child protocol
 ## analyzers and removes them.
-public function protocol_end() : void &cxxname="zeek::spicy::rt::protocol_end";
+public function protocol_end(): void &cxxname="zeek::spicy::rt::protocol_end";
 
 ## Signals end-of-data to the given child analyzer and removes it.
 ##
@@ -147,54 +147,364 @@ public function protocol_handle_close(handle: ProtocolHandle): void &cxxname="ze
 ## Optionally, a mime type can be provided. It will be passed on to Zeek's file analysis framework.
 ## Optionally, a file ID can be provided. It will be passed on to Zeek's file analysis framework.
 ## Returns the Zeek-side file ID of the new file.
-public function file_begin(mime_type: optional<string> = Null, fuid: optional<string> = Null) : string &cxxname="zeek::spicy::rt::file_begin";
+public function file_begin(mime_type: optional<string> = Null, fuid: optional<string> = Null): string &cxxname="zeek::spicy::rt::file_begin";
 
 ## Returns the current file's FUID.
-public function fuid() : string &cxxname="zeek::spicy::rt::fuid";
+public function fuid(): string &cxxname="zeek::spicy::rt::fuid";
 
 ## Terminates the currently active Zeek-side session, flushing all state. Any
 ## subsequent activity will start a new session from scratch. This can only be
 ## called from inside a protocol analyzer.
-public function terminate_session() : void &cxxname="zeek::spicy::rt::terminate_session";
+public function terminate_session(): void &cxxname="zeek::spicy::rt::terminate_session";
 
 ## Tells Zeek to skip sending any further input data to the current analyzer.
 ## This is supported for protocol and file analyzers.
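
A minimal sketch of the child-analyzer functions documented above, in an illustrative (hypothetical) Spicy unit that hands its payload to Zeek's HTTP analyzer:

    import zeek;

    public type Tunnel = unit {
        on %init { zeek::protocol_begin("HTTP"); }

        # Forward each chunk to the child analyzer as it arrives.
        payload: bytes &eod &chunked {
            zeek::protocol_data_in(zeek::is_orig(), $$);
        }

        on %done { zeek::protocol_end(); }
    };
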
-public function skip_input() : void &cxxname="zeek::spicy::rt::skip_input";
+public function skip_input(): void &cxxname="zeek::spicy::rt::skip_input";
 
 ## Signals the expected size of a file to Zeek's file analysis.
 ##
 ## size: expected size of file
 ## fid: Zeek-side ID of the file to operate on; if not given, the file started by the most recent file_begin() will be used
-public function file_set_size(size: uint64, fid: optional<string> = Null) : void &cxxname="zeek::spicy::rt::file_set_size";
+public function file_set_size(size: uint64, fid: optional<string> = Null): void &cxxname="zeek::spicy::rt::file_set_size";
 
 ## Passes file content on to Zeek's file analysis.
 ##
 ## data: chunk of raw data to pass into analysis
 ## fid: Zeek-side ID of the file to operate on; if not given, the file started by the most recent file_begin() will be used
-public function file_data_in(data: bytes, fid: optional<string> = Null) : void &cxxname="zeek::spicy::rt::file_data_in";
+public function file_data_in(data: bytes, fid: optional<string> = Null): void &cxxname="zeek::spicy::rt::file_data_in";
 
 ## Passes file content at a specific offset on to Zeek's file analysis.
 ##
 ## data: chunk of raw data to pass into analysis
 ## offset: position in file where data starts
 ## fid: Zeek-side ID of the file to operate on; if not given, the file started by the most recent file_begin() will be used
-public function file_data_in_at_offset(data: bytes, offset: uint64, fid: optional<string> = Null) : void &cxxname="zeek::spicy::rt::file_data_in_at_offset";
+public function file_data_in_at_offset(data: bytes, offset: uint64, fid: optional<string> = Null): void &cxxname="zeek::spicy::rt::file_data_in_at_offset";
 
 ## Signals a gap in a file to Zeek's file analysis.
 ##
 ## offset: position in file where gap starts
 ## len: size of gap
 ## fid: Zeek-side ID of the file to operate on; if not given, the file started by the most recent file_begin() will be used
-public function file_gap(offset: uint64, len: uint64, fid: optional<string> = Null) : void &cxxname="zeek::spicy::rt::file_gap";
+public function file_gap(offset: uint64, len: uint64, fid: optional<string> = Null): void &cxxname="zeek::spicy::rt::file_gap";
 
 ## Signals the end of a file to Zeek's file analysis.
 ##
 ## fid: Zeek-side ID of the file to operate on; if not given, the file started by the most recent file_begin() will be used
-public function file_end(fid: optional<string> = Null) : void &cxxname="zeek::spicy::rt::file_end";
+public function file_end(fid: optional<string> = Null): void &cxxname="zeek::spicy::rt::file_end";
 
 ## Inside a packet analyzer, forwards what data remains after parsing the top-level unit
 ## on to another analyzer. The index specifies the target, per the current dispatcher table.
-public function forward_packet(identifier: uint32) : void &cxxname="zeek::spicy::rt::forward_packet";
+public function forward_packet(identifier: uint32): void &cxxname="zeek::spicy::rt::forward_packet";
 
 ## Gets the network time from Zeek.
-public function network_time() : time &cxxname="zeek::spicy::rt::network_time";
+public function network_time(): time &cxxname="zeek::spicy::rt::network_time";
+
+## Opaque handle for a Zeek-side value.
+public type ZeekVal = __library_type("::zeek::ValPtr");
+
+## Opaque handle for a Zeek-side record value.
+public type ZeekRecord = __library_type("::zeek::spicy::rt::ValRecordPtr");
+
+## Opaque handle for a Zeek-side set value.
+public type ZeekSet = __library_type("::zeek::spicy::rt::ValSetPtr");
+
+## Opaque handle for a Zeek-side table value.
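
Likewise, a sketch of the file-analysis entry points above (the unit and MIME type are illustrative):

    import zeek;

    type Download = unit {
        var fid: string;

        on %init { self.fid = zeek::file_begin("application/octet-stream"); }

        # Stream content into Zeek's file analysis as it is parsed.
        data: bytes &eod &chunked { zeek::file_data_in($$, self.fid); }

        on %done { zeek::file_end(self.fid); }
    };
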
+public type ZeekTable = __library_type("::zeek::spicy::rt::ValTablePtr");
+
+## Opaque handle for a Zeek-side vector value.
+public type ZeekVector = __library_type("::zeek::spicy::rt::ValVectorPtr");
+
+## Returns the value of a global Zeek script variable of Zeek type ``addr``.
+## Throws an exception if there's no such Zeek variable of that name, or if
+## it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_address(id: string): addr &cxxname="zeek::spicy::rt::get_address";
+
+## Returns the value of a global Zeek script variable of Zeek type ``bool``.
+## Throws an exception if there's no such Zeek variable of that name, or if
+## it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_bool(id: string): bool &cxxname="zeek::spicy::rt::get_bool";
+
+## Returns the value of a global Zeek script variable of Zeek type ``count``.
+## Throws an exception if there's no such Zeek variable of that name, or if
+## it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_count(id: string): uint64 &cxxname="zeek::spicy::rt::get_count";
+
+## Returns the value of a global Zeek script variable of Zeek type ``double``.
+## Throws an exception if there's no such Zeek variable of that name, or if
+## it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_double(id: string): real &cxxname="zeek::spicy::rt::get_double";
+
+## Returns the value of a global Zeek script variable of Zeek type ``enum``.
+## The value is returned as a string containing the enum's label name, without
+## any scope. Throws an exception if there's no such Zeek variable of that
+## name, or if it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_enum(id: string): string &cxxname="zeek::spicy::rt::get_enum";
+
+## Returns the value of a global Zeek script variable of Zeek type ``int``.
+## Throws an exception if there's no such Zeek variable of that name, or if
+## it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_int(id: string): int64 &cxxname="zeek::spicy::rt::get_int";
+
+## Returns the value of a global Zeek script variable of Zeek type
+## ``interval``. Throws an exception if there's no such Zeek variable of that
+## name, or if it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_interval(id: string): interval &cxxname="zeek::spicy::rt::get_interval";
+
+## Returns the value of a global Zeek script variable of Zeek type ``port``.
+## Throws an exception if there's no such Zeek variable of that name, or if
+## it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_port(id: string): port &cxxname="zeek::spicy::rt::get_port";
+
+## Returns the value of a global Zeek script variable of Zeek type ``record``.
+## The value is returned as an opaque handle to the record, which can be used
+## with the ``zeek::record_*()`` functions to access the record's fields.
+## Throws an exception if there's no such Zeek variable of that name, or if
+## it's not of the expected type.
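
A short sketch of the typed accessors above, usable inside any Spicy hook; both IDs are standard Zeek globals:

    local ignore = zeek::get_bool("ignore_checksums");
    local syn_timeout = zeek::get_interval("tcp_SYN_timeout");
    print ignore, syn_timeout;
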
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_record(id: string): ZeekRecord &cxxname="zeek::spicy::rt::get_record";
+
+## Returns the value of a global Zeek script variable of Zeek type ``set``. The
+## value is returned as an opaque handle to the set, which can be used with the
+## ``zeek::set_*()`` functions to access the set's content. Throws an exception
+## if there's no such Zeek variable of that name, or if it's not of the
+## expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_set(id: string): ZeekSet &cxxname="zeek::spicy::rt::get_set";
+
+## Returns the value of a global Zeek script variable of Zeek type ``string``.
+## The string's value is returned as a Spicy ``bytes`` value. Throws an
+## exception if there's no such Zeek variable of that name, or if it's not of
+## the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_string(id: string): bytes &cxxname="zeek::spicy::rt::get_string";
+
+## Returns the value of a global Zeek script variable of Zeek type ``subnet``.
+## Throws an exception if there's no such Zeek variable of that name, or if
+## it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_subnet(id: string): network &cxxname="zeek::spicy::rt::get_subnet";
+
+## Returns the value of a global Zeek script variable of Zeek type ``table``.
+## The value is returned as an opaque handle to the table, which can be used
+## with the ``zeek::table_*()`` functions to access the table's content. Throws
+## an exception if there's no such Zeek variable of that name, or if it's not
+## of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_table(id: string): ZeekTable &cxxname="zeek::spicy::rt::get_table";
+
+## Returns the value of a global Zeek script variable of Zeek type ``time``.
+## Throws an exception if there's no such Zeek variable of that name, or if
+## it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_time(id: string): time &cxxname="zeek::spicy::rt::get_time";
+
+## Returns the value of a global Zeek script variable of Zeek type ``vector``.
+## The value is returned as an opaque handle to the vector, which can be used
+## with the ``zeek::vector_*()`` functions to access the vector's content.
+## Throws an exception if there's no such Zeek variable of that name, or if
+## it's not of the expected type.
+##
+## id: fully-qualified name of the global Zeek variable to retrieve
+public function get_vector(id: string): ZeekVector &cxxname="zeek::spicy::rt::get_vector";
+
+## Returns an opaque handle to a global Zeek script variable. The handle can be
+## used with the ``zeek::as_*()`` functions to access the variable's value.
+## Throws an exception if there's no Zeek variable of that name.
+public function get_value(id: string): ZeekVal &cxxname="zeek::spicy::rt::get_value";
+
+## Returns a Zeek ``addr`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_address(v: ZeekVal): addr &cxxname="zeek::spicy::rt::as_address";
+
+## Returns a Zeek ``bool`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
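
The generic handle route composes with the `as_*()` conversions; a sketch, again inside some hook and against a real Zeek global:

    # Same value, read directly and via an opaque handle.
    local direct = zeek::get_bool("ignore_checksums");
    local v = zeek::get_value("ignore_checksums");
    assert direct == zeek::as_bool(v);
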
+public function as_bool(v: ZeekVal): bool &cxxname="zeek::spicy::rt::as_bool";
+
+## Returns a Zeek ``count`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_count(v: ZeekVal): uint64 &cxxname="zeek::spicy::rt::as_count";
+
+## Returns a Zeek ``double`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_double(v: ZeekVal): real &cxxname="zeek::spicy::rt::as_double";
+
+## Returns a Zeek ``enum`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_enum(v: ZeekVal): string &cxxname="zeek::spicy::rt::as_enum";
+
+## Returns a Zeek ``int`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_int(v: ZeekVal): int64 &cxxname="zeek::spicy::rt::as_int";
+
+## Returns a Zeek ``interval`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_interval(v: ZeekVal): interval &cxxname="zeek::spicy::rt::as_interval";
+
+## Returns a Zeek ``port`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_port(v: ZeekVal): port &cxxname="zeek::spicy::rt::as_port";
+
+## Returns a Zeek ``record`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_record(v: ZeekVal): ZeekRecord &cxxname="zeek::spicy::rt::as_record";
+
+## Returns a Zeek ``set`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_set(v: ZeekVal): ZeekSet &cxxname="zeek::spicy::rt::as_set";
+
+## Returns a Zeek ``string`` value referenced by an opaque handle. The string's
+## value is returned as a Spicy ``bytes`` value. Throws an exception if the
+## referenced value is not of the expected type.
+public function as_string(v: ZeekVal): bytes &cxxname="zeek::spicy::rt::as_string";
+
+## Returns a Zeek ``subnet`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_subnet(v: ZeekVal): network &cxxname="zeek::spicy::rt::as_subnet";
+
+## Returns a Zeek ``table`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_table(v: ZeekVal): ZeekTable &cxxname="zeek::spicy::rt::as_table";
+
+## Returns a Zeek ``time`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_time(v: ZeekVal): time &cxxname="zeek::spicy::rt::as_time";
+
+## Returns a Zeek ``vector`` value referenced by an opaque handle. Throws an
+## exception if the referenced value is not of the expected type.
+public function as_vector(v: ZeekVal): ZeekVector &cxxname="zeek::spicy::rt::as_vector";
+
+## Returns true if a Zeek set contains a given value. Throws an exception if
+## the given ID does not exist, or does not have the expected type.
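
For instance, a membership check against Zeek's standard `Site::local_nets` (a `set[subnet]`), as a sketch:

    if ( zeek::set_contains("Site::local_nets", 192.168.0.0/16) )
        zeek::weird("local_peer_seen");
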
+##
+## id: fully-qualified name of the global Zeek set to check
+## v: value to check for, which must be of the Spicy-side equivalent of the set's key type
+public function set_contains(id: string, v: any): bool &cxxname="zeek::spicy::rt::set_contains";
+
+## Returns true if a Zeek set contains a given value. Throws an exception if
+## the set does not have the expected type.
+##
+## s: opaque handle to the Zeek set, as returned by other functions
+## v: value to check for, which must be of the Spicy-side equivalent of the set's key type
+public function set_contains(s: ZeekSet, v: any): bool &cxxname="zeek::spicy::rt::set_contains";
+
+## Returns true if a Zeek table contains a given value. Throws an exception if
+## the given ID does not exist, or does not have the expected type.
+##
+## id: fully-qualified name of the global Zeek table to check
+## v: value to check for, which must be of the Spicy-side equivalent of the table's key type
+public function table_contains(id: string, v: any): bool &cxxname="zeek::spicy::rt::table_contains";
+
+## Returns true if a Zeek table contains a given value. Throws an exception if
+## the table does not have the expected type.
+##
+## t: opaque handle to the Zeek table, as returned by other functions
+## v: value to check for, which must be of the Spicy-side equivalent of the table's key type
+public function table_contains(t: ZeekTable, v: any): bool &cxxname="zeek::spicy::rt::table_contains";
+
+## Returns the value associated with a key in a Zeek table. Returns an error
+## result if the key does not exist in the table. Throws an exception if the
+## given table ID does not exist, or does not have the expected type.
+##
+## id: fully-qualified name of the global Zeek table to check
+## v: value to lookup, which must be of the Spicy-side equivalent of the table's key type
+public function table_lookup(id: string, v: any): optional<ZeekVal> &cxxname="zeek::spicy::rt::table_lookup";
+
+## Returns the value associated with a key in a Zeek table. Returns an error
+## result if the key does not exist in the table. Throws an exception if the
+## table does not have the expected type.
+##
+## t: opaque handle to the Zeek table, as returned by other functions
+## v: value to lookup, which must be of the Spicy-side equivalent of the table's key type
+public function table_lookup(t: ZeekTable, v: any): optional<ZeekVal> &cxxname="zeek::spicy::rt::table_lookup";
+
+## Returns true if a Zeek record provides a value for a given field. This
+## includes fields with `&default` values. Throws an exception if the given ID
+## does not exist, or does not have the expected type.
+##
+## id: fully-qualified name of the global Zeek record to check
+## field: name of the field to check
+public function record_has_value(id: string, field: string): bool &cxxname="zeek::spicy::rt::record_has_field";
+
+## Returns true if a Zeek record provides a value for a given field.
+## This includes fields with `&default` values.
+##
+## r: opaque handle to the Zeek record, as returned by other functions
+## field: name of the field to check
+public function record_has_value(r: ZeekRecord, field: string): bool &cxxname="zeek::spicy::rt::record_has_field";
+
+## Returns true if the type of a Zeek record has a field of a given name.
+## Throws an exception if the given ID does not exist, or does not have the
+## expected type.
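
A sketch combining the lookup and conversion functions above; the table ID and key are hypothetical (a `table[string] of count`, whose Spicy-side key type is `bytes`):

    local v = zeek::table_lookup("MyModule::limits", b"http");
    if ( v )
        print zeek::as_count(*v);
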
+## +## id: fully-qualified name of the global Zeek record to check +## field: name of the field to check +public function record_has_field(id: string, field: string): bool &cxxname="zeek::spicy::rt::record_has_field"; + +## Returns true if the type of a Zeek record has a field of a given name. +## +## r: opaque handle to the Zeek record, as returned by other functions +## field: name of the field to check +public function record_has_field(r: ZeekRecord, field: string): bool &cxxname="zeek::spicy::rt::record_has_field"; + +## Returns a field's value from a Zeek record. Throws an exception if the given +## ID does not exist, or does not have the expected type; or if there's no such +## field in the record type, or if the field does not have a value. +## +## id: fully-qualified name of the global Zeek record to check +## field: name of the field to retrieve +public function record_field(id: string, field: string): ZeekVal &cxxname="zeek::spicy::rt::record_field"; + +## Returns a field's value from a Zeek record. Throws an exception if the given +## record does not have such a field, or if the field does not have a value. +## +## r: opaque handle to the Zeek record, as returned by other functions +## field: name of the field to retrieve +public function record_field(r: ZeekRecord, field: string): ZeekVal &cxxname="zeek::spicy::rt::record_field"; + +## Returns the value of an index in a Zeek vector. Throws an exception if the +## given ID does not exist, or does not have the expected type; or if the index +## is out of bounds. +## +## id: fully-qualified name of the global Zeek vector to check +## index: index of the element to retrieve +public function vector_index(id: string, index: uint64): ZeekVal &cxxname="zeek::spicy::rt::vector_index"; + +## Returns the value of an index in a Zeek vector. Throws an exception if the +## index is out of bounds. +## +## v: opaque handle to the Zeek vector, as returned by other functions +## index: index of the element to retrieve +public function vector_index(v: ZeekVector, index: uint64): ZeekVal &cxxname="zeek::spicy::rt::vector_index"; + +## Returns the size of a Zeek vector. Throws an exception if the given ID does +## not exist, or does not have the expected type. +## +## id: fully-qualified name of the global Zeek vector to check +public function vector_size(id: string): uint64 &cxxname="zeek::spicy::rt::vector_size"; + +## Returns the size of a Zeek vector. +## +## v: opaque handle to the Zeek vector, as returned by other functions +public function vector_size(v: ZeekVector): uint64 &cxxname="zeek::spicy::rt::vector_size"; + diff --git a/src/3rdparty b/src/3rdparty index 4c5985ca87..96caa6a590 160000 --- a/src/3rdparty +++ b/src/3rdparty @@ -1 +1 @@ -Subproject commit 4c5985ca8743d33927943a58e2cc2b74d7b05790 +Subproject commit 96caa6a59023eafae8a94a7b906a4436ebb57d29 diff --git a/src/Attr.cc b/src/Attr.cc index e080bfcfd6..b8c87b7ff0 100644 --- a/src/Attr.cc +++ b/src/Attr.cc @@ -221,8 +221,13 @@ void Attributes::AddAttr(AttrPtr attr, bool is_redef) { // instantiator of the object specified a null type, however, then // that's a signal to skip the checking. If the type is error, // there's no point checking attributes either. - if ( type && ! IsErrorType(type->Tag()) ) - CheckAttr(attr.get()); + if ( type && ! IsErrorType(type->Tag()) ) { + if ( ! CheckAttr(attr.get()) ) { + // Get rid of it, so we don't get error cascades down the line. 
+ RemoveAttr(attr->Tag()); + return; + } + } // For ADD_FUNC or DEL_FUNC, add in an implicit REDEF, since // those attributes only have meaning for a redefinable value. @@ -285,7 +290,7 @@ void Attributes::DescribeReST(ODesc* d, bool shorten) const { } } -void Attributes::CheckAttr(Attr* a) { +bool Attributes::CheckAttr(Attr* a) { switch ( a->Tag() ) { case ATTR_DEPRECATED: case ATTR_REDEF: @@ -294,7 +299,7 @@ void Attributes::CheckAttr(Attr* a) { case ATTR_OPTIONAL: if ( global_var ) - Error("&optional is not valid for global variables"); + return AttrError("&optional is not valid for global variables"); break; case ATTR_ADD_FUNC: @@ -304,61 +309,53 @@ void Attributes::CheckAttr(Attr* a) { const auto& at = a->GetExpr()->GetType(); if ( at->Tag() != TYPE_FUNC ) { a->GetExpr()->Error(is_add ? "&add_func must be a function" : "&delete_func must be a function"); - break; + return false; } FuncType* aft = at->AsFuncType(); if ( ! same_type(aft->Yield(), type) ) { a->GetExpr()->Error(is_add ? "&add_func function must yield same type as variable" : "&delete_func function must yield same type as variable"); - break; + return false; } } break; case ATTR_DEFAULT_INSERT: { - if ( ! type->IsTable() ) { - Error("&default_insert only applicable to tables"); - break; - } + if ( ! type->IsTable() ) + return AttrError("&default_insert only applicable to tables"); - if ( Find(ATTR_DEFAULT) ) { - Error("&default and &default_insert cannot be used together"); - break; - } + if ( Find(ATTR_DEFAULT) ) + return AttrError("&default and &default_insert cannot be used together"); std::string err_msg; if ( ! check_default_attr(a, type, global_var, in_record, err_msg) && ! err_msg.empty() ) - Error(err_msg.c_str()); + return AttrError(err_msg.c_str()); break; } case ATTR_DEFAULT: { - if ( Find(ATTR_DEFAULT_INSERT) ) { - Error("&default and &default_insert cannot be used together"); - break; - } + if ( Find(ATTR_DEFAULT_INSERT) ) + return AttrError("&default and &default_insert cannot be used together"); std::string err_msg; if ( ! check_default_attr(a, type, global_var, in_record, err_msg) && ! err_msg.empty() ) - Error(err_msg.c_str()); + return AttrError(err_msg.c_str()); break; } case ATTR_EXPIRE_READ: { if ( Find(ATTR_BROKER_STORE) ) - Error("&broker_store and &read_expire cannot be used simultaneously"); + return AttrError("&broker_store and &read_expire cannot be used simultaneously"); if ( Find(ATTR_BACKEND) ) - Error("&backend and &read_expire cannot be used simultaneously"); + return AttrError("&backend and &read_expire cannot be used simultaneously"); } // fallthrough case ATTR_EXPIRE_WRITE: case ATTR_EXPIRE_CREATE: { - if ( type->Tag() != TYPE_TABLE ) { - Error("expiration only applicable to sets/tables"); - break; - } + if ( type->Tag() != TYPE_TABLE ) + return AttrError("expiration only applicable to sets/tables"); int num_expires = 0; @@ -368,57 +365,49 @@ void Attributes::CheckAttr(Attr* a) { num_expires++; } - if ( num_expires > 1 ) { - Error( + if ( num_expires > 1 ) + return AttrError( "set/table can only have one of &read_expire, &write_expire, " "&create_expire"); - break; - } } #if 0 //### not easy to test this w/o knowing the ID. if ( ! 
global_var ) - Error("expiration not supported for local variables"); + return AttrError("expiration not supported for local variables"); #endif break; case ATTR_EXPIRE_FUNC: { - if ( type->Tag() != TYPE_TABLE ) { - Error("expiration only applicable to tables"); - break; - } + if ( type->Tag() != TYPE_TABLE ) + return AttrError("expiration only applicable to tables"); type->AsTableType()->CheckExpireFuncCompatibility({NewRef{}, a}); if ( Find(ATTR_BROKER_STORE) ) - Error("&broker_store and &expire_func cannot be used simultaneously"); + return AttrError("&broker_store and &expire_func cannot be used simultaneously"); if ( Find(ATTR_BACKEND) ) - Error("&backend and &expire_func cannot be used simultaneously"); + return AttrError("&backend and &expire_func cannot be used simultaneously"); break; } case ATTR_ON_CHANGE: { - if ( type->Tag() != TYPE_TABLE ) { - Error("&on_change only applicable to sets/tables"); - break; - } + if ( type->Tag() != TYPE_TABLE ) + return AttrError("&on_change only applicable to sets/tables"); const auto& change_func = a->GetExpr(); if ( change_func->GetType()->Tag() != TYPE_FUNC || change_func->GetType()->AsFuncType()->Flavor() != FUNC_FLAVOR_FUNCTION ) - Error("&on_change attribute is not a function"); + return AttrError("&on_change attribute is not a function"); const FuncType* c_ft = change_func->GetType()->AsFuncType(); - if ( c_ft->Yield()->Tag() != TYPE_VOID ) { - Error("&on_change must not return a value"); - break; - } + if ( c_ft->Yield()->Tag() != TYPE_VOID ) + return AttrError("&on_change must not return a value"); const TableType* the_table = type->AsTableType(); @@ -427,107 +416,85 @@ void Attributes::CheckAttr(Attr* a) { const auto& args = c_ft->ParamList()->GetTypes(); const auto& t_indexes = the_table->GetIndexTypes(); - if ( args.size() != (type->IsSet() ? 2 : 3) + t_indexes.size() ) { - Error("&on_change function has incorrect number of arguments"); - break; - } + if ( args.size() != (type->IsSet() ? 2 : 3) + t_indexes.size() ) + return AttrError("&on_change function has incorrect number of arguments"); - if ( ! same_type(args[0], the_table->AsTableType()) ) { - Error("&on_change: first argument must be of same type as table"); - break; - } + if ( ! same_type(args[0], the_table->AsTableType()) ) + return AttrError("&on_change: first argument must be of same type as table"); // can't check exact type here yet - the data structures don't exist yet. - if ( args[1]->Tag() != TYPE_ENUM ) { - Error("&on_change: second argument must be a TableChange enum"); - break; - } + if ( args[1]->Tag() != TYPE_ENUM ) + return AttrError("&on_change: second argument must be a TableChange enum"); for ( size_t i = 0; i < t_indexes.size(); i++ ) { - if ( ! same_type(args[2 + i], t_indexes[i]) ) { - Error("&on_change: index types do not match table"); - break; - } + if ( ! same_type(args[2 + i], t_indexes[i]) ) + return AttrError("&on_change: index types do not match table"); } if ( ! type->IsSet() ) - if ( ! same_type(args[2 + t_indexes.size()], the_table->Yield()) ) { - Error("&on_change: value type does not match table"); - break; - } + if ( ! same_type(args[2 + t_indexes.size()], the_table->Yield()) ) + return AttrError("&on_change: value type does not match table"); } break; case ATTR_BACKEND: { - if ( ! global_var || type->Tag() != TYPE_TABLE ) { - Error("&backend only applicable to global sets/tables"); - break; - } + if ( ! 
global_var || type->Tag() != TYPE_TABLE ) + return AttrError("&backend only applicable to global sets/tables"); // cannot do better equality check - the Broker types are not // actually existing yet when we are here. We will do that // later - before actually attaching to a broker store - if ( a->GetExpr()->GetType()->Tag() != TYPE_ENUM ) { - Error("&backend must take an enum argument"); - break; - } + if ( a->GetExpr()->GetType()->Tag() != TYPE_ENUM ) + return AttrError("&backend must take an enum argument"); // Only support atomic types for the moment, unless // explicitly overridden if ( ! type->AsTableType()->IsSet() && ! input::Manager::IsCompatibleType(type->AsTableType()->Yield().get(), true) && - ! Find(ATTR_BROKER_STORE_ALLOW_COMPLEX) ) { - Error("&backend only supports atomic types as table value"); - } + ! Find(ATTR_BROKER_STORE_ALLOW_COMPLEX) ) + return AttrError("&backend only supports atomic types as table value"); if ( Find(ATTR_EXPIRE_FUNC) ) - Error("&backend and &expire_func cannot be used simultaneously"); + return AttrError("&backend and &expire_func cannot be used simultaneously"); if ( Find(ATTR_EXPIRE_READ) ) - Error("&backend and &read_expire cannot be used simultaneously"); + return AttrError("&backend and &read_expire cannot be used simultaneously"); if ( Find(ATTR_BROKER_STORE) ) - Error("&backend and &broker_store cannot be used simultaneously"); + return AttrError("&backend and &broker_store cannot be used simultaneously"); break; } case ATTR_BROKER_STORE: { - if ( type->Tag() != TYPE_TABLE ) { - Error("&broker_store only applicable to sets/tables"); - break; - } + if ( type->Tag() != TYPE_TABLE ) + return AttrError("&broker_store only applicable to sets/tables"); - if ( a->GetExpr()->GetType()->Tag() != TYPE_STRING ) { - Error("&broker_store must take a string argument"); - break; - } + if ( a->GetExpr()->GetType()->Tag() != TYPE_STRING ) + return AttrError("&broker_store must take a string argument"); // Only support atomic types for the moment, unless // explicitly overridden if ( ! type->AsTableType()->IsSet() && ! input::Manager::IsCompatibleType(type->AsTableType()->Yield().get(), true) && - ! Find(ATTR_BROKER_STORE_ALLOW_COMPLEX) ) { - Error("&broker_store only supports atomic types as table value"); - } + ! Find(ATTR_BROKER_STORE_ALLOW_COMPLEX) ) + return AttrError("&broker_store only supports atomic types as table value"); if ( Find(ATTR_EXPIRE_FUNC) ) - Error("&broker_store and &expire_func cannot be used simultaneously"); + return AttrError("&broker_store and &expire_func cannot be used simultaneously"); if ( Find(ATTR_EXPIRE_READ) ) - Error("&broker_store and &read_expire cannot be used simultaneously"); + return AttrError("&broker_store and &read_expire cannot be used simultaneously"); if ( Find(ATTR_BACKEND) ) - Error("&backend and &broker_store cannot be used simultaneously"); + return AttrError("&backend and &broker_store cannot be used simultaneously"); break; } - case ATTR_BROKER_STORE_ALLOW_COMPLEX: { - if ( type->Tag() != TYPE_TABLE ) { - Error("&broker_allow_complex_type only applicable to sets/tables"); - break; - } - } + case ATTR_BROKER_STORE_ALLOW_COMPLEX: + if ( type->Tag() != TYPE_TABLE ) + return AttrError("&broker_allow_complex_type only applicable to sets/tables"); + break; case ATTR_TRACKED: // FIXME: Check here for global ID? 
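
Since `CheckAttr()` now returns a verdict and `AddAttr()` removes a rejected attribute (see the `Attr.cc` hunks above), an invalid declaration like this hypothetical reproducer triggers a single diagnostic instead of a cascade of follow-on errors:

    global g: count &optional;
    # error: &optional is not valid for global variables
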
@@ -535,49 +502,52 @@ void Attributes::CheckAttr(Attr* a) { case ATTR_RAW_OUTPUT: if ( type->Tag() != TYPE_FILE ) - Error("&raw_output only applicable to files"); + return AttrError("&raw_output only applicable to files"); break; - case ATTR_PRIORITY: Error("&priority only applicable to event bodies"); break; + case ATTR_PRIORITY: return AttrError("&priority only applicable to event bodies"); case ATTR_GROUP: if ( type->Tag() != TYPE_FUNC || type->AsFuncType()->Flavor() != FUNC_FLAVOR_EVENT ) - Error("&group only applicable to events"); + return AttrError("&group only applicable to events"); break; case ATTR_ERROR_HANDLER: if ( type->Tag() != TYPE_FUNC || type->AsFuncType()->Flavor() != FUNC_FLAVOR_EVENT ) - Error("&error_handler only applicable to events"); + return AttrError("&error_handler only applicable to events"); break; case ATTR_LOG: if ( ! threading::Value::IsCompatibleType(type.get()) ) - Error("&log applied to a type that cannot be logged"); + return AttrError("&log applied to a type that cannot be logged"); break; case ATTR_TYPE_COLUMN: { - if ( type->Tag() != TYPE_PORT ) { - Error("type_column tag only applicable to ports"); - break; - } + if ( type->Tag() != TYPE_PORT ) + return AttrError("type_column tag only applicable to ports"); const auto& atype = a->GetExpr()->GetType(); - if ( atype->Tag() != TYPE_STRING ) { - Error("type column needs to have a string argument"); - break; - } + if ( atype->Tag() != TYPE_STRING ) + return AttrError("type column needs to have a string argument"); break; } case ATTR_ORDERED: if ( type->Tag() != TYPE_TABLE ) - Error("&ordered only applicable to tables"); + return AttrError("&ordered only applicable to tables"); break; default: BadTag("Attributes::CheckAttr", attr_name(a->Tag())); } + + return true; +} + +bool Attributes::AttrError(const char* msg) { + Error(msg); + return false; } bool Attributes::operator==(const Attributes& other) const { diff --git a/src/Attr.h b/src/Attr.h index 94c04a380c..06da50eb85 100644 --- a/src/Attr.h +++ b/src/Attr.h @@ -131,7 +131,11 @@ public: detail::TraversalCode Traverse(detail::TraversalCallback* cb) const; protected: - void CheckAttr(Attr* attr); + // Returns true if the attribute is okay, false if not. + bool CheckAttr(Attr* attr); + + // Reports an attribute error and returns false (handy for CheckAttr()). + bool AttrError(const char* msg); TypePtr type; std::vector attrs; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index bb075c4d35..c7ae4f183c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -45,11 +45,13 @@ if (MSVC) # TODO: This is disabled for now because there a bunch of known # compiler warnings on Windows that we don't have good fixes for. #set(WERROR_FLAG "/WX") + #set(WNOERROR_FLAG "/WX:NO") endif () else () set(SIGN_COMPARE_FLAG "-Wno-sign-compare") if (BUILD_WITH_WERROR) set(WERROR_FLAG "-Werror") + set(WNOERROR_FLAG "-Wno-error") endif () endif () @@ -445,6 +447,10 @@ set(THIRD_PARTY_SRCS $<$:3rdparty/sqlite3.c> 3rdparty/strsep.c) +if (USE_SQLITE AND WNOERROR_FLAG) + set_source_files_properties(3rdparty/sqlite3.c PROPERTIES COMPILE_FLAGS ${WNOERROR_FLAG}) +endif () + # Highwayhash. Highwayhash is a bit special since it has architecture dependent # code... 
set(hhash_dir ${PROJECT_SOURCE_DIR}/auxil/highwayhash/highwayhash) diff --git a/src/Expr.cc b/src/Expr.cc index d715c66227..abbb07a3d4 100644 --- a/src/Expr.cc +++ b/src/Expr.cc @@ -99,11 +99,14 @@ const char* expr_name(ExprTag t) { "vec+=", "[]=", "$=", - "from_any_vec_coerce ", + "$=$", + "$+=$", + "[=+$]", + "from_any_vec_coerce", "any[]", "ZAM-builtin()", - "nop", + "nop", // don't add after this, it's used to compute NUM_EXPRS }; if ( int(t) >= NUM_EXPRS ) { @@ -2793,7 +2796,7 @@ void FieldExpr::Assign(Frame* f, ValPtr v) { if ( IsError() ) return; - Assign(op->Eval(f), v); + Assign(op->Eval(f), std::move(v)); } void FieldExpr::Assign(ValPtr lhs, ValPtr rhs) { @@ -2807,7 +2810,7 @@ ValPtr FieldExpr::Delete(Frame* f) { return nullptr; auto former = op_v->AsRecordVal()->GetField(field); - Assign(op_v, nullptr); + Assign(std::move(op_v), nullptr); // In the future we could return a value, such as former, here. return nullptr; } @@ -2916,7 +2919,8 @@ RecordConstructorExpr::RecordConstructorExpr(ListExprPtr constructor_list) Error("bad type in record constructor", constructor_error_expr); } -RecordConstructorExpr::RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list) +RecordConstructorExpr::RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list, + bool check_mandatory_fields) : Expr(EXPR_RECORD_CONSTRUCTOR), op(std::move(constructor_list)) { if ( IsError() ) return; @@ -2957,6 +2961,9 @@ RecordConstructorExpr::RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr if ( IsError() ) return; + if ( ! check_mandatory_fields ) + return; + auto n = known_rt->NumFields(); for ( i = 0; i < n; ++i ) if ( fields_seen.count(i) == 0 ) { diff --git a/src/Expr.h b/src/Expr.h index 0cab292c92..73929a2114 100644 --- a/src/Expr.h +++ b/src/Expr.h @@ -103,6 +103,9 @@ enum ExprTag : int { EXPR_APPEND_TO, EXPR_INDEX_ASSIGN, EXPR_FIELD_LHS_ASSIGN, + EXPR_REC_ASSIGN_FIELDS, + EXPR_REC_ADD_FIELDS, + EXPR_REC_CONSTRUCT_WITH_REC, EXPR_FROM_ANY_VEC_COERCE, EXPR_ANY_INDEX, EXPR_SCRIPT_OPT_BUILTIN, @@ -1166,7 +1169,10 @@ public: explicit RecordConstructorExpr(ListExprPtr constructor_list); // This form is used to construct records of a known (ultimate) type. - explicit RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list); + // The flag allows skipping of checking for mandatory fields, for + // script optimization that may elide them. + explicit RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr constructor_list, + bool check_mandatory_fields = true); ListExprPtr Op() const { return op; } const auto& Map() const { return map; } diff --git a/src/Val.cc b/src/Val.cc index f97601236a..bf86360b70 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -329,8 +329,27 @@ TableValPtr Val::GetRecordFields() { return rt->GetRecordFieldsVal(rv); } -// This is a static method in this file to avoid including rapidjson's headers in Val.h because -// they're huge. +// A predicate to identify those types we render as a string in JSON. +static bool UsesJSONStringType(const TypePtr& t) { + if ( t == nullptr ) + return false; + + switch ( t->Tag() ) { + case TYPE_ADDR: + case TYPE_ENUM: + case TYPE_FILE: + case TYPE_FUNC: + case TYPE_INTERVAL: + case TYPE_PATTERN: + case TYPE_STRING: + case TYPE_SUBNET: + case TYPE_OPAQUE: return true; + default: return false; + } +} + +// This is a static method in this file to avoid including rapidjson's headers +// in Val.h, because they're huge. 
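
The script-level effect of `UsesJSONStringType()` can be sketched with the `to_json()` BiF (values illustrative; key order in the output is not guaranteed):

    event zeek_init()
        {
        # String-typed table keys are rendered unquoted:
        print to_json(table(["a"] = 1, ["b"] = 2));
        # {"a": 1, "b": 2}  rather than  {"\"a\"": 1, "\"b\"": 2}
        }
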
static void BuildJSON(json::detail::NullDoubleWriter& writer, Val* val, bool only_loggable = false, RE_Matcher* re = nullptr, const string& key = "") { if ( ! key.empty() ) @@ -421,8 +440,10 @@ static void BuildJSON(json::detail::NullDoubleWriter& writer, Val* val, bool onl BuildJSON(key_writer, entry_key, only_loggable, re); string key_str = buffer.GetString(); - if ( key_str.length() >= 2 && key_str[0] == '"' && key_str[key_str.length() - 1] == '"' ) - // Strip quotes. + // Strip the quotes for any type we render as a string. This + // makes the JSON object's keys look more natural, yielding + // '{ "foo": ... }', not '{ "\"foo\"": ... }', for such types. + if ( UsesJSONStringType(entry_key->GetType()) ) key_str = key_str.substr(1, key_str.length() - 2); BuildJSON(writer, entry->GetVal().get(), only_loggable, re, key_str); @@ -924,28 +945,50 @@ static std::variant BuildVal(const rapidjson::Value& j, con } case TYPE_PORT: { - if ( ! j.IsString() ) - return mismatch_err(); - - int port = 0; - if ( j.GetStringLength() > 0 && j.GetStringLength() < 10 ) { - char* slash; - errno = 0; - port = strtol(j.GetString(), &slash, 10); - if ( ! errno ) { - ++slash; - if ( util::streq(slash, "tcp") ) - return val_mgr->Port(port, TRANSPORT_TCP); - else if ( util::streq(slash, "udp") ) - return val_mgr->Port(port, TRANSPORT_UDP); - else if ( util::streq(slash, "icmp") ) - return val_mgr->Port(port, TRANSPORT_ICMP); - else if ( util::streq(slash, "unknown") ) - return val_mgr->Port(port, TRANSPORT_UNKNOWN); + if ( j.IsString() ) { + int port = 0; + if ( j.GetStringLength() > 0 && j.GetStringLength() < 10 ) { + char* slash; + errno = 0; + port = strtol(j.GetString(), &slash, 10); + if ( ! errno ) { + ++slash; + if ( util::streq(slash, "tcp") ) + return val_mgr->Port(port, TRANSPORT_TCP); + else if ( util::streq(slash, "udp") ) + return val_mgr->Port(port, TRANSPORT_UDP); + else if ( util::streq(slash, "icmp") ) + return val_mgr->Port(port, TRANSPORT_ICMP); + else if ( util::streq(slash, "unknown") ) + return val_mgr->Port(port, TRANSPORT_UNKNOWN); + } } - } - return "wrong port format, must be /[0-9]{1,5}\\/(tcp|udp|icmp|unknown)/"; + return "wrong port format, string must be /[0-9]{1,5}\\/(tcp|udp|icmp|unknown)/"; + } + else if ( j.IsObject() ) { + if ( ! j.HasMember("port") || ! j.HasMember("proto") ) + return "wrong port format, object must have 'port' and 'proto' members"; + if ( ! j["port"].IsNumber() ) + return "wrong port format, port must be a number"; + if ( ! j["proto"].IsString() ) + return "wrong port format, protocol must be a string"; + + std::string proto{j["proto"].GetString()}; + + if ( proto == "tcp" ) + return val_mgr->Port(j["port"].GetInt(), TRANSPORT_TCP); + if ( proto == "udp" ) + return val_mgr->Port(j["port"].GetInt(), TRANSPORT_UDP); + if ( proto == "icmp" ) + return val_mgr->Port(j["port"].GetInt(), TRANSPORT_ICMP); + if ( proto == "unknown" ) + return val_mgr->Port(j["port"].GetInt(), TRANSPORT_UNKNOWN); + + return "wrong port format, invalid protocol string"; + } + else + return "wrong port format, must be string or object"; } case TYPE_PATTERN: { @@ -953,11 +996,17 @@ static std::variant BuildVal(const rapidjson::Value& j, con return mismatch_err(); std::string candidate(j.GetString(), j.GetStringLength()); + // Remove any surrounding '/'s, not needed when creating an RE_matcher. 
if ( candidate.size() > 2 && candidate.front() == candidate.back() && candidate.back() == '/' ) {
-        // Remove the '/'s
         candidate.erase(0, 1);
         candidate.erase(candidate.size() - 1);
     }
+    // Remove any surrounding "^?(" and ")$?", automatically added below.
+    if ( candidate.size() > 6 && candidate.substr(0, 3) == "^?(" &&
+         candidate.substr(candidate.size() - 3, 3) == ")$?" ) {
+        candidate.erase(0, 3);
+        candidate.erase(candidate.size() - 3);
+    }
 
     auto re = std::make_unique<RE_Matcher>(candidate.c_str());
     if ( ! re->Compile() )
@@ -1023,34 +1072,69 @@ static std::variant<ValPtr, std::string> BuildVal(const rapidjson::Value& j, con
     }
 
     case TYPE_TABLE: {
-        if ( ! j.IsArray() )
-            return mismatch_err();
-
-        if ( ! t->IsSet() )
-            return util::fmt("tables are not supported");
-
-        auto tt = t->AsSetType();
-        auto tl = tt->GetIndices();
+        auto tt = t->AsTableType(); // The table vs set type does not matter below
         auto tv = make_intrusive<TableVal>(IntrusivePtr<TableType>{NewRef{}, tt});
+        auto tl = tt->GetIndices();
 
-        for ( const auto& item : j.GetArray() ) {
-            std::variant<ValPtr, std::string> v;
+        if ( t->IsSet() ) {
+            if ( ! j.IsArray() )
+                return mismatch_err();
 
-            if ( tl->GetTypes().size() == 1 )
-                v = BuildVal(item, tl->GetPureType(), key_func);
-            else
-                v = BuildVal(item, tl, key_func);
+            for ( const auto& item : j.GetArray() ) {
+                std::variant<ValPtr, std::string> v;
 
-            if ( ! get_if<ValPtr>(&v) )
-                return v;
+                if ( tl->GetTypes().size() == 1 )
+                    v = BuildVal(item, tl->GetPureType(), key_func);
+                else
+                    v = BuildVal(item, tl, key_func);
 
-            if ( ! std::get<ValPtr>(v) )
-                continue;
+                if ( ! get_if<ValPtr>(&v) )
+                    return v;
+                if ( ! std::get<ValPtr>(v) )
+                    continue;
 
-            tv->Assign(std::move(std::get<ValPtr>(v)), nullptr);
+                tv->Assign(std::move(std::get<ValPtr>(v)), nullptr);
+            }
+
+            return tv;
         }
+        else {
+            if ( ! j.IsObject() )
+                return mismatch_err();
 
-        return tv;
+            for ( auto it = j.MemberBegin(); it != j.MemberEnd(); ++it ) {
+                rapidjson::Document idxstr;
+                idxstr.Parse(it->name.GetString(), it->name.GetStringLength());
+
+                std::variant<ValPtr, std::string> idx;
+
+                if ( tl->GetTypes().size() > 1 )
+                    idx = BuildVal(idxstr, tl, key_func);
+                else if ( UsesJSONStringType(tl->GetPureType()) )
+                    // Parse this with the quotes the string came with. This
+                    // mirrors the quote-stripping in BuildJSON().
+                    idx = BuildVal(it->name, tl->GetPureType(), key_func);
+                else
+                    // Parse the string's content, not the full JSON string.
+                    idx = BuildVal(idxstr, tl->GetPureType(), key_func);
+
+                if ( ! get_if<ValPtr>(&idx) )
+                    return idx;
+                if ( ! std::get<ValPtr>(idx) )
+                    continue;
+
+                auto v = BuildVal(it->value, tt->Yield(), key_func);
+
+                if ( ! get_if<ValPtr>(&v) )
+                    return v;
+                if ( ! std::get<ValPtr>(v) )
+                    continue;
+
+                tv->Assign(std::move(std::get<ValPtr>(idx)), std::move(std::get<ValPtr>(v)));
+            }
+
+            return tv;
+        }
     }
 
     case TYPE_RECORD: {

diff --git a/src/Val.h b/src/Val.h
index c8392bbcfd..a74f68f8d6 100644
--- a/src/Val.h
+++ b/src/Val.h
@@ -236,6 +236,20 @@ public:
 
     TableValPtr GetRecordFields();
 
+    /**
+     * Renders the Val into a JSON string representation. For record values
+     * contained anywhere in the Val, two arguments control the JSON result
+     * (they have no effect on other types):
+     *
+     * @param only_loggable If true, skips any fields that don't have the &log
+     * attribute.
+     *
+     * @param re The regular expression matcher, if given, is used to strip the
+     * first match on any record field name in the resulting output. See the
+     * to_json() BiF for context.
+     *
+     * @return JSON data representing the Val.
+     */
     StringValPtr ToJSON(bool only_loggable = false, RE_Matcher* re = nullptr);
 
     template

diff --git a/src/analyzer/protocol/http/HTTP.cc b/src/analyzer/protocol/http/HTTP.cc
index 684abce01b..3d5bdc246f 100644
--- a/src/analyzer/protocol/http/HTTP.cc
+++ b/src/analyzer/protocol/http/HTTP.cc
@@ -1020,6 +1020,36 @@ void HTTP_Analyzer::Undelivered(uint64_t seq, int len, bool is_orig) {
     }
 }
 
+void HTTP_Analyzer::FlipRoles() {
+    analyzer::tcp::TCP_ApplicationAnalyzer::FlipRoles();
+
+    // If FlipRoles() is invoked after we've upgraded to something,
+    // don't do anything. This shouldn't happen as flipping of TCP
+    // connections currently happens before any data is transferred,
+    // but better safe than sorry.
+    if ( upgraded || pia ) {
+        Weird("HTTP_late_flip_roles");
+        return;
+    }
+
+    // If we haven't upgraded but saw requests or replies, just bail
+    // for the rest of this connection. Again, this should never happen
+    // right now, but raise a weird in case it starts to happen.
+    if ( num_requests > 0 || num_replies > 0 ) {
+        Weird("HTTP_late_flip_roles");
+        SetSkip(true);
+        return;
+    }
+
+    // IsOrig() of the support analyzer has been updated, but we still need
+    // to change the analyzer's local state and the partial skipping setting.
+    bool skip_partial_orig = content_line_orig->SkipPartial();
+    bool skip_partial_resp = content_line_resp->SkipPartial();
+    std::swap(content_line_orig, content_line_resp);
+    content_line_orig->SetSkipPartial(skip_partial_orig);
+    content_line_resp->SetSkipPartial(skip_partial_resp);
+}
+
 void HTTP_Analyzer::EndpointEOF(bool is_orig) {
     analyzer::tcp::TCP_ApplicationAnalyzer::EndpointEOF(is_orig);

diff --git a/src/analyzer/protocol/http/HTTP.h b/src/analyzer/protocol/http/HTTP.h
index 62f519201d..15feb9e313 100644
--- a/src/analyzer/protocol/http/HTTP.h
+++ b/src/analyzer/protocol/http/HTTP.h
@@ -167,6 +167,7 @@ public:
     void Done() override;
     void DeliverStream(int len, const u_char* data, bool orig) override;
     void Undelivered(uint64_t seq, int len, bool orig) override;
+    void FlipRoles() override;
 
     // Overridden from analyzer::tcp::TCP_ApplicationAnalyzer
     void EndpointEOF(bool is_orig) override;

diff --git a/src/analyzer/protocol/ldap/ldap.evt b/src/analyzer/protocol/ldap/ldap.evt
index 369a21d331..96baef6f98 100644
--- a/src/analyzer/protocol/ldap/ldap.evt
+++ b/src/analyzer/protocol/ldap/ldap.evt
@@ -14,9 +14,9 @@ import LDAP;
 on LDAP::Message -> event LDAP::message($conn,
                                         self.messageID,
                                         self.opcode,
-                                        self.result.code,
-                                        self.result.matchedDN,
-                                        self.result.diagnosticMessage,
+                                        self.result_.code,
+                                        self.result_.matchedDN,
+                                        self.result_.diagnosticMessage,
                                         self.obj,
                                         self.arg);

diff --git a/src/analyzer/protocol/ldap/ldap.spicy b/src/analyzer/protocol/ldap/ldap.spicy
index 7c60d1ec19..8d74b85237 100644
--- a/src/analyzer/protocol/ldap/ldap.spicy
+++ b/src/analyzer/protocol/ldap/ldap.spicy
@@ -223,7 +223,7 @@ public type Message = unit {
     var opcode: ProtocolOpcode = ProtocolOpcode::Undef;
     var applicationBytes: bytes;
     var unsetResultDefault: Result;
-    var result: Result& = self.unsetResultDefault;
+    var result_: Result& = self.unsetResultDefault;
     var obj: string = "";
     var arg: string = "";
     var success: bool = False;
@@ -328,7 +328,7 @@ type BindRequest = unit(inout message: Message) {
 type BindResponse = unit(inout message: Message) {
     : Result {
-        message.result = $$;
+        message.result_ = $$;
     }
 
     # TODO: if we want to parse SASL credentials returned
@@ -777,7 +777,7 @@ type SearchResultEntry = unit(inout message: Message) {
 type SearchResultDone = unit(inout message: 
Message) { : Result { - message.result = $$; + message.result_ = $$; } }; @@ -800,7 +800,7 @@ type ModifyRequest = unit(inout message: Message) { type ModifyResponse = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; @@ -816,7 +816,7 @@ type ModifyResponse = unit(inout message: Message) { type AddResponse = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; @@ -832,7 +832,7 @@ type DelRequest = unit(inout message: Message) { type DelResponse = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; @@ -847,7 +847,7 @@ type DelResponse = unit(inout message: Message) { type ModDNResponse = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; @@ -862,7 +862,7 @@ type ModDNResponse = unit(inout message: Message) { type CompareResponse = unit(inout message: Message) { : Result { - message.result = $$; + message.result_ = $$; } }; diff --git a/src/analyzer/protocol/quic/QUIC.evt b/src/analyzer/protocol/quic/QUIC.evt index b81b0084db..a985acf519 100644 --- a/src/analyzer/protocol/quic/QUIC.evt +++ b/src/analyzer/protocol/quic/QUIC.evt @@ -19,6 +19,6 @@ on QUIC::HandshakePacket -> event QUIC::handshake_packet($conn, $is_orig, self.h on QUIC::ZeroRTTPacket -> event QUIC::zero_rtt_packet($conn, $is_orig, self.header.version, self.header.dest_conn_id, self.header.src_conn_id); on QUIC::ConnectionClosePayload -> event QUIC::connection_close_frame($conn, $is_orig, self.header.version, self.header.dest_conn_id, self.header.src_conn_id, - self.error_code.result, self.reason_phrase); + self.error_code.result_, self.reason_phrase); on QUIC::UnhandledVersion -> event QUIC::unhandled_version($conn, $is_orig, self.header.version, self.header.dest_conn_id, self.header.src_conn_id); diff --git a/src/analyzer/protocol/quic/QUIC.spicy b/src/analyzer/protocol/quic/QUIC.spicy index fb2f854a3a..bde1650ee2 100644 --- a/src/analyzer/protocol/quic/QUIC.spicy +++ b/src/analyzer/protocol/quic/QUIC.spicy @@ -157,7 +157,7 @@ type FrameType = enum { type VariableLengthInteger = unit { var bytes_to_parse: uint64; - var result: uint64; + var result_: uint64; # Value of the two most significant bits indicates number of bytes # to parse for the variable length integer. 
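As background for the unit above: this is QUIC's standard variable-length integer encoding (RFC 9000, Section 16). A minimal standalone C++ sketch of the same decoding, for illustration only (the function name and buffer handling here are assumptions, not part of the patch):

    #include <cstddef>
    #include <cstdint>
    #include <optional>

    // Decode a QUIC varint: the top two bits of the first byte select a total
    // length of 1, 2, 4, or 8 bytes; the remaining bits contribute the value.
    std::optional<uint64_t> decode_quic_varint(const uint8_t* data, size_t len) {
        if ( len == 0 )
            return std::nullopt;

        size_t n = static_cast<size_t>(1) << ((data[0] & 0xC0) >> 6); // 2**prefix
        if ( len < n )
            return std::nullopt;

        uint64_t result = data[0] & 0x3F; // low 6 bits of the first byte
        for ( size_t i = 1; i < n; ++i )
            result = (result << 8) | data[i]; // big-endian continuation bytes

        return result;
    }

This mirrors the unit's two steps: bytes_to_parse = 2**((0xC0 & $$) >> 6), then a fold over the remaining bytes_to_parse - 1 bytes.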
@@ -166,11 +166,11 @@ type VariableLengthInteger = unit { # Section 16 and Appendix A : uint8 { self.bytes_to_parse = 2**((0xC0 & $$) >> 6); - self.result = $$ & 0x3F; + self.result_ = $$ & 0x3F; } : uint8[self.bytes_to_parse - 1] if (self.bytes_to_parse > 1) foreach { - self.result = (self.result << 8) | $$; + self.result_ = (self.result_ << 8) | $$; } }; @@ -185,8 +185,8 @@ public type LongHeaderPacketV1 = unit(inout outer: LongHeaderPacket) { outer.encrypted_offset = outer.offset() + self.initial_hdr.length.bytes_to_parse + self.initial_hdr.token_length.bytes_to_parse + - self.initial_hdr.token_length.result; - outer.payload_length = self.initial_hdr.length.result; + self.initial_hdr.token_length.result_; + outer.payload_length = self.initial_hdr.length.result_; } LongPacketTypeV1::ZERO_RTT -> zerortt_hdr : ZeroRTTPacket(outer); @@ -204,8 +204,8 @@ public type LongHeaderPacketV2 = unit(inout outer: LongHeaderPacket) { outer.encrypted_offset = outer.offset() + self.initial_hdr.length.bytes_to_parse + self.initial_hdr.token_length.bytes_to_parse + - self.initial_hdr.token_length.result; - outer.payload_length = self.initial_hdr.length.result; + self.initial_hdr.token_length.result_; + outer.payload_length = self.initial_hdr.length.result_; } LongPacketTypeV2::ZERO_RTT -> zerortt_hdr : ZeroRTTPacket(outer); @@ -281,7 +281,7 @@ public type Frame = unit(header: LongHeaderPacket, from_client: bool, crypto_sin FrameType::ACK2 -> b: ACKPayload; FrameType::CRYPTO -> c: CRYPTOPayload(from_client) { # Have the sink re-assemble potentially out-of-order cryptodata - crypto_sink.write(self.c.cryptodata, self.c.offset.result); + crypto_sink.write(self.c.cryptodata, self.c.offset.result_); } FrameType::CONNECTION_CLOSE1 -> : ConnectionClosePayload(header); FrameType::PADDING -> : skip /\x00*/; # eat the padding @@ -295,7 +295,7 @@ public type Frame = unit(header: LongHeaderPacket, from_client: bool, crypto_sin type CRYPTOPayload = unit(from_client: bool) { offset: VariableLengthInteger; length: VariableLengthInteger; - cryptodata: bytes &size=self.length.result; + cryptodata: bytes &size=self.length.result_; }; type ACKPayload = unit { @@ -313,7 +313,7 @@ type ConnectionClosePayload = unit(header: LongHeaderPacket) { -> frame_type: VariableLengthInteger; }; reason_phrase_length: VariableLengthInteger; - reason_phrase: bytes &size=self.reason_phrase_length.result; + reason_phrase: bytes &size=self.reason_phrase_length.result_; }; @@ -326,7 +326,7 @@ type ConnectionClosePayload = unit(header: LongHeaderPacket) { type InitialPacket = unit(header: LongHeaderPacket) { var header: LongHeaderPacket = header; token_length: VariableLengthInteger; - token: bytes &size=self.token_length.result; + token: bytes &size=self.token_length.result_; # 5.4.2. Header Protection Sample # @@ -336,25 +336,25 @@ type InitialPacket = unit(header: LongHeaderPacket) { # # Enforce 4 bytes Packet Number length + 16 bytes sample # ciphertext available. - length: VariableLengthInteger &requires=self.length.result >= 20; + length: VariableLengthInteger &requires=self.length.result_ >= 20; # Consume the remainder of payload. This # includes the packet number field, but we # do not know its length yet. We need the # payload for sampling, however. 
- payload: skip bytes &size=self.length.result; + payload: skip bytes &size=self.length.result_; }; type ZeroRTTPacket = unit(header: LongHeaderPacket) { var header: LongHeaderPacket = header; length: VariableLengthInteger; - payload: skip bytes &size=self.length.result; + payload: skip bytes &size=self.length.result_; }; type HandshakePacket = unit(header: LongHeaderPacket) { var header: LongHeaderPacket = header; length: VariableLengthInteger; - payload: skip bytes &size=self.length.result; + payload: skip bytes &size=self.length.result_; }; diff --git a/src/analyzer/protocol/ssh/ssh-protocol.pac b/src/analyzer/protocol/ssh/ssh-protocol.pac index 3709238405..c152f29b93 100644 --- a/src/analyzer/protocol/ssh/ssh-protocol.pac +++ b/src/analyzer/protocol/ssh/ssh-protocol.pac @@ -37,7 +37,6 @@ type SSH_Key_Exchange(is_orig: bool) = record { key_ex: case $context.connection.get_version() of { SSH1 -> ssh1_msg : SSH1_Key_Exchange(is_orig, packet_length); SSH2 -> ssh2_msg : SSH2_Key_Exchange(is_orig, packet_length); - default -> terminate : bytestring &restofdata &transient; }; } &length = $context.flow.get_kex_length($context.connection.get_version(), packet_length); diff --git a/src/analyzer/protocol/tcp/ContentLine.h b/src/analyzer/protocol/tcp/ContentLine.h index 4d473a5f84..de069ebb62 100644 --- a/src/analyzer/protocol/tcp/ContentLine.h +++ b/src/analyzer/protocol/tcp/ContentLine.h @@ -22,6 +22,9 @@ public: // If enabled, flag (first) line with embedded NUL. Default off. void SetIsNULSensitive(bool enable) { flag_NULs = enable; } + // Returns true if skipping data above a hole. + bool SkipPartial() const { return skip_partial; } + // If enabled, skip data above a hole. Default off. void SetSkipPartial(bool enable) { skip_partial = enable; } diff --git a/src/iosource/Manager.cc b/src/iosource/Manager.cc index fcb4a1e961..bbef07a922 100644 --- a/src/iosource/Manager.cc +++ b/src/iosource/Manager.cc @@ -76,6 +76,9 @@ Manager::~Manager() { pkt_dumpers.clear(); + // Was registered without lifetime management. + delete pkt_src; + #ifndef _MSC_VER // There's a bug here with builds on Windows that causes an assertion with debug builds // related to libkqueue returning a zero for the file descriptor. The assert happens @@ -104,24 +107,25 @@ void Manager::Wakeup(std::string_view where) { wakeup->Ping(where); } +void Manager::ReapSource(Source* src) { + auto* iosource = src->src; + assert(! iosource->IsOpen()); + + DBG_LOG(DBG_MAINLOOP, "Reaping %s", src->src->Tag()); + iosource->Done(); + + if ( src->manage_lifetime ) + delete iosource; + + if ( src->dont_count ) + dont_counts--; + + delete src; +} + void Manager::FindReadySources(ReadySources* ready) { ready->clear(); - // Remove sources which have gone dry. For simplicity, we only - // remove at most one each time. - for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i ) - if ( ! (*i)->src->IsOpen() ) { - (*i)->src->Done(); - delete *i; - sources.erase(i); - break; - } - - // If there aren't any sources and exit_only_after_terminate is false, just - // return an empty set of sources. We want the main loop to end. - if ( Size() == 0 && (! BifConst::exit_only_after_terminate || run_state::terminating) ) - return; - double timeout = -1; IOSource* timeout_src = nullptr; bool time_to_poll = false; @@ -133,7 +137,8 @@ void Manager::FindReadySources(ReadySources* ready) { } // Find the source with the next timeout value. 
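The rewritten FindReadySources() loop below folds removal of dry sources into the main scan, replacing the old remove-at-most-one-per-pass approach. It uses the standard erase-while-iterating idiom; a minimal sketch with a placeholder element type and predicate (not the manager's actual types):

    #include <vector>

    // erase() returns the iterator following the removed element, so the loop
    // advances either via ++i or via the value erase() returns, never both.
    void drop_closed(std::vector<int>& sources) {
        for ( auto i = sources.begin(); i != sources.end(); /* noop */ ) {
            if ( *i < 0 ) // stand-in for "source is no longer open"
                i = sources.erase(i);
            else
                ++i;
        }
    }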
- for ( auto src : sources ) { + for ( auto i = sources.begin(); i != sources.end(); /* noop */ ) { + auto* src = *i; auto iosource = src->src; if ( iosource->IsOpen() ) { double next = iosource->GetNextTimeout(); @@ -161,7 +166,19 @@ void Manager::FindReadySources(ReadySources* ready) { ready->push_back({pkt_src, -1, 0}); } } + ++i; } + else { + ReapSource(src); + i = sources.erase(i); + } + } + + // If there aren't any sources and exit_only_after_terminate is false, just + // return an empty set of sources. We want the main loop to end. + if ( Size() == 0 && (! BifConst::exit_only_after_terminate || run_state::terminating) ) { + ready->clear(); + return; } DBG_LOG(DBG_MAINLOOP, "timeout: %f ready size: %zu time_to_poll: %d\n", timeout, ready->size(), time_to_poll); @@ -342,7 +359,7 @@ void Manager::Register(IOSource* src, bool dont_count, bool manage_lifetime) { void Manager::Register(PktSrc* src) { pkt_src = src; - Register(src, false); + Register(src, false, false); // Once we know if the source is live or not, adapt the // poll_interval accordingly. diff --git a/src/iosource/Manager.h b/src/iosource/Manager.h index c533afb982..48f8814b2b 100644 --- a/src/iosource/Manager.h +++ b/src/iosource/Manager.h @@ -143,6 +143,15 @@ public: void Wakeup(std::string_view where); private: + /** + * Internal data structure for managing registered IOSources. + */ + struct Source { + IOSource* src = nullptr; + bool dont_count = false; + bool manage_lifetime = false; + }; + /** * Calls the appropriate poll method to gather a set of IOSources that are * ready for processing. @@ -170,6 +179,19 @@ private: void RemoveAll(); + /** + * Reap a closed IO source. + * + * Reaping involves calling IOSource::Done() on the underlying IOSource, + * freeing it if Source.manage_lifetime is \c true, updating \c dont_counts + * and freeing \a src, making it invalid. + * + * The caller ensures \a src is removed from Manager.sources. + * + * @param src The source to reap. + */ + void ReapSource(Source* src); + class WakeupHandler final : public IOSource { public: WakeupHandler(); @@ -192,12 +214,6 @@ private: zeek::detail::Flare flare; }; - struct Source { - IOSource* src = nullptr; - bool dont_count = false; - bool manage_lifetime = false; - }; - using SourceList = std::vector<Source*>; SourceList sources; diff --git a/src/reporter.bif b/src/reporter.bif index e1799bd163..3905d14936 100644 --- a/src/reporter.bif +++ b/src/reporter.bif @@ -43,8 +43,9 @@ function Reporter::warning%(msg: string%): bool return zeek::val_mgr->True(); %} -## Generates a non-fatal error indicative of a definite problem that should -## be addressed. Program execution does not terminate. +## Generates a usually non-fatal error indicative of a definite problem that +## should be addressed. Program execution does not terminate unless the error +## is reported during initialization (e.g., :zeek:see:`zeek_init`). ## ## msg: The error message to report. ## diff --git a/src/script_opt/CSE.cc b/src/script_opt/CSE.cc index 9830399ffe..6f122468af 100644 --- a/src/script_opt/CSE.cc +++ b/src/script_opt/CSE.cc @@ -133,6 +133,7 @@ TraversalCode CSE_ValidityChecker::PreExpr(const Expr* e) { case EXPR_RECORD_COERCE: case EXPR_RECORD_CONSTRUCTOR: + case EXPR_REC_CONSTRUCT_WITH_REC: // Note, record coercion behaves like constructors in terms of // potentially executing &default functions.
In either case, // the type of the expression reflects the type we want to analyze diff --git a/src/script_opt/Expr.cc b/src/script_opt/Expr.cc index 8e3d1fcb9a..1ad24c5277 100644 --- a/src/script_opt/Expr.cc +++ b/src/script_opt/Expr.cc @@ -1783,7 +1783,7 @@ ExprPtr RecordConstructorExpr::Duplicate() { if ( map ) { auto rt = cast_intrusive<RecordType>(type); - return SetSucc(new RecordConstructorExpr(rt, op_l)); + return SetSucc(new RecordConstructorExpr(rt, op_l, false)); } else return SetSucc(new RecordConstructorExpr(op_l)); @@ -1807,6 +1807,11 @@ bool RecordConstructorExpr::HasReducedOps(Reducer* c) const { } ExprPtr RecordConstructorExpr::Reduce(Reducer* c, StmtPtr& red_stmt) { + if ( ConstructFromRecordExpr::FindMostCommonRecordSource(op) ) { + auto cfr = with_location_of(make_intrusive<ConstructFromRecordExpr>(this), this); + return cfr->Reduce(c, red_stmt); + } + red_stmt = ReduceToSingletons(c); if ( c->Optimizing() ) @@ -2846,6 +2851,242 @@ void FieldLHSAssignExpr::ExprDescribe(ODesc* d) const { op2->Describe(d); } +// Helper functions. +// This first one mines out of a given statement in an assignment chain the +// variable that occurs as a LHS target, so 'x' for "x$foo = y$bar". +static NameExprPtr get_RFU_LHS_var(const Stmt* s) { + auto s_e = s->AsExprStmt()->StmtExpr(); + auto var = s_e->GetOp1()->GetOp1()->GetOp1(); + ASSERT(var->Tag() == EXPR_NAME); + return cast_intrusive<NameExpr>(std::move(var)); +} + +// This one mines out the RHS, so 'y' for "x$foo = y$bar", or for +// "x$foo = x$foo + y$bar" (which is what "x$foo += y$bar" is at this point). +static NameExprPtr get_RFU_RHS_var(const Stmt* s) { + auto s_e = s->AsExprStmt()->StmtExpr(); + auto rhs = s_e->GetOp2(); + + ExprPtr var; + if ( rhs->Tag() == EXPR_FIELD ) + var = rhs->GetOp1(); + else + var = rhs->GetOp2()->GetOp1(); + + ASSERT(var->Tag() == EXPR_NAME); + return cast_intrusive<NameExpr>(std::move(var)); +} + +RecordFieldUpdatesExpr::RecordFieldUpdatesExpr(ExprTag t, const std::vector<const Stmt*>& stmts, + std::set<const Stmt*>& stmt_pool) + : BinaryExpr(t, get_RFU_LHS_var(stmts[0]), get_RFU_RHS_var(stmts[0])) { + // Build up the LHS map (record fields we're assigning/adding) and RHS map + // (record fields from which we're assigning). + for ( auto s : stmts ) { + auto s_e = s->AsExprStmt()->StmtExpr(); + auto lhs = s_e->GetOp1()->GetOp1(); + auto lhs_field = lhs->AsFieldExpr()->Field(); + + auto rhs = s_e->GetOp2(); + if ( rhs->Tag() != EXPR_FIELD ) + // It's "x$foo = x$foo + y$bar". + rhs = rhs->GetOp2(); + + auto rhs_field = rhs->AsFieldExpr()->Field(); + + lhs_map.push_back(lhs_field); + rhs_map.push_back(rhs_field); + + // Consistency check that the statement is indeed in the pool, + // before we remove it.
+ ASSERT(stmt_pool.count(s) > 0); + stmt_pool.erase(s); + } +} + +RecordFieldUpdatesExpr::RecordFieldUpdatesExpr(ExprTag t, ExprPtr e1, ExprPtr e2, std::vector<int> _lhs_map, + std::vector<int> _rhs_map) + : BinaryExpr(t, std::move(e1), std::move(e2)) { + lhs_map = std::move(_lhs_map); + rhs_map = std::move(_rhs_map); +} + +ValPtr RecordFieldUpdatesExpr::Fold(Val* v1, Val* v2) const { + auto rv1 = v1->AsRecordVal(); + auto rv2 = v2->AsRecordVal(); + + for ( size_t i = 0; i < lhs_map.size(); ++i ) + FoldField(rv1, rv2, i); + + return nullptr; +} + +bool RecordFieldUpdatesExpr::IsReduced(Reducer* c) const { return HasReducedOps(c); } + +void RecordFieldUpdatesExpr::ExprDescribe(ODesc* d) const { + op1->Describe(d); + d->Add(expr_name(tag)); + op2->Describe(d); +} + +ExprPtr RecordFieldUpdatesExpr::Reduce(Reducer* c, StmtPtr& red_stmt) { + if ( c->Optimizing() ) { + op1 = c->UpdateExpr(op1); + op2 = c->UpdateExpr(op2); + } + + red_stmt = nullptr; + + if ( ! op1->IsSingleton(c) ) + op1 = op1->ReduceToSingleton(c, red_stmt); + + StmtPtr red2_stmt; + if ( ! op2->IsSingleton(c) ) + op2 = op2->ReduceToSingleton(c, red2_stmt); + + red_stmt = MergeStmts(red_stmt, std::move(red2_stmt)); + + return ThisPtr(); } + +ExprPtr AssignRecordFieldsExpr::Duplicate() { + auto e1 = op1->Duplicate(); + auto e2 = op2->Duplicate(); + return SetSucc(new AssignRecordFieldsExpr(std::move(e1), std::move(e2), lhs_map, rhs_map)); +} + +void AssignRecordFieldsExpr::FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const { + rv1->Assign(lhs_map[i], rv2->GetField(rhs_map[i])); +} + +ConstructFromRecordExpr::ConstructFromRecordExpr(const RecordConstructorExpr* orig) + : AssignRecordFieldsExpr(nullptr, nullptr, {}, {}) { + tag = EXPR_REC_CONSTRUCT_WITH_REC; + SetType(orig->GetType()); + + // Arguments used in original and final constructor. + auto& orig_args = orig->Op()->Exprs(); + // The one we'll build up below: + auto args = with_location_of(make_intrusive<ListExpr>(), orig); + + auto src_id = FindMostCommonRecordSource(orig->Op()); + auto& map = orig->Map(); + + for ( size_t i = 0; i < orig_args.size(); ++i ) { + auto e = orig_args[i]; + auto src = FindRecordSource(e); + if ( src && src->GetOp1()->AsNameExpr()->IdPtr() == src_id ) { + // "map" might be nil if we're optimizing [$x = foo$bar]. + lhs_map.push_back(map ? (*map)[i] : i); + rhs_map.push_back(src->Field()); + } + else + args->Append({NewRef{}, e}); + } + + auto rt = cast_intrusive<RecordType>(orig->GetType()); + op1 = with_location_of(make_intrusive<RecordConstructorExpr>(std::move(rt), std::move(args), false), orig); + op2 = with_location_of(make_intrusive<NameExpr>(std::move(src_id)), orig); +} + +IDPtr ConstructFromRecordExpr::FindMostCommonRecordSource(const ListExprPtr& exprs) { + // Maps identifiers to how often they appear in the constructor's + // arguments as a field reference. Used to find the most common. + std::unordered_map<IDPtr, int> id_cnt; + + for ( auto e : exprs->Exprs() ) { + auto src = FindRecordSource(e); + if ( src ) { + auto id = src->GetOp1()->AsNameExpr()->IdPtr(); + ++id_cnt[id]; + } + } + + if ( id_cnt.empty() ) + return nullptr; + + // Return the most common. + auto max_entry = std::max_element(id_cnt.begin(), id_cnt.end(), + [](const std::pair<IDPtr, int>& p1, const std::pair<IDPtr, int>& p2) { + return p1.second < p2.second; + }); + return max_entry->first; +} + +FieldExprPtr ConstructFromRecordExpr::FindRecordSource(const Expr* const_e) { + // The following cast just saves us from having to define a "const" version + // of AsFieldAssignExprPtr().
+ auto e = const_cast<Expr*>(const_e); + const auto fa = e->AsFieldAssignExprPtr(); + auto fa_rhs = e->GetOp1(); + + if ( fa_rhs->Tag() != EXPR_FIELD ) + return nullptr; + + auto rhs_rec = fa_rhs->GetOp1(); + if ( rhs_rec->Tag() != EXPR_NAME ) + return nullptr; + + return cast_intrusive<FieldExpr>(std::move(fa_rhs)); +} + +ExprPtr ConstructFromRecordExpr::Duplicate() { + auto e1 = op1->Duplicate(); + auto e2 = op2->Duplicate(); + return SetSucc(new ConstructFromRecordExpr(std::move(e1), std::move(e2), lhs_map, rhs_map)); +} + +bool ConstructFromRecordExpr::IsReduced(Reducer* c) const { return op1->HasReducedOps(c) && op2->IsReduced(c); } + +bool ConstructFromRecordExpr::HasReducedOps(Reducer* c) const { return IsReduced(c); } + +ExprPtr ConstructFromRecordExpr::Reduce(Reducer* c, StmtPtr& red_stmt) { + if ( c->Optimizing() ) { + op1 = c->UpdateExpr(op1); + op2 = c->UpdateExpr(op2); + } + + red_stmt = nullptr; + + if ( ! op1->HasReducedOps(c) ) + red_stmt = op1->ReduceToSingletons(c); + + StmtPtr red2_stmt; + if ( ! op2->IsSingleton(c) ) + op2 = op2->ReduceToSingleton(c, red2_stmt); + + red_stmt = MergeStmts(red_stmt, std::move(red2_stmt)); + + if ( c->Optimizing() ) + return ThisPtr(); + else + return AssignToTemporary(c, red_stmt); +} + +ExprPtr AddRecordFieldsExpr::Duplicate() { + auto e1 = op1->Duplicate(); + auto e2 = op2->Duplicate(); + return SetSucc(new AddRecordFieldsExpr(std::move(e1), std::move(e2), lhs_map, rhs_map)); +} + +void AddRecordFieldsExpr::FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const { + // The goal here is correctness, not efficiency, since normally this + // expression only exists temporarily before being compiled to ZAM. + // Doing it this way saves us from having to switch on the type of the '+' + // operands. + auto lhs_val = rv1->GetField(lhs_map[i]); + auto rhs_val = rv2->GetField(rhs_map[i]); + + auto lhs_const = make_intrusive<ConstExpr>(lhs_val); + auto rhs_const = make_intrusive<ConstExpr>(rhs_val); + + auto add_expr = make_intrusive<AddExpr>(lhs_const, rhs_const); + auto sum = add_expr->Eval(nullptr); + ASSERT(sum); + + rv1->Assign(lhs_map[i], std::move(sum)); +} + CoerceToAnyExpr::CoerceToAnyExpr(ExprPtr arg_op) : UnaryExpr(EXPR_TO_ANY_COERCE, std::move(arg_op)) { type = base_type(TYPE_ANY); } diff --git a/src/script_opt/Expr.h b/src/script_opt/Expr.h index 3d7bc382c0..f1b8427319 100644 --- a/src/script_opt/Expr.h +++ b/src/script_opt/Expr.h @@ -104,6 +104,100 @@ protected: int field; }; +// Base class for updating a number of record fields from fields in +// another record. +class RecordFieldUpdatesExpr : public BinaryExpr { +public: + const auto& LHSMap() const { return lhs_map; } + const auto& RHSMap() const { return rhs_map; } + + // Only needed if we're transforming-but-not-compiling. + ValPtr Fold(Val* v1, Val* v2) const override; + + bool IsPure() const override { return false; } + bool IsReduced(Reducer* c) const override; + ExprPtr Reduce(Reducer* c, StmtPtr& red_stmt) override; + +protected: + RecordFieldUpdatesExpr(ExprTag t, const std::vector<const Stmt*>& stmts, std::set<const Stmt*>& stmt_pool); + RecordFieldUpdatesExpr(ExprTag t, ExprPtr e1, ExprPtr e2, std::vector<int> _lhs_map, std::vector<int> _rhs_map); + + // Apply the operation for the given index 'i' from rv2 to rv1. + // Does not return a value since we're modifying rv1 in-place. + virtual void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const = 0; + + void ExprDescribe(ODesc* d) const override; + + std::vector<int> lhs_map; + std::vector<int> rhs_map; +}; + +// Assign a bunch of record fields en masse from fields in another record.
+class AssignRecordFieldsExpr : public RecordFieldUpdatesExpr { +public: + AssignRecordFieldsExpr(const std::vector<const Stmt*>& stmts, std::set<const Stmt*>& stmt_pool) + : RecordFieldUpdatesExpr(EXPR_REC_ASSIGN_FIELDS, stmts, stmt_pool) {} + + ExprPtr Duplicate() override; + +protected: + // Used for duplicating. + AssignRecordFieldsExpr(ExprPtr e1, ExprPtr e2, std::vector<int> _lhs_map, std::vector<int> _rhs_map) + : RecordFieldUpdatesExpr(EXPR_REC_ASSIGN_FIELDS, std::move(e1), std::move(e2), std::move(_lhs_map), + std::move(_rhs_map)) {} + + void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const override; +}; + +// Construct a record with some of the fields taken directly from another +// record. After full construction, the first operand is the base constructor +// (a subset of the original) and the second is the source record being used +// for some of the initialization. +using FieldExprPtr = IntrusivePtr<FieldExpr>; +class ConstructFromRecordExpr : public AssignRecordFieldsExpr { +public: + ConstructFromRecordExpr(const RecordConstructorExpr* orig); + + // Helper function that finds the most common source value. + // Returns its identifier, or nil if there is no "$field = x$y" + // to leverage. + static IDPtr FindMostCommonRecordSource(const ListExprPtr& exprs); + + ExprPtr Duplicate() override; + + bool IsReduced(Reducer* c) const override; + bool HasReducedOps(Reducer* c) const override; + ExprPtr Reduce(Reducer* c, StmtPtr& red_stmt) override; + +protected: + ConstructFromRecordExpr(ExprPtr e1, ExprPtr e2, std::vector<int> _lhs_map, std::vector<int> _rhs_map) + : AssignRecordFieldsExpr(std::move(e1), std::move(e2), std::move(_lhs_map), std::move(_rhs_map)) { + tag = EXPR_REC_CONSTRUCT_WITH_REC; + } + + // Helper function that for a given "$field = x$y" returns the + // "x$y" node, or nil if that's not the nature of the expression. + static FieldExprPtr FindRecordSource(const Expr* e); +}; + +// Add en masse fields from one record to fields in another record. +// We could add additional such expressions for other common operations +// like "x$foo -= y$bar", but in practice these are quite rare. +class AddRecordFieldsExpr : public RecordFieldUpdatesExpr { +public: + AddRecordFieldsExpr(const std::vector<const Stmt*>& stmts, std::set<const Stmt*>& stmt_pool) + : RecordFieldUpdatesExpr(EXPR_REC_ADD_FIELDS, stmts, stmt_pool) {} + + ExprPtr Duplicate() override; + +protected: + AddRecordFieldsExpr(ExprPtr e1, ExprPtr e2, std::vector<int> _lhs_map, std::vector<int> _rhs_map) + : RecordFieldUpdatesExpr(EXPR_REC_ADD_FIELDS, std::move(e1), std::move(e2), std::move(_lhs_map), + std::move(_rhs_map)) {} + + void FoldField(RecordVal* rv1, RecordVal* rv2, size_t i) const override; +}; + // ... and for conversion from a "vector of any" type.
class CoerceFromAnyVecExpr : public UnaryExpr { public: diff --git a/src/script_opt/FuncInfo.cc b/src/script_opt/FuncInfo.cc index fd39c4779b..8b6314dd4d 100644 --- a/src/script_opt/FuncInfo.cc +++ b/src/script_opt/FuncInfo.cc @@ -142,7 +142,6 @@ static std::unordered_map func_attrs = { {"Spicy::__toggle_analyzer", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"Supervisor::__create", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"Supervisor::__destroy", ATTR_NO_SCRIPT_SIDE_EFFECTS}, - {"Supervisor::__init_cluster", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"Supervisor::__is_supervised", ATTR_IDEMPOTENT}, {"Supervisor::__is_supervisor", ATTR_IDEMPOTENT}, {"Supervisor::__node", ATTR_IDEMPOTENT}, @@ -344,6 +343,7 @@ static std::unordered_map func_attrs = { {"lookup_addr", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"lookup_autonomous_system", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"lookup_connection", ATTR_NO_ZEEK_SIDE_EFFECTS}, + {"lookup_connection_analyzer_id", ATTR_NO_ZEEK_SIDE_EFFECTS}, {"lookup_hostname", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"lookup_hostname_txt", ATTR_NO_SCRIPT_SIDE_EFFECTS}, {"lookup_location", ATTR_NO_SCRIPT_SIDE_EFFECTS}, diff --git a/src/script_opt/ProfileFunc.cc b/src/script_opt/ProfileFunc.cc index b10fbda940..a62e436774 100644 --- a/src/script_opt/ProfileFunc.cc +++ b/src/script_opt/ProfileFunc.cc @@ -442,7 +442,8 @@ TraversalCode ProfileFunc::PreExpr(const Expr* e) { return TC_ABORTSTMT; } - case EXPR_RECORD_CONSTRUCTOR: CheckRecordConstructor(e->GetType()); break; + case EXPR_RECORD_CONSTRUCTOR: + case EXPR_REC_CONSTRUCT_WITH_REC: CheckRecordConstructor(e->GetType()); break; case EXPR_SET_CONSTRUCTOR: { auto sc = static_cast<const SetConstructorExpr*>(e); diff --git a/src/script_opt/Reduce.cc b/src/script_opt/Reduce.cc index fbf5bc7eb1..344f8ec07b 100644 --- a/src/script_opt/Reduce.cc +++ b/src/script_opt/Reduce.cc @@ -57,7 +57,7 @@ static bool same_op(const Expr* op1, const Expr* op2, bool check_defs) { return def_1 == def_2 && def_1 != NO_DEF; } - else if ( op1->Tag() == EXPR_CONST ) { + if ( op1->Tag() == EXPR_CONST ) { auto op1_c = op1->AsConstExpr(); auto op2_c = op2->AsConstExpr(); @@ -67,7 +67,7 @@ static bool same_op(const Expr* op1, const Expr* op2, bool check_defs) { return same_val(op1_v, op2_v); } - else if ( op1->Tag() == EXPR_LIST ) { + if ( op1->Tag() == EXPR_LIST ) { auto op1_l = op1->AsListExpr()->Exprs(); auto op2_l = op2->AsListExpr()->Exprs(); @@ -81,8 +81,22 @@ static bool same_op(const Expr* op1, const Expr* op2, bool check_defs) { return true; } - reporter->InternalError("bad singleton tag"); - return false; + // We only get here if dealing with non-reduced operands. + auto subop1_1 = op1->GetOp1(); + auto subop1_2 = op2->GetOp1(); + ASSERT(subop1_1 && subop1_2); + + if ( ! same_expr(subop1_1, subop1_2) ) + return false; + + auto subop2_1 = op1->GetOp2(); + auto subop2_2 = op2->GetOp2(); + if ( subop2_1 && ! same_expr(subop2_1, subop2_2) ) + return false; + + auto subop3_1 = op1->GetOp3(); + auto subop3_2 = op2->GetOp3(); + return !
subop3_1 || same_expr(subop3_1, subop3_2); } static bool same_op(const ExprPtr& op1, const ExprPtr& op2, bool check_defs) { @@ -107,6 +121,7 @@ static bool same_expr(const Expr* e1, const Expr* e2, bool check_defs) { case EXPR_CLONE: case EXPR_RECORD_CONSTRUCTOR: + case EXPR_REC_CONSTRUCT_WITH_REC: case EXPR_TABLE_CONSTRUCTOR: case EXPR_SET_CONSTRUCTOR: case EXPR_VECTOR_CONSTRUCTOR: @@ -384,7 +399,7 @@ NameExprPtr Reducer::GetRetVar(TypePtr type) { return nullptr; IDPtr ret_id = install_ID("@retvar", "", false, false); - ret_id->SetType(type); + ret_id->SetType(std::move(type)); ret_id->GetOptInfo()->SetTemp(); ret_vars.insert(ret_id.get()); @@ -473,7 +488,8 @@ bool Reducer::ExprValid(const ID* id, const Expr* e1, const Expr* e2) const { has_side_effects = true; } - else if ( e1->Tag() == EXPR_RECORD_CONSTRUCTOR || e1->Tag() == EXPR_RECORD_COERCE ) + else if ( e1->Tag() == EXPR_RECORD_CONSTRUCTOR || e1->Tag() == EXPR_REC_CONSTRUCT_WITH_REC || + e1->Tag() == EXPR_RECORD_COERCE ) has_side_effects = pfs->HasSideEffects(SideEffectsOp::CONSTRUCTION, e1->GetType()); e1_se = ExprSideEffects(has_side_effects); diff --git a/src/script_opt/ScriptOpt.cc b/src/script_opt/ScriptOpt.cc index 08d156b692..c41e6cf429 100644 --- a/src/script_opt/ScriptOpt.cc +++ b/src/script_opt/ScriptOpt.cc @@ -592,7 +592,7 @@ void analyze_scripts(bool no_unused_warnings) { func.SetShouldNotAnalyze(); if ( ! have_one_to_do ) - reporter->FatalError("no matching functions/files for C++ compilation"); + reporter->FatalError("no matching functions/files for script optimization"); if ( CPP_init_hook ) { (*CPP_init_hook)(); diff --git a/src/script_opt/Stmt.cc b/src/script_opt/Stmt.cc index 6a237a07be..618d52ebda 100644 --- a/src/script_opt/Stmt.cc +++ b/src/script_opt/Stmt.cc @@ -143,8 +143,13 @@ StmtPtr ExprStmt::DoReduce(Reducer* c) { // it has a non-void type it'll generate an // assignment to a temporary. red_e_stmt = e->ReduceToSingletons(c); - else + else { e = e->Reduce(c, red_e_stmt); + // It's possible that 'e' has gone away because it was a call + // to an inlined function that doesn't have a return value. + if ( ! e ) + return red_e_stmt; + } if ( red_e_stmt ) { auto s = make_intrusive<StmtList>(red_e_stmt, ThisPtr()); @@ -735,11 +740,203 @@ StmtPtr StmtList::DoReduce(Reducer* c) { return ThisPtr(); } +static unsigned int find_rec_assignment_chain(const std::vector<StmtPtr>& stmts, unsigned int i) { + const NameExpr* targ_rec = nullptr; + std::set<int> fields_seen; + + for ( ; i < stmts.size(); ++i ) { + const auto& s = stmts[i]; + + // We're looking for either "x$a = y$b" or "x$a = x$a + y$b". + if ( s->Tag() != STMT_EXPR ) + // No way it's an assignment. + return i; + + auto se = s->AsExprStmt()->StmtExpr(); + if ( se->Tag() != EXPR_ASSIGN ) + return i; + + // The LHS of an assignment starts with a RefExpr. + auto lhs_ref = se->GetOp1(); + ASSERT(lhs_ref->Tag() == EXPR_REF); + + auto lhs = lhs_ref->GetOp1(); + if ( lhs->Tag() != EXPR_FIELD ) + // Not of the form "x$a = ...". + return i; + + auto lhs_field = lhs->AsFieldExpr()->Field(); + if ( fields_seen.count(lhs_field) > 0 ) + // Earlier in this chain we've already seen "x$a", so end the + // chain at this repeated use because it's no longer a simple + // block of field assignments. + return i; + + fields_seen.insert(lhs_field); + + auto lhs_rec = lhs->GetOp1(); + if ( lhs_rec->Tag() != EXPR_NAME ) + // Not a simple field reference, e.g. "x$y$a".
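For intuition about what this chain scanner collects: a run of statements like `x$a = y$b; x$c = y$d` ultimately collapses into a single expression driven by two parallel field-offset vectors, which is what lhs_map and rhs_map hold. A standalone sketch of that folding idea, with hypothetical types (this is not the classes' actual API):

    #include <cstdio>
    #include <vector>

    struct Rec { double fields[4]; }; // stand-in for a Zeek record value

    // Apply lhs.fields[lhs_map[i]] = rhs.fields[rhs_map[i]] for every i,
    // the way a collapsed assignment chain walks its two maps.
    void assign_fields(Rec& lhs, const Rec& rhs, const std::vector<int>& lhs_map,
                       const std::vector<int>& rhs_map) {
        for ( size_t i = 0; i < lhs_map.size(); ++i )
            lhs.fields[lhs_map[i]] = rhs.fields[rhs_map[i]];
    }

    int main() {
        Rec x{}, y{{1, 2, 3, 4}};
        assign_fields(x, y, {0, 2}, {1, 3}); // x$a = y$b; x$c = y$d
        std::printf("%g %g\n", x.fields[0], x.fields[2]); // prints: 2 4
    }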
+ return i; + + auto lhs_rec_n = lhs_rec->AsNameExpr(); + + if ( targ_rec ) { + if ( lhs_rec_n->Id() != targ_rec->Id() ) + // It's no longer "x$..." but some new variable "z$...". + return i; + } + else + targ_rec = lhs_rec_n; + } + + return i; +} + +using OpChain = std::map<const ID*, std::vector<const Stmt*>>; + +static void update_assignment_chains(const StmtPtr& s, OpChain& assign_chains, OpChain& add_chains) { + auto se = s->AsExprStmt()->StmtExpr(); + ASSERT(se->Tag() == EXPR_ASSIGN); + + // The first GetOp1() here accesses the EXPR_ASSIGN's first operand, + // which is a RefExpr; the second gets its operand, which we've guaranteed + // in find_rec_assignment_chain is a FieldExpr. + auto lhs_fe = se->GetOp1()->GetOp1()->AsFieldExpr(); + auto lhs_id = lhs_fe->GetOp1()->AsNameExpr()->Id(); + auto rhs = se->GetOp2(); + const FieldExpr* f; + OpChain* c; + + // Check whether RHS is either "y$b" or "x$a + y$b". + + if ( rhs->Tag() == EXPR_ADD ) { + auto rhs_op1 = rhs->GetOp1(); // need to see that it's "x$a" + + if ( rhs_op1->Tag() != EXPR_FIELD ) + return; + + auto rhs1_fe = rhs_op1->AsFieldExpr(); + auto rhs_op1_rec = rhs1_fe->GetOp1(); + if ( rhs_op1_rec->Tag() != EXPR_NAME || rhs_op1_rec->AsNameExpr()->Id() != lhs_id || + rhs1_fe->Field() != lhs_fe->Field() ) + return; + + auto rhs_op2 = rhs->GetOp2(); // need to see that it's "y$b" + if ( rhs_op2->Tag() != EXPR_FIELD ) + return; + + if ( ! IsArithmetic(rhs_op2->GetType()->Tag()) ) + // Avoid esoteric forms of adding. + return; + + f = rhs_op2->AsFieldExpr(); + c = &add_chains; + } + + else if ( rhs->Tag() == EXPR_FIELD ) { + f = rhs->AsFieldExpr(); + c = &assign_chains; + } + + else + // Not a RHS we know how to leverage. + return; + + auto f_rec = f->GetOp1(); + if ( f_rec->Tag() != EXPR_NAME ) + // Not a simple RHS, instead something like "y$z$b". + return; + + // If we get here, it's a keeper, record the associated statement. + auto id = f_rec->AsNameExpr()->Id(); + (*c)[id].push_back(s.get()); +} + +static StmtPtr transform_chain(const OpChain& c, ExprTag t, std::set<const Stmt*>& chain_stmts) { + IntrusivePtr<StmtList> sl; + + for ( auto& id_stmts : c ) { + auto orig_s = id_stmts.second; + + if ( ! sl ) + // Now that we have a statement, create our list and associate + // its location with the statement. + sl = with_location_of(make_intrusive<StmtList>(), orig_s[0]); + + ExprPtr e; + if ( t == EXPR_ASSIGN ) + e = make_intrusive<AssignRecordFieldsExpr>(orig_s, chain_stmts); + else if ( t == EXPR_ADD ) + e = make_intrusive<AddRecordFieldsExpr>(orig_s, chain_stmts); + else + reporter->InternalError("inconsistency transforming assignment chain"); + + e->SetLocationInfo(sl->GetLocationInfo()); + auto es = with_location_of(make_intrusive<ExprStmt>(std::move(e)), sl); + sl->Stmts().emplace_back(std::move(es)); + } + + return sl; +} + +static bool simplify_chain(const std::vector<StmtPtr>& stmts, unsigned int start, unsigned int end, + std::vector<StmtPtr>& f_stmts) { + OpChain assign_chains; + OpChain add_chains; + std::set<const Stmt*> chain_stmts; + + for ( auto i = start; i <= end; ++i ) { + auto& s = stmts[i]; + chain_stmts.insert(s.get()); + update_assignment_chains(s, assign_chains, add_chains); + } + + // An add-chain of any size is a win. For an assign-chain to be a win, + // it needs to have at least two elements, because a single "x$a = y$b" + // can be expressed using one ZAM instruction (but "x$a += y$b" cannot). + if ( add_chains.empty() ) { + bool have_useful_assign_chain = false; + for ( auto& ac : assign_chains ) + if ( ac.second.size() > 1 ) { + have_useful_assign_chain = true; + break; + } + + if ( ! have_useful_assign_chain ) + // No gains available.
+ return false; + } + + auto as_c = transform_chain(assign_chains, EXPR_ASSIGN, chain_stmts); + auto ad_c = transform_chain(add_chains, EXPR_ADD, chain_stmts); + + ASSERT(as_c || ad_c); + + if ( as_c ) + f_stmts.push_back(as_c); + if ( ad_c ) + f_stmts.push_back(ad_c); + + // At this point, chain_stmts has only the remainders that weren't removed. + for ( auto s : stmts ) + if ( chain_stmts.count(s.get()) > 0 ) + f_stmts.push_back(s); + + return true; +} + bool StmtList::ReduceStmt(unsigned int& s_i, std::vector<StmtPtr>& f_stmts, Reducer* c) { bool did_change = false; auto& stmt_i = stmts[s_i]; auto old_stmt = stmt_i; + auto chain_end = find_rec_assignment_chain(stmts, s_i); + if ( chain_end > s_i && simplify_chain(stmts, s_i, chain_end - 1, f_stmts) ) { + s_i = chain_end - 1; + return true; + } + auto stmt = stmt_i->Reduce(c); if ( stmt != old_stmt ) diff --git a/src/script_opt/UseDefs.cc b/src/script_opt/UseDefs.cc index 2ff5013c90..e0edc9a85b 100644 --- a/src/script_opt/UseDefs.cc +++ b/src/script_opt/UseDefs.cc @@ -464,6 +464,13 @@ UDs UseDefs::ExprUDs(const Expr* e) { break; } + case EXPR_REC_CONSTRUCT_WITH_REC: { + auto constructor_UDs = ExprUDs(e->GetOp1().get()); + AddInExprUDs(uds, e->GetOp2().get()); + uds = UD_Union(uds, constructor_UDs); + break; + } + case EXPR_TABLE_CONSTRUCTOR: { auto t = static_cast<const TableConstructorExpr*>(e); AddInExprUDs(uds, t->GetOp1().get()); diff --git a/src/script_opt/ZAM/Compile.h b/src/script_opt/ZAM/Compile.h index a952f2b8f2..50d2c1ce1c 100644 --- a/src/script_opt/ZAM/Compile.h +++ b/src/script_opt/ZAM/Compile.h @@ -189,6 +189,7 @@ private: const ZAMStmt CompileAddToExpr(const AddToExpr* e); const ZAMStmt CompileRemoveFromExpr(const RemoveFromExpr* e); const ZAMStmt CompileAssignExpr(const AssignExpr* e); + const ZAMStmt CompileRecFieldUpdates(const RecordFieldUpdatesExpr* e); const ZAMStmt CompileZAMBuiltin(const NameExpr* lhs, const ScriptOptBuiltinExpr* zbi); const ZAMStmt CompileAssignToIndex(const NameExpr* lhs, const IndexExpr* rhs); const ZAMStmt CompileFieldLHSAssignExpr(const FieldLHSAssignExpr* e); @@ -244,7 +245,9 @@ private: const ZAMStmt ConstructTable(const NameExpr* n, const Expr* e); const ZAMStmt ConstructSet(const NameExpr* n, const Expr* e); - const ZAMStmt ConstructRecord(const NameExpr* n, const Expr* e); + const ZAMStmt ConstructRecord(const NameExpr* n, const Expr* e) { return ConstructRecord(n, e, false); } + const ZAMStmt ConstructRecordFromRecord(const NameExpr* n, const Expr* e) { return ConstructRecord(n, e, true); } + const ZAMStmt ConstructRecord(const NameExpr* n, const Expr* e, bool is_from_rec); const ZAMStmt ConstructVector(const NameExpr* n, const Expr* e); const ZAMStmt ArithCoerce(const NameExpr* n, const Expr* e); @@ -321,7 +324,7 @@ private: // Returns a handle to state associated with building // up a list of values. - OpaqueVals* BuildVals(const ListExprPtr&); + std::unique_ptr<OpaqueVals> BuildVals(const ListExprPtr&); // "stride" is how many slots each element of l will consume.
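The BuildVals() signature change above is an ownership cleanup: the factory now hands back a std::unique_ptr, and call sites that merely borrow the object pass the raw pointer via get(). A minimal sketch of the pattern (the type and function names are placeholders, not Zeek's):

    #include <memory>

    struct Vals { int n = 0; }; // placeholder for the opaque value list

    std::unique_ptr<Vals> build_vals() { return std::make_unique<Vals>(); }

    void use_vals(const Vals* v) { /* borrows; does not own */ }

    int main() {
        auto vals = build_vals();
        use_vals(vals.get()); // freed automatically when vals leaves scope
    }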
ZInstAux* InternalBuildVals(const ListExpr* l, int stride = 1); diff --git a/src/script_opt/ZAM/Expr.cc b/src/script_opt/ZAM/Expr.cc index 6c18c02318..4670033811 100644 --- a/src/script_opt/ZAM/Expr.cc +++ b/src/script_opt/ZAM/Expr.cc @@ -26,6 +26,9 @@ const ZAMStmt ZAMCompiler::CompileExpr(const Expr* e) { case EXPR_ASSIGN: return CompileAssignExpr(static_cast<const AssignExpr*>(e)); + case EXPR_REC_ASSIGN_FIELDS: + case EXPR_REC_ADD_FIELDS: return CompileRecFieldUpdates(static_cast<const RecordFieldUpdatesExpr*>(e)); + case EXPR_INDEX_ASSIGN: { auto iae = static_cast<const IndexAssignExpr*>(e); auto t = iae->GetOp1()->GetType()->Tag(); @@ -101,7 +104,7 @@ const ZAMStmt ZAMCompiler::CompileAdd(const AggrAddExpr* e) { return AddStmt1VC(aggr, e1->AsConstExpr()); } - return AddStmtVO(aggr, BuildVals(indices)); + return AddStmtVO(aggr, BuildVals(indices).get()); } const ZAMStmt ZAMCompiler::CompileDel(const AggrDelExpr* e) { @@ -128,7 +131,7 @@ const ZAMStmt ZAMCompiler::CompileDel(const AggrDelExpr* e) { if ( index_list->Tag() != EXPR_LIST ) reporter->InternalError("non-list in \"delete\""); - auto internal_ind = std::unique_ptr<OpaqueVals>(BuildVals(index_list->AsListExprPtr())); + auto internal_ind = BuildVals(index_list->AsListExprPtr()); return DelTableVO(aggr, internal_ind.get()); } @@ -281,6 +284,77 @@ const ZAMStmt ZAMCompiler::CompileAssignExpr(const AssignExpr* e) { #include "ZAM-GenExprsDefsV.h" } +const ZAMStmt ZAMCompiler::CompileRecFieldUpdates(const RecordFieldUpdatesExpr* e) { + auto rhs = e->GetOp2()->AsNameExpr(); + + auto& rhs_map = e->RHSMap(); + + auto aux = new ZInstAux(0); + aux->map = e->LHSMap(); + aux->rhs_map = rhs_map; + + // Used to track the different types present, so we can see whether + // we can use a homogeneous operator or need a mixed one. Won't be + // needed if we're doing assignments, but handy if we're doing adds. + std::set<TypeTag> field_tags; + + size_t num_managed = 0; + + for ( auto i : rhs_map ) { + auto rt = rhs->GetType()->AsRecordType(); + auto rt_ft_i = rt->GetFieldType(i); + field_tags.insert(rt_ft_i->Tag()); + + if ( ZVal::IsManagedType(rt_ft_i) ) { + aux->is_managed.push_back(true); + ++num_managed; + } + else + // This will only be needed if is_managed winds up being true, + // but it's harmless to build it up in any case. + aux->is_managed.push_back(false); + + // The following is only needed for non-homogeneous "add"s, but + // likewise it's harmless to build it anyway. + aux->types.push_back(rt_ft_i); + } + + bool homogeneous = field_tags.size() == 1; + // Here we leverage the fact that C++ "+=" works identically for + // signed and unsigned int's. + if ( ! homogeneous && field_tags.size() == 2 && field_tags.count(TYPE_INT) > 0 && field_tags.count(TYPE_COUNT) > 0 ) + homogeneous = true; + + ZOp op; + + if ( e->Tag() == EXPR_REC_ASSIGN_FIELDS ) { + if ( num_managed == rhs_map.size() ) + // This operation allows for a simpler implementation. + op = OP_REC_ASSIGN_FIELDS_ALL_MANAGED_VV; + else if ( num_managed > 0 ) + op = OP_REC_ASSIGN_FIELDS_MANAGED_VV; + else + op = OP_REC_ASSIGN_FIELDS_VV; + } + + else if ( homogeneous ) { + if ( field_tags.count(TYPE_DOUBLE) > 0 ) + op = OP_REC_ADD_DOUBLE_FIELDS_VV; + else + // Here we leverage that += will work for both signed/unsigned.
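The "one opcode for int and count" shortcut here rests on two's-complement arithmetic: 64-bit signed and unsigned addition produce identical bit patterns, so a single += instruction can service both field types. A quick standalone check:

    #include <cstdint>
    #include <cstdio>

    int main() {
        int64_t a = -5;
        uint64_t b = static_cast<uint64_t>(a); // same bit pattern as a

        a += 7;
        b += 7;

        // Both additions yield the same bits, so one opcode suffices.
        std::printf("%d\n", static_cast<uint64_t>(a) == b); // prints: 1
    }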
+ op = OP_REC_ADD_INT_FIELDS_VV; + } + + else + op = OP_REC_ADD_FIELDS_VV; + + auto lhs = e->GetOp1()->AsNameExpr(); + auto z = GenInst(op, lhs, rhs); + z.aux = aux; + + return AddInst(z); +} + const ZAMStmt ZAMCompiler::CompileZAMBuiltin(const NameExpr* lhs, const ScriptOptBuiltinExpr* zbi) { auto op1 = zbi->GetOp1(); auto op2 = zbi->GetOp2(); @@ -1253,10 +1327,11 @@ const ZAMStmt ZAMCompiler::ConstructSet(const NameExpr* n, const Expr* e) { return AddInst(z); } -const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e) { - ASSERT(e->Tag() == EXPR_RECORD_CONSTRUCTOR); - auto rc = static_cast<const RecordConstructorExpr*>(e); - auto rt = e->GetType()->AsRecordType(); +const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e, bool is_from_rec) { + auto rec_e = is_from_rec ? e->GetOp1().get() : e; + ASSERT(rec_e->Tag() == EXPR_RECORD_CONSTRUCTOR); + auto rc = static_cast<const RecordConstructorExpr*>(rec_e); + auto rt = rec_e->GetType()->AsRecordType(); auto aux = InternalBuildVals(rc->Op().get()); @@ -1266,7 +1341,7 @@ const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e) { // constructor. aux->zvec.resize(rt->NumFields()); - if ( pfs->HasSideEffects(SideEffectsOp::CONSTRUCTION, e->GetType()) ) + if ( pfs->HasSideEffects(SideEffectsOp::CONSTRUCTION, rec_e->GetType()) ) aux->can_change_non_locals = true; ZOp op; @@ -1331,33 +1406,89 @@ const ZAMStmt ZAMCompiler::ConstructRecord(const NameExpr* n, const Expr* e) { else op = OP_CONSTRUCT_DIRECT_RECORD_V; - ZInstI z = network_time_index >= 0 ? GenInst(op, n, network_time_index) : GenInst(op, n); + ZInstI z; + + if ( is_from_rec ) { + // Map non-from-rec operand to the from-rec equivalent. + switch ( op ) { + case OP_CONSTRUCT_KNOWN_RECORD_WITH_NT_VV: op = OP_CONSTRUCT_KNOWN_RECORD_WITH_NT_FROM_VVV; break; + + case OP_CONSTRUCT_KNOWN_RECORD_V: op = OP_CONSTRUCT_KNOWN_RECORD_FROM_VV; break; + + case OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_AND_NT_VV: + op = OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_AND_NT_FROM_VVV; + break; + + case OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_V: + op = OP_CONSTRUCT_KNOWN_RECORD_WITH_INITS_FROM_VV; + break; + + // Note, no case for OP_CONSTRUCT_DIRECT_RECORD_V - shouldn't + // happen given how we construct ConstructFromRecordExpr's. + default: reporter->InternalError("bad op in ZAMCompiler::ConstructRecord"); + } + + auto cfr = static_cast<const ConstructFromRecordExpr*>(e); + auto from_n = cfr->GetOp2()->AsNameExpr(); + if ( network_time_index >= 0 ) + z = GenInst(op, n, from_n, network_time_index); + else + z = GenInst(op, n, from_n); + + aux->lhs_map = cfr->LHSMap(); + aux->rhs_map = cfr->RHSMap(); + + for ( auto i : aux->lhs_map ) { + auto& field_t = rt->GetFieldType(i); + aux->is_managed.push_back(ZVal::IsManagedType(field_t)); + } + } + + else + z = network_time_index >= 0 ? GenInst(op, n, network_time_index) : GenInst(op, n); z.aux = aux; - z.t = e->GetType(); + z.t = rec_e->GetType(); auto inst = AddInst(z); // If one of the initialization values is an unspecified vector (which // in general we can't know until run-time) then we'll need to // "concretize" it. We first see whether this is a possibility, since - // it usually isn't, by counting up how many of the record fields are - // vectors. - std::vector<int> vector_fields; // holds indices of the vector fields + // it usually isn't, by counting up how many of the initialized record + // fields are vectors. + + // First just gather up the types of all the fields, and their location + // in the target. + std::vector<std::pair<TypePtr, int>> init_field_types; + for ( int i = 0; i < z.aux->n; ++i ) { auto field_ind = map ?
(*map)[i] : i; auto& field_t = rt->GetFieldType(field_ind); - if ( field_t->Tag() == TYPE_VECTOR && field_t->Yield()->Tag() != TYPE_ANY ) - vector_fields.push_back(field_ind); + init_field_types.emplace_back(field_t, field_ind); } + if ( is_from_rec ) + // Need to also check the source record. + for ( auto i : aux->lhs_map ) { + auto& field_t = rt->GetFieldType(i); + init_field_types.emplace_back(field_t, i); + } + + // Now spin through to find the vector fields. + + std::vector<int> vector_fields; // holds indices of the vector fields + for ( auto& ft : init_field_types ) + if ( ft.first->Tag() == TYPE_VECTOR && ft.first->Yield()->Tag() != TYPE_ANY ) + vector_fields.push_back(ft.second); + if ( vector_fields.empty() ) // Common case of no vector fields, we're done. return inst; // Need to add a separate instruction for concretizing the fields. z = GenInst(OP_CONCRETIZE_VECTOR_FIELDS_V, n); - z.t = e->GetType(); + z.t = rec_e->GetType(); int nf = static_cast<int>(vector_fields.size()); z.aux = new ZInstAux(nf); z.aux->elems_has_slots = false; // we're storing field offsets, not slots diff --git a/src/script_opt/ZAM/IterInfo.h b/src/script_opt/ZAM/IterInfo.h index 5a66680cb3..be81e00e4b 100644 --- a/src/script_opt/ZAM/IterInfo.h +++ b/src/script_opt/ZAM/IterInfo.h @@ -51,9 +51,9 @@ public: if ( lv < 0 ) continue; auto& var = frame[lv]; - if ( aux->lvt_is_managed[i] ) + if ( aux->is_managed[i] ) ZVal::DeleteManagedType(var); - auto& t = aux->loop_var_types[i]; + auto& t = aux->types[i]; var = ZVal(ind_lv_p, t); } diff --git a/src/script_opt/ZAM/Low-Level.cc b/src/script_opt/ZAM/Low-Level.cc index fa1048431d..52981cb5ee 100644 --- a/src/script_opt/ZAM/Low-Level.cc +++ b/src/script_opt/ZAM/Low-Level.cc @@ -24,7 +24,9 @@ const ZAMStmt ZAMCompiler::LastInst() { return ZAMStmt(insts1.size() - 1); } const ZAMStmt ZAMCompiler::ErrorStmt() { return ZAMStmt(0); } -OpaqueVals* ZAMCompiler::BuildVals(const ListExprPtr& l) { return new OpaqueVals(InternalBuildVals(l.get())); } +std::unique_ptr<OpaqueVals> ZAMCompiler::BuildVals(const ListExprPtr& l) { + return std::make_unique<OpaqueVals>(InternalBuildVals(l.get())); +} ZInstAux* ZAMCompiler::InternalBuildVals(const ListExpr* l, int stride) { auto exprs = l->Exprs(); diff --git a/src/script_opt/ZAM/OPs/ZAM.op b/src/script_opt/ZAM/OPs/ZAM.op index 2904afb4a5..6b18b4156e 100644 --- a/src/script_opt/ZAM/OPs/ZAM.op +++ b/src/script_opt/ZAM/OPs/ZAM.op @@ -1230,6 +1230,8 @@ eval ConstructTableOrSetPre() direct-unary-op Record-Constructor ConstructRecord +direct-unary-op Rec-Construct-With-Rec ConstructRecordFromRecord + macro ConstructRecordPost() auto& r = frame[z.v1].record_val; Unref(r); @@ -1245,29 +1247,150 @@ type V eval auto init_vals = z.aux->ToZValVecWithMap(frame); ConstructRecordPost() +macro AssignFromRec() + /* The following is defined below, for use by Rec-Assign-Fields */ + SetUpRecFieldOps(lhs_map) + auto is_managed = aux->is_managed; + for ( size_t i = 0U; i < n; ++i ) + { + auto rhs_i = rhs->RawField(rhs_map[i]); + if ( is_managed[i] ) + zeek::Ref(rhs_i.ManagedVal()); + init_vals[lhs_map[i]] = rhs_i; + } + +op Construct-Known-Record-From +type VV +eval auto init_vals = z.aux->ToZValVecWithMap(frame); + AssignFromRec() + ConstructRecordPost() + +macro DoNetworkTimeInit(slot) + init_vals[slot] = ZVal(run_state::network_time); + op Construct-Known-Record-With-NT type VV eval auto init_vals = z.aux->ToZValVecWithMap(frame); - ASSERT(!
init_vals[z.v2]); - init_vals[z.v2] = ZVal(run_state::network_time); + DoNetworkTimeInit(z.v2) ConstructRecordPost() +op Construct-Known-Record-With-NT-From +type VVV +eval auto init_vals = z.aux->ToZValVecWithMap(frame); + DoNetworkTimeInit(z.v3) + AssignFromRec() + ConstructRecordPost() + +macro GenInits() + auto init_vals = z.aux->ToZValVecWithMap(frame); + for ( auto& fi : *z.aux->field_inits ) + init_vals[fi.first] = fi.second->Generate(); + op Construct-Known-Record-With-Inits type V -eval auto init_vals = z.aux->ToZValVecWithMap(frame); - for ( auto& fi : *z.aux->field_inits ) - init_vals[fi.first] = fi.second->Generate(); +eval GenInits() + ConstructRecordPost() + +op Construct-Known-Record-With-Inits-From +type VV +eval GenInits() + AssignFromRec() ConstructRecordPost() op Construct-Known-Record-With-Inits-And-NT type VV -eval auto init_vals = z.aux->ToZValVecWithMap(frame); - for ( auto& fi : *z.aux->field_inits ) - init_vals[fi.first] = fi.second->Generate(); - ASSERT(! init_vals[z.v2]); - init_vals[z.v2] = ZVal(run_state::network_time); +eval GenInits() + DoNetworkTimeInit(z.v2) ConstructRecordPost() +op Construct-Known-Record-With-Inits-And-NT-From +type VVV +eval GenInits() + DoNetworkTimeInit(z.v3) + AssignFromRec() + ConstructRecordPost() + +macro SetUpRecFieldOps(which_lhs_map) + auto lhs = frame[z.v1].record_val; + auto rhs = frame[z.v2].record_val; + auto aux = z.aux; + auto& lhs_map = aux->which_lhs_map; + auto& rhs_map = aux->rhs_map; + auto n = rhs_map.size(); + +op Rec-Assign-Fields +op1-read +type VV +eval SetUpRecFieldOps(map) + for ( size_t i = 0U; i < n; ++i ) + lhs->RawOptField(lhs_map[i]) = rhs->RawField(rhs_map[i]); + +macro DoManagedRecAssign() + auto is_managed = aux->is_managed; + for ( size_t i = 0U; i < n; ++i ) + if ( is_managed[i] ) + { + auto& lhs_i = lhs->RawOptField(lhs_map[i]); + auto rhs_i = rhs->RawField(rhs_map[i]); + zeek::Ref(rhs_i.ManagedVal()); + if ( lhs_i ) + ZVal::DeleteManagedType(*lhs_i); + lhs_i = rhs_i; + } + else + lhs->RawOptField(lhs_map[i]) = rhs->RawField(rhs_map[i]); +op Rec-Assign-Fields-Managed +op1-read +type VV +eval SetUpRecFieldOps(map) + DoManagedRecAssign() + +op Rec-Assign-Fields-All-Managed +op1-read +type VV +eval SetUpRecFieldOps(map) + for ( size_t i = 0U; i < n; ++i ) + { + auto& lhs_i = lhs->RawOptField(lhs_map[i]); + auto rhs_i = rhs->RawField(rhs_map[i]); + zeek::Ref(rhs_i.ManagedVal()); + if ( lhs_i ) + ZVal::DeleteManagedType(*lhs_i); + lhs_i = rhs_i; + } + +op Rec-Add-Int-Fields +op1-read +type VV +eval SetUpRecFieldOps(map) + for ( size_t i = 0U; i < n; ++i ) + lhs->RawField(lhs_map[i]).int_val += rhs->RawField(rhs_map[i]).int_val; + +op Rec-Add-Double-Fields +op1-read +type VV +eval SetUpRecFieldOps(map) + for ( size_t i = 0U; i < n; ++i ) + lhs->RawField(lhs_map[i]).double_val += rhs->RawField(rhs_map[i]).double_val; + +op Rec-Add-Fields +op1-read +type VV +eval SetUpRecFieldOps(map) + auto& types = aux->types; + for ( size_t i = 0U; i < n; ++i ) + { + auto& lhs_i = lhs->RawField(lhs_map[i]); + auto rhs_i = rhs->RawField(rhs_map[i]); + auto tag = types[i]->Tag(); + if ( tag == TYPE_INT ) + lhs_i.int_val += rhs_i.int_val; + else if ( tag == TYPE_COUNT ) + lhs_i.uint_val += rhs_i.uint_val; + else + lhs_i.double_val += rhs_i.double_val; + } + # Special instruction for concretizing vectors that are fields in a # newly-constructed record. "aux" holds which fields in the record to # inspect. 
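Note the ordering inside the managed-assignment ops above: the new value is Ref'd before the old one is released, which keeps self-assignment (a LHS field aliasing the RHS value) safe. The same discipline in a standalone, hypothetical refcounting sketch (Obj and safe_assign are illustrations, not Zeek types):

    #include <cstdio>

    struct Obj {
        int refs = 1;
        void Ref() { ++refs; }
        void Unref() { if ( --refs == 0 ) delete this; }
    };

    // Take the new reference first; only then drop the old one. Reversing
    // the two steps would free the object when dst and src alias each other.
    void safe_assign(Obj*& dst, Obj* src) {
        src->Ref();
        if ( dst )
            dst->Unref();
        dst = src;
    }

    int main() {
        Obj* a = new Obj;
        safe_assign(a, a); // self-assignment stays safe
        std::printf("%d\n", a->refs); // prints: 1
        a->Unref();
    }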
@@ -2414,7 +2537,7 @@ internal-op Subnet-To-Addr type VV eval auto addr_v = make_intrusive<AddrVal>(frame[z.v2].subnet_val->Prefix()); Unref(frame[z.v1].addr_val); - frame[z.v1] = ZVal(addr_v); + frame[z.v1] = ZVal(std::move(addr_v)); internal-op Sub-Bytes type VVVV diff --git a/src/script_opt/ZAM/Stmt.cc b/src/script_opt/ZAM/Stmt.cc index 2749729b1c..a8372ba332 100644 --- a/src/script_opt/ZAM/Stmt.cc +++ b/src/script_opt/ZAM/Stmt.cc @@ -78,7 +78,7 @@ const ZAMStmt ZAMCompiler::CompilePrint(const PrintStmt* ps) { return Print1C(e0->AsConstExpr()); } - return PrintO(BuildVals(l)); + return PrintO(BuildVals(l).get()); } const ZAMStmt ZAMCompiler::CompileExpr(const ExprStmt* es) { @@ -711,8 +711,8 @@ const ZAMStmt ZAMCompiler::LoopOverTable(const ForStmt* f, const NameExpr* val) int slot = id->IsBlank() ? -1 : FrameSlot(id); aux->loop_vars.push_back(slot); auto& t = id->GetType(); - aux->loop_var_types.push_back(t); - aux->lvt_is_managed.push_back(ZVal::IsManagedType(t)); + aux->types.push_back(t); + aux->is_managed.push_back(ZVal::IsManagedType(t)); } bool no_loop_vars = (num_unused == loop_vars->length()); diff --git a/src/script_opt/ZAM/ZInst.h b/src/script_opt/ZAM/ZInst.h index 8bde9bc571..a0d56e9c73 100644 --- a/src/script_opt/ZAM/ZInst.h +++ b/src/script_opt/ZAM/ZInst.h @@ -484,20 +484,34 @@ public: // store here. bool can_change_non_locals = false; - // The following is used for constructing records, to map elements in - // slots/constants/types to record field offsets. + // The following is used for constructing records or in record chain + // operations, to map elements in slots/constants/types to record field + // offsets. std::vector<int> map; + // The following is used when we need two maps, a LHS one (done with + // the above) and a RHS one. + std::vector<int> rhs_map; + + // ... and the following when we need *three* (for constructing certain + // types of records). We could hack it in by adding onto "map" but + // this is cleaner, and we're not really concerned with the size of + // ZAM auxiliary information as it's not that commonly used, and doesn't + // grow during execution. + std::vector<int> lhs_map; + + // For operations that need to track types corresponding to other vectors. + std::vector<TypePtr> types; + + // For operations that mix managed and unmanaged assignments. + std::vector<bool> is_managed; + ///// The following four apply to looping over the elements of tables. // Frame slots of iteration variables, such as "[v1, v2, v3] in aggr". // A negative value means "skip assignment". std::vector<int> loop_vars; - // Their types and whether they're managed. - std::vector<TypePtr> loop_var_types; - std::vector<bool> lvt_is_managed; - // Type associated with the "value" entry, for "k, value in aggr" // iteration. TypePtr value_var_type; diff --git a/src/spicy/manager.cc b/src/spicy/manager.cc index 9ad40b10ad..1a9420e22a 100644 --- a/src/spicy/manager.cc +++ b/src/spicy/manager.cc @@ -897,14 +897,21 @@ void Manager::disableReplacedAnalyzers() { if ( file_mgr->Lookup(replaces, false) || packet_mgr->Lookup(replaces, false) ) reporter->FatalError("cannot replace '%s' analyzer with a protocol analyzer", replaces); - auto tag = analyzer_mgr->GetAnalyzerTag(replaces); - if ( ! tag ) { + auto component = analyzer_mgr->Lookup(replaces, false); + if ( !
component ) { SPICY_DEBUG(hilti::rt::fmt("%s is supposed to replace protocol analyzer %s, but that does not exist", info.name_analyzer, replaces)); continue; } + auto tag = component->Tag(); + if ( analyzer_mgr->HasComponentMapping(tag) ) + reporter->FatalError( + "%s: protocol analyzer %s is already mapped to a different analyzer; cannot replace an analyzer " + "multiple times", + info.name_analyzer.c_str(), component->Name().c_str()); + SPICY_DEBUG(hilti::rt::fmt("%s replaces existing protocol analyzer %s", info.name_analyzer, replaces)); info.replaces = tag; analyzer_mgr->DisableAnalyzer(tag); @@ -928,10 +935,17 @@ void Manager::disableReplacedAnalyzers() { continue; } + auto tag = component->Tag(); + if ( file_mgr->HasComponentMapping(tag) ) + reporter->FatalError( + "%s: file analyzer %s is already mapped to a different analyzer; cannot replace an analyzer multiple " + "times", + info.name_analyzer.c_str(), component->Name().c_str()); + SPICY_DEBUG(hilti::rt::fmt("%s replaces existing file analyzer %s", info.name_analyzer, replaces)); - info.replaces = component->Tag(); + info.replaces = tag; component->SetEnabled(false); - file_mgr->AddComponentMapping(component->Tag(), info.tag); + file_mgr->AddComponentMapping(tag, info.tag); } for ( auto& info : _packet_analyzers_by_type ) { @@ -948,10 +962,17 @@ void Manager::disableReplacedAnalyzers() { continue; } + auto tag = component->Tag(); + if ( packet_mgr->HasComponentMapping(tag) ) + reporter->FatalError( + "%s: packet analyzer %s is already mapped to a different analyzer; cannot replace an analyzer multiple " + "times", + info.name_analyzer.c_str(), component->Name().c_str()); + SPICY_DEBUG(hilti::rt::fmt("%s replaces existing packet analyzer %s", info.name_analyzer, replaces)); - info.replaces = component->Tag(); + info.replaces = tag; component->SetEnabled(false); - packet_mgr->AddComponentMapping(component->Tag(), info.tag); + packet_mgr->AddComponentMapping(tag, info.tag); } } diff --git a/src/spicy/runtime-support.cc b/src/spicy/runtime-support.cc index 610e0a0153..f5afd37461 100644 --- a/src/spicy/runtime-support.cc +++ b/src/spicy/runtime-support.cc @@ -133,15 +133,15 @@ TypePtr rt::create_record_type(const std::string& ns, const std::string& id, auto decls = std::make_unique<type_decl_list>(); for ( const auto& f : fields ) { - auto attrs = make_intrusive<detail::Attributes>(nullptr, true, false); + auto attrs = make_intrusive<::zeek::detail::Attributes>(nullptr, true, false); if ( f.is_optional ) { - auto optional_ = make_intrusive<detail::Attr>(detail::ATTR_OPTIONAL); + auto optional_ = make_intrusive<::zeek::detail::Attr>(::zeek::detail::ATTR_OPTIONAL); attrs->AddAttr(std::move(optional_)); } if ( f.is_log ) { - auto log_ = make_intrusive<detail::Attr>(detail::ATTR_LOG); + auto log_ = make_intrusive<::zeek::detail::Attr>(::zeek::detail::ATTR_LOG); attrs->AddAttr(std::move(log_)); } diff --git a/src/spicy/runtime-support.h b/src/spicy/runtime-support.h index f2a122b0c9..9ffef2d9d0 100644 --- a/src/spicy/runtime-support.h +++ b/src/spicy/runtime-support.h @@ -19,8 +19,11 @@ #include #include #include +#include #include "zeek/Desc.h" +#include "zeek/IntrusivePtr.h" +#include "zeek/Type.h" #include "zeek/Val.h" #include "zeek/spicy/cookie.h" #include "zeek/spicy/manager.h" @@ -59,15 +62,22 @@ public: }; /** - * Exception thrown by event generation code if there's a type mismatch - * between the Spicy-side value and what the Zeek event expects. + * Exception thrown if there's a type mismatch between the Spicy and Zeek sides.
 */
class TypeMismatch : public UsageError {
+    using UsageError::UsageError;
+};
+
+/**
+ * Exception thrown by event generation code if there's a type mismatch between
+ * a Spicy-side parameter value and what the Zeek event expects.
+ */
+class ParameterMismatch : public TypeMismatch {
 public:
-    TypeMismatch(const std::string_view& msg, std::string_view location = "")
-        : UsageError(hilti::rt::fmt("Event parameter mismatch, %s", msg)) {}
-    TypeMismatch(const std::string_view& have, const TypePtr& want, std::string_view location = "")
-        : TypeMismatch(_fmt(have, want)) {}
+    ParameterMismatch(std::string_view msg, std::string_view location = "")
+        : TypeMismatch(hilti::rt::fmt("Event parameter mismatch, %s", msg)) {}
+    ParameterMismatch(std::string_view have, const TypePtr& want, std::string_view location = "")
+        : ParameterMismatch(_fmt(have, want)) {}

 private:
     std::string _fmt(const std::string_view& have, const TypePtr& want) {
@@ -90,13 +100,13 @@ public:
 * Begins registration of a Spicy EVT module. All subsequent `register_*()`
 * function calls will be associated with this module for documentation purposes.
 */
-void register_spicy_module_begin(const std::string& name, const std::string& description);
+void register_spicy_module_begin(const std::string& id, const std::string& description);

/**
 * Registers a Spicy protocol analyzer and its EVT meta information with the
 * plugin's runtime.
 */
-void register_protocol_analyzer(const std::string& name, hilti::rt::Protocol proto,
+void register_protocol_analyzer(const std::string& id, hilti::rt::Protocol proto,
                                const hilti::rt::Vector<::zeek::spicy::rt::PortRange>& ports,
                                const std::string& parser_orig, const std::string& parser_resp,
                                const std::string& replaces, const std::string& linker_scope);

@@ -105,7 +115,7 @@ void register_protocol_analyzer(const std::string& name, hilti::rt::Protocol pro
 * Registers a Spicy file analyzer and its EVT meta information with the
 * plugin's runtime.
 */
-void register_file_analyzer(const std::string& name, const hilti::rt::Vector& mime_types,
+void register_file_analyzer(const std::string& id, const hilti::rt::Vector& mime_types,
                            const std::string& parser, const std::string& replaces, const std::string& linker_scope);

/** Reports a Zeek-side "weird". */
void weird(const std::string& id, const std::string& addl);

@@ -115,7 +125,7 @@
 * Registers a Spicy packet analyzer and its EVT meta information with the
 * plugin's runtime.
 */
-void register_packet_analyzer(const std::string& name, const std::string& parser, const std::string& replaces,
+void register_packet_analyzer(const std::string& id, const std::string& parser, const std::string& replaces,
                              const std::string& linker_scope);

/** Registers a Spicy-generated type to make it available inside Zeek.
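
A sketch of how the `register_*()` family above gets invoked (arguments hypothetical; in practice these calls are emitted by spicyz-generated glue code):

    zeek::spicy::rt::register_spicy_module_begin("Demo", "demo analyzers");
    zeek::spicy::rt::register_packet_analyzer("spicy::Demo", "Demo::Packet",
                                              /* replaces = */ "", /* linker_scope = */ "demo");
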
*/ @@ -538,7 +548,7 @@ inline ValPtr to_val(const hilti::rt::DeferredExpression& t, const TypePtr */ inline ValPtr to_val(const std::string& s, const TypePtr& target) { if ( target->Tag() != TYPE_STRING ) - throw TypeMismatch("string", target); + throw ParameterMismatch("string", target); return make_intrusive(s); } @@ -549,7 +559,7 @@ inline ValPtr to_val(const std::string& s, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Bytes& b, const TypePtr& target) { if ( target->Tag() != TYPE_STRING ) - throw TypeMismatch("string", target); + throw ParameterMismatch("string", target); return make_intrusive(b.str()); } @@ -568,7 +578,7 @@ inline ValPtr to_val(hilti::rt::integer::safe i, const TypePtr& target) { if ( target->Tag() == TYPE_INT ) return val_mgr->Int(i); - throw TypeMismatch("uint64", target); + throw ParameterMismatch("uint64", target); } else { if ( target->Tag() == TYPE_INT ) @@ -578,10 +588,10 @@ inline ValPtr to_val(hilti::rt::integer::safe i, const TypePtr& target) { if ( i >= 0 ) return val_mgr->Count(i); else - throw TypeMismatch("negative int64", target); + throw ParameterMismatch("negative int64", target); } - throw TypeMismatch("int64", target); + throw ParameterMismatch("int64", target); } } @@ -599,7 +609,7 @@ ValPtr to_val(const hilti::rt::ValueReference& t, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Bool& b, const TypePtr& target) { if ( target->Tag() != TYPE_BOOL ) - throw TypeMismatch("bool", target); + throw ParameterMismatch("bool", target); return val_mgr->Bool(b); } @@ -610,7 +620,7 @@ inline ValPtr to_val(const hilti::rt::Bool& b, const TypePtr& target) { */ inline ValPtr to_val(double r, const TypePtr& target) { if ( target->Tag() != TYPE_DOUBLE ) - throw TypeMismatch("double", target); + throw ParameterMismatch("double", target); return make_intrusive(r); } @@ -621,7 +631,7 @@ inline ValPtr to_val(double r, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Address& d, const TypePtr& target) { if ( target->Tag() != TYPE_ADDR ) - throw TypeMismatch("addr", target); + throw ParameterMismatch("addr", target); auto in_addr = d.asInAddr(); if ( auto v4 = std::get_if(&in_addr) ) @@ -638,7 +648,7 @@ inline ValPtr to_val(const hilti::rt::Address& d, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Port& p, const TypePtr& target) { if ( target->Tag() != TYPE_PORT ) - throw TypeMismatch("port", target); + throw ParameterMismatch("port", target); switch ( p.protocol().value() ) { case hilti::rt::Protocol::TCP: return val_mgr->Port(p.port(), ::TransportProto::TRANSPORT_TCP); @@ -657,7 +667,7 @@ inline ValPtr to_val(const hilti::rt::Port& p, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Interval& i, const TypePtr& target) { if ( target->Tag() != TYPE_INTERVAL ) - throw TypeMismatch("interval", target); + throw ParameterMismatch("interval", target); return make_intrusive(i.seconds()); } @@ -668,7 +678,7 @@ inline ValPtr to_val(const hilti::rt::Interval& i, const TypePtr& target) { */ inline ValPtr to_val(const hilti::rt::Time& t, const TypePtr& target) { if ( target->Tag() != TYPE_TIME ) - throw TypeMismatch("time", target); + throw ParameterMismatch("time", target); return make_intrusive(t.seconds()); } @@ -680,7 +690,7 @@ inline ValPtr to_val(const hilti::rt::Time& t, const TypePtr& target) { template inline ValPtr to_val(const hilti::rt::Vector& v, const TypePtr& target) { if ( target->Tag() != TYPE_VECTOR && target->Tag() != TYPE_LIST ) - throw TypeMismatch("expected vector or 
list", target); + throw ParameterMismatch("expected vector or list", target); auto vt = cast_intrusive(target); auto zv = make_intrusive(vt); @@ -697,17 +707,17 @@ inline ValPtr to_val(const hilti::rt::Vector& v, const TypePtr& target) { template inline ValPtr to_val(const hilti::rt::Map& m, const TypePtr& target) { if constexpr ( hilti::rt::is_tuple::value ) - throw TypeMismatch("internal error: sets with tuples not yet supported in to_val()"); + throw ParameterMismatch("internal error: sets with tuples not yet supported in to_val()"); if ( target->Tag() != TYPE_TABLE ) - throw TypeMismatch("map", target); + throw ParameterMismatch("map", target); auto tt = cast_intrusive(target); if ( tt->IsSet() ) - throw TypeMismatch("map", target); + throw ParameterMismatch("map", target); if ( tt->GetIndexTypes().size() != 1 ) - throw TypeMismatch("map with non-tuple elements", target); + throw ParameterMismatch("map with non-tuple elements", target); auto zv = make_intrusive(tt); @@ -727,20 +737,20 @@ inline ValPtr to_val(const hilti::rt::Map& m, const TypePtr& target) { template inline ValPtr to_val(const hilti::rt::Set& s, const TypePtr& target) { if ( target->Tag() != TYPE_TABLE ) - throw TypeMismatch("set", target); + throw ParameterMismatch("set", target); auto tt = cast_intrusive(target); if ( ! tt->IsSet() ) - throw TypeMismatch("set", target); + throw ParameterMismatch("set", target); auto zv = make_intrusive(tt); for ( const auto& i : s ) { if constexpr ( hilti::rt::is_tuple::value ) - throw TypeMismatch("internal error: sets with tuples not yet supported in to_val()"); + throw ParameterMismatch("internal error: sets with tuples not yet supported in to_val()"); else { if ( tt->GetIndexTypes().size() != 1 ) - throw TypeMismatch("set with non-tuple elements", target); + throw ParameterMismatch("set with non-tuple elements", target); auto idx = to_val(i, tt->GetIndexTypes()[0]); zv->Assign(std::move(idx), nullptr); @@ -821,7 +831,7 @@ inline void set_record_field(RecordVal* rval, const IntrusivePtr& rt // Field must be &optional or &default. if ( auto attrs = rtype->FieldDecl(idx)->attrs; ! attrs || ! 
(attrs->Find(detail::ATTR_DEFAULT) || attrs->Find(detail::ATTR_OPTIONAL)) ) - throw TypeMismatch(hilti::rt::fmt("missing initialization for field '%s'", rtype->FieldName(idx))); + throw ParameterMismatch(hilti::rt::fmt("missing initialization for field '%s'", rtype->FieldName(idx))); } } } @@ -833,12 +843,12 @@ inline void set_record_field(RecordVal* rval, const IntrusivePtr& rt template::value>*> inline ValPtr to_val(const T& t, const TypePtr& target) { if ( target->Tag() != TYPE_RECORD ) - throw TypeMismatch("tuple", target); + throw ParameterMismatch("tuple", target); auto rtype = cast_intrusive(target); if ( std::tuple_size::value != rtype->NumFields() ) - throw TypeMismatch("tuple", target); + throw ParameterMismatch("tuple", target); auto rval = make_intrusive(rtype); size_t idx = 0; @@ -856,12 +866,12 @@ inline ValPtr to_val(const hilti::rt::Bitfield& v, const TypePtr& target) using Bitfield = hilti::rt::Bitfield; if ( target->Tag() != TYPE_RECORD ) - throw TypeMismatch("bitfield", target); + throw ParameterMismatch("bitfield", target); auto rtype = cast_intrusive(target); if ( sizeof...(Ts) - 1 != rtype->NumFields() ) - throw TypeMismatch("bitfield", target); + throw ParameterMismatch("bitfield", target); auto rval = make_intrusive(rtype); size_t idx = 0; @@ -887,7 +897,7 @@ constexpr bool is_optional = is_optional_impl::value>*> inline ValPtr to_val(const T& t, const TypePtr& target) { if ( target->Tag() != TYPE_RECORD ) - throw TypeMismatch("struct", target); + throw ParameterMismatch("struct", target); auto rtype = cast_intrusive(target); @@ -898,7 +908,7 @@ inline ValPtr to_val(const T& t, const TypePtr& target) { t.__visit([&](std::string_view name, const auto& val) { if ( idx >= num_fields ) - throw TypeMismatch(hilti::rt::fmt("no matching record field for field '%s'", name)); + throw ParameterMismatch(hilti::rt::fmt("no matching record field for field '%s'", name)); // Special-case: Lift up anonymous bitfields (which always come as std::optionals). if ( name == "" ) { @@ -924,7 +934,7 @@ inline ValPtr to_val(const T& t, const TypePtr& target) { std::string field_name = rtype->FieldName(idx); if ( field_name != name ) - throw TypeMismatch( + throw ParameterMismatch( hilti::rt::fmt("mismatch in field name: expected '%s', found '%s'", name, field_name)); set_record_field(rval.get(), rtype, idx++, val); @@ -934,7 +944,7 @@ inline ValPtr to_val(const T& t, const TypePtr& target) { // We already check above that all Spicy-side fields are mapped so we // can only hit this if there are uninitialized Zeek-side fields left. if ( idx != num_fields ) - throw TypeMismatch(hilti::rt::fmt("missing initialization for field '%s'", rtype->FieldName(idx + 1))); + throw ParameterMismatch(hilti::rt::fmt("missing initialization for field '%s'", rtype->FieldName(idx + 1))); return rval; } @@ -959,7 +969,7 @@ inline ValPtr to_val_for_transport_proto(int64_t val, const TypePtr& target) { template::value>*> inline ValPtr to_val(const T& t, const TypePtr& target) { if ( target->Tag() != TYPE_ENUM ) - throw TypeMismatch("enum", target); + throw ParameterMismatch("enum", target); // We'll usually be getting an int64_t for T, but allow other signed ints // as well. @@ -969,7 +979,7 @@ inline ValPtr to_val(const T& t, const TypePtr& target) { // Special case: map enum values to Zeek's semantics. if ( target->GetName() == "transport_proto" ) { if ( ! 
std::is_same_v ) - throw TypeMismatch(hilti::rt::demangle(typeid(t).name()), target); + throw ParameterMismatch(hilti::rt::demangle(typeid(t).name()), target); return to_val_for_transport_proto(it, target); } @@ -984,4 +994,328 @@ inline ValPtr to_val(const T& t, const TypePtr& target) { return target->AsEnumType()->GetEnumVal(bt); } + +/** + * Returns the Zeek value associated with a global Zeek-side ID. Throws if the + * ID does not exist. + */ +inline ValPtr get_value(const std::string& name) { + if ( auto id = zeek::detail::global_scope()->Find(name) ) + return id->GetVal(); + else + throw InvalidValue(util::fmt("no such Zeek variable: '%s'", name.c_str())); +} + +namespace detail { +/** Helper to raise a ``TypeMismatch`` exception. */ +inline auto type_mismatch(const ValPtr& v, const char* expected) { + throw TypeMismatch(util::fmt("type mismatch in Zeek value: expected %s, but got %s", expected, + ::zeek::type_name(v->GetType()->Tag()))); +} + +/** + * Helper to check the type of Zeek value against an expected type tag, raising + * a ``TypeMismatch`` exception on mismatch. + */ +inline auto check_type(const ValPtr& v, ::zeek::TypeTag type_tag, const char* expected) { + if ( v->GetType()->Tag() != type_tag ) + type_mismatch(v, expected); +} + +} // namespace detail + +/** Type for a Zeek record value. */ +using ValRecordPtr = ::zeek::IntrusivePtr<::zeek::RecordVal>; + +/** Type for a Zeek set value. */ +using ValSetPtr = ::zeek::IntrusivePtr<::zeek::TableVal>; + +/** Type for a Zeek table value. */ +using ValTablePtr = ::zeek::IntrusivePtr<::zeek::TableVal>; + +/** Type for a Zeek vector value. */ +using ValVectorPtr = ::zeek::IntrusivePtr<::zeek::VectorVal>; + +/** Converts a Zeek `addr` value to its Spicy equivalent. Throws on error. */ +inline ::hilti::rt::Address as_address(const ValPtr& v) { + detail::check_type(v, TYPE_ADDR, "address"); + return ::hilti::rt::Address(v->AsAddr()); +} + +/** Converts a Zeek `bool` value to its Spicy equivalent. Throws on error. */ +inline ::hilti::rt::Bool as_bool(const ValPtr& v) { + detail::check_type(v, TYPE_BOOL, "bool"); + return ::hilti::rt::Bool(v->AsBool()); +} + +/** Converts a Zeek `count` value to its Spicy equivalent. Throws on error. */ +inline hilti::rt::integer::safe as_count(const ValPtr& v) { + detail::check_type(v, TYPE_COUNT, "count"); + return v->AsCount(); +} + +/** Converts a Zeek `double` value to its Spicy equivalent. Throws on error. */ +inline double as_double(const ValPtr& v) { + detail::check_type(v, TYPE_DOUBLE, "double"); + return v->AsDouble(); +} + +/** + * Converts a Zeek `enum` value to a string containing the (unscoped) label + * name. Throws on error. + */ +inline std::string as_enum(const ValPtr& v) { + detail::check_type(v, TYPE_ENUM, "enum"); + // Zeek returns the name as "::", we just want the enum name. + return hilti::rt::rsplit1(v->GetType()->AsEnumType()->Lookup(v->AsEnum()), "::").second; +} + +/** Converts a Zeek `int` value to its Spicy equivalent. Throws on error. */ +inline hilti::rt::integer::safe as_int(const ValPtr& v) { + detail::check_type(v, TYPE_INT, "int"); + return v->AsInt(); +} + +/** Converts a Zeek `interval` value to its Spicy equivalent. Throws on error. */ +inline ::hilti::rt::Interval as_interval(const ValPtr& v) { + detail::check_type(v, TYPE_INTERVAL, "interval"); + return ::hilti::rt::Interval(v->AsInterval(), hilti::rt::Interval::SecondTag{}); +} + +/** Converts a Zeek `port` value to its Spicy equivalent. Throws on error. 
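
An illustrative use of these checked conversions together with `get_value()` (`Telemetry::metrics_port` is a Zeek global of type `port` that also appears later in this diff):

    auto p = zeek::spicy::rt::as_port(zeek::spicy::rt::get_value("Telemetry::metrics_port"));
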
*/ +inline ::hilti::rt::Port as_port(const ValPtr& v) { + detail::check_type(v, TYPE_PORT, "port"); + auto p = v->AsPortVal(); + // Wrap port number into safe integer to catch any overflows (Zeek returns + // an uint32, while HILTI wants an uint16). + return ::hilti::rt::Port(hilti::rt::integer::safe(p->Port()), p->PortType()); +} + +/** Converts a Zeek `record` value to its Spicy equivalent. Throws on error. */ +inline ValRecordPtr as_record(const ValPtr& v) { + detail::check_type(v, TYPE_RECORD, "record"); + return ::zeek::cast_intrusive<::zeek::RecordVal>(v); +} + +/** Converts a Zeek `set` value to its Spicy equivalent. Throws on error. */ +inline ValSetPtr as_set(const ValPtr& v) { + detail::check_type(v, TYPE_TABLE, "set"); + + if ( ! v->AsTableVal()->GetType()->IsSet() ) + detail::type_mismatch(v, "set"); + + return ::zeek::cast_intrusive<::zeek::TableVal>(v); +} + +/** Converts a Zeek `string` value to its Spicy equivalent. Throws on error. */ +inline hilti::rt::Bytes as_string(const ValPtr& v) { + detail::check_type(v, TYPE_STRING, "string"); + auto str = v->AsString(); + return hilti::rt::Bytes(reinterpret_cast(str->Bytes()), str->Len()); +} + +/** Converts a Zeek `subnet` value to its Spicy equivalent. Throws on error. */ +inline ::hilti::rt::Network as_subnet(const ValPtr& v) { + detail::check_type(v, TYPE_SUBNET, "subnet"); + auto subnet = v->AsSubNet(); + return ::hilti::rt::Network(subnet.Prefix(), subnet.Length()); +} + +/** Converts a Zeek `table` value to its Spicy equivalent. Throws on error. */ +inline ValTablePtr as_table(const ValPtr& v) { + detail::check_type(v, TYPE_TABLE, "table"); + + if ( v->AsTableVal()->GetType()->IsSet() ) + detail::type_mismatch(v, "table"); + + return ::zeek::cast_intrusive<::zeek::TableVal>(v); +} + +/** Converts a Zeek `time` value to its Spicy equivalent. Throws on error. */ +inline ::hilti::rt::Time as_time(const ValPtr& v) { + detail::check_type(v, TYPE_TIME, "time"); + return ::hilti::rt::Time(v->AsTime(), hilti::rt::Time::SecondTag{}); +} + +/** Converts a Zeek `vector` value to its Spicy equivalent. Throws on error. */ +inline ValVectorPtr as_vector(const ValPtr& v) { + detail::check_type(v, TYPE_VECTOR, "vector"); + return ::zeek::cast_intrusive<::zeek::VectorVal>(v); +} + + +/** Retrieves a global Zeek variable of assumed type `addr`. Throws on error. */ +inline hilti::rt::Address get_address(const std::string& name) { return as_address(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `bool`. Throws on error. */ +inline hilti::rt::Bool get_bool(const std::string& name) { return as_bool(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `count`. Throws on error. */ +inline hilti::rt::integer::safe get_count(const std::string& name) { return as_count(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `double`. Throws on error. */ +inline double get_double(const std::string& name) { return as_double(get_value(name)); } + +/** + * Retrieves a global Zeek variable of assumed type `enum` as a string + * containing the (unscoped) label name. Throws on error. + */ +inline std::string get_enum(const std::string& name) { return as_enum(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `int`. Throws on error. */ +inline hilti::rt::integer::safe get_int(const std::string& name) { return as_int(get_value(name)); } + +/** Retrieves a global Zeek variable of assumed type `interval`. Throws on error. 
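
For example, the same lookup through the one-call convenience layer (assuming the standard Zeek global `Log::default_rotation_interval` of type `interval`):

    auto rotation = zeek::spicy::rt::get_interval("Log::default_rotation_interval");
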
*/
+inline hilti::rt::Interval get_interval(const std::string& name) { return as_interval(get_value(name)); }
+
+/** Retrieves a global Zeek variable of assumed type `port`. Throws on error. */
+inline hilti::rt::Port get_port(const std::string& name) { return as_port(get_value(name)); }
+
+/** Retrieves a global Zeek variable of assumed type `record`. Throws on error. */
+inline ValRecordPtr get_record(const std::string& name) { return as_record(get_value(name)); }
+
+/** Retrieves a global Zeek variable of assumed type `set`. Throws on error. */
+inline ValSetPtr get_set(const std::string& name) { return as_set(get_value(name)); }
+
+/** Retrieves a global Zeek variable of assumed type `string`. Throws on error. */
+inline hilti::rt::Bytes get_string(const std::string& name) { return as_string(get_value(name)); }
+
+/** Retrieves a global Zeek variable of assumed type `subnet`. Throws on error. */
+inline hilti::rt::Network get_subnet(const std::string& name) { return as_subnet(get_value(name)); }
+
+/** Retrieves a global Zeek variable of assumed type `table`. Throws on error. */
+inline ValTablePtr get_table(const std::string& name) { return as_table(get_value(name)); }
+
+/** Retrieves a global Zeek variable of assumed type `time`. Throws on error. */
+inline hilti::rt::Time get_time(const std::string& name) { return as_time(get_value(name)); }
+
+/** Retrieves a global Zeek variable of assumed type `vector`. Throws on error. */
+inline ValVectorPtr get_vector(const std::string& name) { return as_vector(get_value(name)); }
+
+/** Retrieves the value of a Zeek record field. Throws on error. */
+inline ::zeek::ValPtr record_field(const zeek::spicy::rt::ValRecordPtr& v, const std::string& field) {
+    auto index = v->GetType()->AsRecordType()->FieldOffset(field.c_str());
+    if ( index < 0 )
+        throw InvalidValue(util::fmt("no such record field: %s", field.c_str()));
+
+    if ( auto x = v->GetFieldOrDefault(index) )
+        return x;
+    else
+        throw InvalidValue(util::fmt("record field is not set: %s", field.c_str()));
+}
+
+/** Retrieves the value of a Zeek record field. Throws on error. */
+inline ::zeek::ValPtr record_field(const std::string& name, const std::string& index) {
+    return record_field(get_record(name), index);
+}
+
+/** Checks if a Zeek record has a field's value set. Throws on errors. */
+inline hilti::rt::Bool record_has_value(const zeek::spicy::rt::ValRecordPtr& v, const std::string& field) {
+    auto index = v->GetType()->AsRecordType()->FieldOffset(field.c_str());
+    if ( index < 0 )
+        throw InvalidValue(util::fmt("no such field in record type: %s", field.c_str()));
+
+    return v->HasField(index);
+}
+
+/** Checks if a Zeek record has a field's value set. Throws on errors. */
+inline hilti::rt::Bool record_has_value(const std::string& name, const std::string& index) {
+    return record_has_value(get_record(name), index);
+}
+
+/** Checks if a Zeek record type has a field of a given name. Throws on errors. */
+inline hilti::rt::Bool record_has_field(const zeek::spicy::rt::ValRecordPtr& v, const std::string& field) {
+    return v->GetType()->AsRecordType()->FieldOffset(field.c_str()) >= 0;
+}
+
+/** Checks if a Zeek record type has a field of a given name. Throws on errors. */
+inline hilti::rt::Bool record_has_field(const std::string& name, const std::string& index) {
+    return record_has_field(get_record(name), index);
+}
+
+/** Checks if a Zeek set contains a given element. Throws on errors.
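
A membership-test sketch (assumes Zeek's standard `likely_server_ports` global, a `set[port]`, and HILTI's `Port(port, protocol)` constructor):

    auto hit = zeek::spicy::rt::set_contains("likely_server_ports",
                                             hilti::rt::Port(443, hilti::rt::Protocol::TCP));
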
*/
+template<typename T>
+::hilti::rt::Bool set_contains(const ValSetPtr& v, const T& key) {
+    auto index = v->GetType()->AsTableType()->GetIndexTypes()[0];
+    return (v->Find(to_val(key, index)) != nullptr);
+}
+
+/** Checks if a Zeek set contains a given element. Throws on errors. */
+template<typename T>
+::hilti::rt::Bool set_contains(const std::string& name, const T& key) {
+    return set_contains(get_set(name), key);
+}
+
+/** Checks if a Zeek table contains a given element. Throws on errors. */
+template<typename T>
+::hilti::rt::Bool table_contains(const ValTablePtr& v, const T& key) {
+    auto index = v->GetType()->AsTableType()->GetIndexTypes()[0];
+    return (v->Find(to_val(key, index)) != nullptr);
+}
+
+/** Checks if a Zeek table contains a given element. Throws on errors. */
+template<typename T>
+::hilti::rt::Bool table_contains(const std::string& name, const T& key) {
+    return table_contains(get_table(name), key);
+}
+
+/**
+ * Retrieves a value from a Zeek table. Returns an unset optional if the key
+ * does not exist. Throws on other errors.
+ */
+template<typename T>
+std::optional<::zeek::ValPtr> table_lookup(const zeek::spicy::rt::ValTablePtr& v, const T& key) {
+    auto index = v->GetType()->AsTableType()->GetIndexTypes()[0];
+    if ( auto x = v->FindOrDefault(to_val(key, index)) )
+        return x;
+    else
+        return {};
+}
+
+/**
+ * Retrieves a value from a Zeek table. Returns an unset optional if the key
+ * does not exist. Throws on other errors.
+ */
+template<typename T>
+std::optional<::zeek::ValPtr> table_lookup(const std::string& name, const T& key) {
+    return table_lookup(get_table(name), key);
+}
+
+/** Returns a Zeek vector element. Throws on errors. */
+inline ::zeek::ValPtr vector_index(const zeek::spicy::rt::ValVectorPtr& v,
+                                   const hilti::rt::integer::safe<uint64_t>& index) {
+    if ( index >= v->Size() )
+        throw InvalidValue(util::fmt("vector index out of bounds: %" PRIu64, index.Ref()));
+
+    return v->ValAt(index);
+}
+
+/** Returns a Zeek vector element. Throws on errors. */
+inline ::zeek::ValPtr vector_index(const std::string& name, const hilti::rt::integer::safe<uint64_t>& index) {
+    return vector_index(get_vector(name), index);
+}
+
+/** Returns the size of a Zeek vector. Throws on errors. */
+inline hilti::rt::integer::safe<uint64_t> vector_size(const zeek::spicy::rt::ValVectorPtr& v) { return v->Size(); }
+
+/** Returns the size of a Zeek vector. Throws on errors. */
+inline hilti::rt::integer::safe<uint64_t> vector_size(const std::string& name) { return vector_size(get_vector(name)); }
+
 } // namespace zeek::spicy::rt
+
+namespace hilti::rt::detail::adl {
+// Stringification for opaque type handles.
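
These ADL overloads hook the opaque Zeek handles into HILTI's generic stringification, yielding a fixed placeholder rather than the value's contents; a sketch (assuming `hilti::rt::to_string()` dispatches through `detail::adl::to_string`, as for other runtime types):

    auto s = hilti::rt::to_string(zeek::spicy::rt::get_table("Cluster::nodes"));
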
+inline std::string to_string(const zeek::ValPtr& v, detail::adl::tag /* unused */) { return ""; } + +inline std::string to_string(const zeek::spicy::rt::ValRecordPtr& v, detail::adl::tag /* unused */) { + return ""; +} + +inline std::string to_string(const zeek::spicy::rt::ValTablePtr& v, detail::adl::tag /* unused */) { + return ""; +} + +inline std::string to_string(const zeek::spicy::rt::ValVectorPtr& v, detail::adl::tag /* unused */) { + return ""; +} +} // namespace hilti::rt::detail::adl diff --git a/src/supervisor/Supervisor.cc b/src/supervisor/Supervisor.cc index a176f033e3..0585ec6467 100644 --- a/src/supervisor/Supervisor.cc +++ b/src/supervisor/Supervisor.cc @@ -18,6 +18,8 @@ #define RAPIDJSON_HAS_STDSTRING 1 #include +#include +#include extern "C" { #include "zeek/3rdparty/setsignal.h" @@ -1243,34 +1245,9 @@ Supervisor::NodeConfig Supervisor::NodeConfig::FromRecord(const RecordVal* node) rval.env[name] = v->GetVal()->AsStringVal()->ToStdString(); } - auto cluster_table_val = node->GetField("cluster")->AsTableVal(); - auto cluster_table = cluster_table_val->AsTable(); - - for ( const auto& cte : *cluster_table ) { - auto k = cte.GetHashKey(); - auto* v = cte.value; - - auto key = cluster_table_val->RecreateIndex(*k); - auto name = key->Idx(0)->AsStringVal()->ToStdString(); - auto rv = v->GetVal()->AsRecordVal(); - - Supervisor::ClusterEndpoint ep; - ep.role = static_cast(rv->GetFieldAs("role")); - ep.host = rv->GetFieldAs("host").AsString(); - ep.port = rv->GetFieldAs("p")->Port(); - - const auto& iface = rv->GetField("interface"); - - if ( iface ) - ep.interface = iface->AsStringVal()->ToStdString(); - - const auto& pcap_file = rv->GetField("pcap_file"); - - if ( pcap_file ) - ep.pcap_file = pcap_file->AsStringVal()->ToStdString(); - - rval.cluster.emplace(name, std::move(ep)); - } + auto cluster_table_val = node->GetField("cluster"); + auto re = std::make_unique("^_"); + rval.cluster = cluster_table_val->ToJSON(false, re.get())->ToStdString(); return rval; } @@ -1319,26 +1296,10 @@ Supervisor::NodeConfig Supervisor::NodeConfig::FromJSON(std::string_view json) { auto& cluster = j["cluster"]; - for ( auto it = cluster.MemberBegin(); it != cluster.MemberEnd(); ++it ) { - Supervisor::ClusterEndpoint ep; - - auto key = it->name.GetString(); - auto& val = it->value; - - auto& role_str = val["role"]; - ep.role = role_str_to_enum(role_str.GetString()); - - ep.host = val["host"].GetString(); - ep.port = val["p"]["port"].GetInt(); - - if ( auto it = val.FindMember("interface"); it != val.MemberEnd() ) - ep.interface = it->value.GetString(); - - if ( auto it = val.FindMember("pcap_file"); it != val.MemberEnd() ) - ep.pcap_file = it->value.GetString(); - - rval.cluster.emplace(key, std::move(ep)); - } + rapidjson::StringBuffer sb; + rapidjson::Writer writer(sb); + cluster.Accept(writer); + rval.cluster = sb.GetString(); return rval; } @@ -1349,7 +1310,7 @@ std::string Supervisor::NodeConfig::ToJSON() const { } RecordValPtr Supervisor::NodeConfig::ToRecord() const { - const auto& rt = BifType::Record::Supervisor::NodeConfig; + const auto& rt = id::find_type("Supervisor::NodeConfig"); auto rval = make_intrusive(rt); rval->AssignField("name", name); @@ -1401,27 +1362,18 @@ RecordValPtr Supervisor::NodeConfig::ToRecord() const { } auto tt = rt->GetFieldType("cluster"); - auto cluster_val = make_intrusive(std::move(tt)); - rval->AssignField("cluster", cluster_val); - - for ( const auto& e : cluster ) { - auto& name = e.first; - auto& ep = e.second; - auto key = make_intrusive(name); - const 
auto& ept = BifType::Record::Supervisor::ClusterEndpoint; - auto val = make_intrusive(ept); - - val->AssignField("role", BifType::Enum::Supervisor::ClusterRole->GetEnumVal(ep.role)); - val->AssignField("host", make_intrusive(ep.host)); - val->AssignField("p", val_mgr->Port(ep.port, TRANSPORT_TCP)); - - if ( ep.interface ) - val->AssignField("interface", *ep.interface); - - if ( ep.pcap_file ) - val->AssignField("pcap_file", *ep.pcap_file); - - cluster_val->Assign(std::move(key), std::move(val)); + auto json_res = detail::ValFromJSON(cluster, tt, Func::nil); + if ( auto val = std::get_if(&json_res) ) { + rval->AssignField("cluster", *val); + } + else { + // This should never happen: the JSON data comes from a table[string] of + // ClusterEndpoint and should therefore allow instantiation. Exiting + // here can be hard to debug. Other JSON code (see FromJSON()) fails + // silently when the JSON is misformatted. We just warn: + fprintf(stderr, "Could not parse %s's cluster table from '%s': %s\n", name.c_str(), cluster.c_str(), + std::get(json_res).c_str()); + rval->AssignField("cluster", make_intrusive(std::move(tt))); } return rval; @@ -1439,62 +1391,6 @@ RecordValPtr SupervisorNode::ToRecord() const { return rval; } -static ValPtr supervisor_role_to_cluster_node_type(BifEnum::Supervisor::ClusterRole role) { - static auto node_type = id::find_type("Cluster::NodeType"); - - switch ( role ) { - case BifEnum::Supervisor::LOGGER: return node_type->GetEnumVal(node_type->Lookup("Cluster", "LOGGER")); - case BifEnum::Supervisor::MANAGER: return node_type->GetEnumVal(node_type->Lookup("Cluster", "MANAGER")); - case BifEnum::Supervisor::PROXY: return node_type->GetEnumVal(node_type->Lookup("Cluster", "PROXY")); - case BifEnum::Supervisor::WORKER: return node_type->GetEnumVal(node_type->Lookup("Cluster", "WORKER")); - default: return node_type->GetEnumVal(node_type->Lookup("Cluster", "NONE")); - } -} - -bool SupervisedNode::InitCluster() const { - if ( config.cluster.empty() ) - return false; - - const auto& cluster_node_type = id::find_type("Cluster::Node"); - const auto& cluster_nodes_id = id::find("Cluster::nodes"); - const auto& cluster_manager_is_logger_id = id::find("Cluster::manager_is_logger"); - auto cluster_nodes = cluster_nodes_id->GetVal()->AsTableVal(); - auto has_logger = false; - std::optional manager_name; - - for ( const auto& e : config.cluster ) { - if ( e.second.role == BifEnum::Supervisor::MANAGER ) - manager_name = e.first; - else if ( e.second.role == BifEnum::Supervisor::LOGGER ) - has_logger = true; - } - - for ( const auto& e : config.cluster ) { - const auto& node_name = e.first; - const auto& ep = e.second; - - auto key = make_intrusive(node_name); - auto val = make_intrusive(cluster_node_type); - - auto node_type = supervisor_role_to_cluster_node_type(ep.role); - val->AssignField("node_type", std::move(node_type)); - val->AssignField("ip", make_intrusive(ep.host)); - val->AssignField("p", val_mgr->Port(ep.port, TRANSPORT_TCP)); - - // Remove in v7.1: Interface removed from Cluster::Node. - if ( ep.interface ) - val->AssignField("interface", *ep.interface); - - if ( manager_name && ep.role != BifEnum::Supervisor::MANAGER ) - val->AssignField("manager", *manager_name); - - cluster_nodes->Assign(std::move(key), std::move(val)); - } - - cluster_manager_is_logger_id->SetVal(val_mgr->Bool(! 
has_logger)); - return true; -} - void SupervisedNode::Init(Options* options) const { const auto& node_name = config.name; @@ -1546,7 +1442,7 @@ void SupervisedNode::Init(Options* options) const { } } - if ( ! config.cluster.empty() ) { + if ( ! config.cluster.empty() && config.cluster != "{}" ) { if ( setenv("CLUSTER_NODE", node_name.data(), true) == -1 ) { fprintf(stderr, "node '%s' failed to setenv: %s\n", node_name.data(), strerror(errno)); exit(1); diff --git a/src/supervisor/Supervisor.h b/src/supervisor/Supervisor.h index aa511e1209..641618dfde 100644 --- a/src/supervisor/Supervisor.h +++ b/src/supervisor/Supervisor.h @@ -110,35 +110,6 @@ public: std::string zeek_exe_path; }; - /** - * Configuration options that influence how a Supervised Zeek node - * integrates into the normal Zeek Cluster Framework. - */ - struct ClusterEndpoint { - /** - * The node's role within the cluster. E.g. manager, logger, worker. - */ - BifEnum::Supervisor::ClusterRole role; - /** - * The TCP port number at which the cluster node listens for connections. - */ - int port; - /** - * The host/IP at which the cluster node is listening for connections. - */ - std::string host; - /** - * The interface name from which the node read/analyze packets. - * Typically used by worker nodes. - */ - std::optional interface; - /** - * The PCAP file name from which the node read/analyze packets. - * Typically used by worker nodes. - */ - std::optional pcap_file; - }; - /** * Configuration options that influence behavior of a Supervised Zeek node. */ @@ -233,15 +204,16 @@ public: */ std::vector addl_user_scripts; /** - * Environment variables and values to define in the node. + * Environment variables and values to define in the node. */ std::map env; /** - * The Cluster Layout definition. Each node in the Cluster Framework - * knows about the full, static cluster topology to which it belongs. - * Entries in the map use node names for keys. + * The cluster layout definition. Each node in the Cluster Framework + * knows the full, static cluster topology to which it belongs. The + * layout is encoded as the JSON map resulting from ToJSON() on the + * corresponding cluster table in the script layer's NodeConfig record. 
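
For illustration, a hypothetical two-node layout serialized this way (field names follow the script-layer Supervisor::ClusterEndpoint record; the exact encoding is whatever to_json() produces):

    {"manager":  {"role": "Supervisor::MANAGER", "host": "127.0.0.1", "p": {"port": 9999, "proto": "tcp"}},
     "worker-1": {"role": "Supervisor::WORKER", "host": "127.0.0.1", "p": {"port": 10000, "proto": "tcp"}}}
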
*/ - std::map cluster; + std::string cluster; }; /** diff --git a/src/supervisor/supervisor.bif b/src/supervisor/supervisor.bif index b749c773d5..d8bd06bd96 100644 --- a/src/supervisor/supervisor.bif +++ b/src/supervisor/supervisor.bif @@ -14,7 +14,6 @@ enum ClusterRole %{ WORKER, %} -type Supervisor::ClusterEndpoint: record; type Supervisor::Status: record; type Supervisor::NodeConfig: record; type Supervisor::NodeStatus: record; @@ -66,14 +65,6 @@ function Supervisor::__restart%(node: string%): bool return zeek::val_mgr->Bool(rval); %} -function Supervisor::__init_cluster%(%): bool - %{ - if ( zeek::Supervisor::ThisNode() ) - return zeek::val_mgr->Bool(zeek::Supervisor::ThisNode()->InitCluster()); - - return zeek::val_mgr->Bool(false); - %} - function Supervisor::__is_supervised%(%): bool %{ return zeek::val_mgr->Bool(zeek::Supervisor::ThisNode().has_value()); diff --git a/src/telemetry/Counter.cc b/src/telemetry/Counter.cc index 5abb624f20..8b34624254 100644 --- a/src/telemetry/Counter.cc +++ b/src/telemetry/Counter.cc @@ -5,7 +5,7 @@ using namespace zeek::telemetry; Counter::Counter(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept : handle(family->Add(labels)), labels(labels) { if ( callback ) { - handle.AddCollectCallback(callback); + handle.AddCollectCallback(std::move(callback)); has_callback = true; } } @@ -37,5 +37,5 @@ std::shared_ptr CounterFamily::GetOrAdd(Span labels, std::shared_ptr CounterFamily::GetOrAdd(std::initializer_list labels, prometheus::CollectCallbackPtr callback) { - return GetOrAdd(Span{labels.begin(), labels.size()}, callback); + return GetOrAdd(Span{labels.begin(), labels.size()}, std::move(callback)); } diff --git a/src/telemetry/Counter.h b/src/telemetry/Counter.h index 5f8ec2bf32..f6c49315b7 100644 --- a/src/telemetry/Counter.h +++ b/src/telemetry/Counter.h @@ -63,7 +63,7 @@ private: using CounterPtr = std::shared_ptr; -class CounterFamily : public MetricFamily, public std::enable_shared_from_this { +class CounterFamily : public MetricFamily { public: static inline const char* OpaqueName = "CounterMetricFamilyVal"; diff --git a/src/telemetry/Gauge.cc b/src/telemetry/Gauge.cc index f3f510b436..273c9a57bf 100644 --- a/src/telemetry/Gauge.cc +++ b/src/telemetry/Gauge.cc @@ -17,7 +17,7 @@ double Gauge::Value() const noexcept { Gauge::Gauge(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept : handle(family->Add(labels)), labels(labels) { if ( callback ) { - handle.AddCollectCallback(callback); + handle.AddCollectCallback(std::move(callback)); has_callback = true; } } @@ -37,5 +37,5 @@ std::shared_ptr GaugeFamily::GetOrAdd(Span labels, prome std::shared_ptr GaugeFamily::GetOrAdd(std::initializer_list labels, prometheus::CollectCallbackPtr callback) { - return GetOrAdd(Span{labels.begin(), labels.size()}, callback); + return GetOrAdd(Span{labels.begin(), labels.size()}, std::move(callback)); } diff --git a/src/telemetry/Gauge.h b/src/telemetry/Gauge.h index cf04e7a9a0..900cb7b784 100644 --- a/src/telemetry/Gauge.h +++ b/src/telemetry/Gauge.h @@ -81,7 +81,7 @@ private: using GaugePtr = std::shared_ptr; -class GaugeFamily : public MetricFamily, public std::enable_shared_from_this { +class GaugeFamily : public MetricFamily { public: static inline const char* OpaqueName = "GaugeMetricFamilyVal"; diff --git a/src/telemetry/Histogram.h b/src/telemetry/Histogram.h index 65d371cd6d..ec8858e463 100644 --- a/src/telemetry/Histogram.h +++ b/src/telemetry/Histogram.h @@ 
-46,7 +46,7 @@ private: using HistogramPtr = std::shared_ptr; -class HistogramFamily : public MetricFamily, public std::enable_shared_from_this { +class HistogramFamily : public MetricFamily { public: static inline const char* OpaqueName = "HistogramMetricFamilyVal"; diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index 8b0a037e74..04c47ba3ef 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -6,6 +6,8 @@ // CivetServer is from the civetweb submodule in prometheus-cpp #include +#include +#include #include #include #include @@ -25,12 +27,19 @@ namespace zeek::telemetry { Manager::Manager() { prometheus_registry = std::make_shared(); } +// This can't be defined as =default because of the use of unique_ptr with a forward-declared type +// in Manager.h +Manager::~Manager() {} + void Manager::InitPostScript() { // Metrics port setting is used to calculate a URL for prometheus scraping std::string prometheus_url; auto metrics_port = id::find_val("Telemetry::metrics_port")->AsPortVal(); + auto metrics_address = id::find_val("Telemetry::metrics_address")->AsStringVal()->ToStdString(); + if ( metrics_address.empty() ) + metrics_address = "0.0.0.0"; if ( metrics_port->Port() != 0 ) - prometheus_url = util::fmt("localhost:%u", metrics_port->Port()); + prometheus_url = util::fmt("%s:%u", metrics_address.data(), metrics_port->Port()); if ( ! prometheus_url.empty() ) { CivetCallbacks* callbacks = nullptr; @@ -44,6 +53,8 @@ void Manager::InitPostScript() { static auto manager_type = node_type_type->Lookup("Cluster", "MANAGER"); if ( local_node_type == manager_type ) { + BuildClusterJson(); + callbacks = new CivetCallbacks(); callbacks->begin_request = [](struct mg_connection* conn) -> int { // Handle the services.json request ourselves by building up a response based on @@ -62,13 +73,19 @@ void Manager::InitPostScript() { } } - try { - prometheus_exposer = std::make_unique(prometheus_url, 2, callbacks); - } catch ( const CivetException& exc ) { - reporter->FatalError("Failed to setup Prometheus endpoint: %s\n", exc.what()); - } + if ( ! getenv("ZEEKCTL_CHECK_CONFIG") ) { + try { + prometheus_exposer = std::make_unique(prometheus_url, 2, callbacks); - prometheus_exposer->RegisterCollectable(prometheus_registry); + // CivetWeb stores a copy of the callbacks, so we're safe to delete the pointer here + delete callbacks; + } catch ( const CivetException& exc ) { + reporter->FatalError("Failed to setup Prometheus endpoint: %s. Attempted to bind to %s.", exc.what(), + prometheus_url.c_str()); + } + + prometheus_exposer->RegisterCollectable(prometheus_registry); + } } #ifdef HAVE_PROCESS_STAT_METRICS @@ -132,7 +149,6 @@ RecordValPtr Manager::GetMetricOptsRecord(const prometheus::MetricFamily& metric static auto name_idx = metric_opts_type->FieldOffset("name"); static auto help_text_idx = metric_opts_type->FieldOffset("help_text"); static auto unit_idx = metric_opts_type->FieldOffset("unit"); - static auto labels_idx = metric_opts_type->FieldOffset("labels"); static auto is_total_idx = metric_opts_type->FieldOffset("is_total"); static auto metric_type_idx = metric_opts_type->FieldOffset("metric_type"); @@ -154,55 +170,15 @@ RecordValPtr Manager::GetMetricOptsRecord(const prometheus::MetricFamily& metric // Assume that a metric ending with _total is always a summed metric so we can set that. 
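    // For instance (illustrative names): "zeek_log_writer_writes_total" ends in
    // "_total" and is flagged is_total == true, while "zeek_active_sessions" is not.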
record_val->Assign(is_total_idx, val_mgr->Bool(util::ends_with(metric_family.name, "_total"))); - auto label_names_vec = make_intrusive(string_vec_type); - - // Check if this is a Zeek-internal metric. We keep a little more information about a metric - // for these than we do for ones that were inserted into prom-cpp directly. - if ( auto it = families.find(metric_family.name); it != families.end() ) { - record_val->Assign(metric_type_idx, - zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal(it->second->MetricType())); - - for ( const auto& lbl : it->second->LabelNames() ) - label_names_vec->Append(make_intrusive(lbl)); - } - else { - // prom-cpp stores everything internally as doubles - if ( metric_family.type == prometheus::MetricType::Counter ) - record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( - BifEnum::Telemetry::MetricType::COUNTER)); - if ( metric_family.type == prometheus::MetricType::Gauge ) - record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( - BifEnum::Telemetry::MetricType::GAUGE)); - if ( metric_family.type == prometheus::MetricType::Histogram ) - record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( - BifEnum::Telemetry::MetricType::HISTOGRAM)); - - // prometheus-cpp doesn't store label names anywhere other than in each - // instrument. this is valid because label names can be different - // between instruments within a single family for prometheus. we don't - // follow that model in Zeek, so use the names from the first instrument - // but validate that they're the same in the rest and warn if not. - if ( ! metric_family.metric.empty() ) { - std::unordered_set names; - for ( const auto& lbl : metric_family.metric[0].label ) { - label_names_vec->Append(make_intrusive(lbl.name)); - names.insert(lbl.name); - } - - if ( metric_family.metric.size() > 1 ) { - for ( size_t i = 1; i < metric_family.metric.size(); ++i ) { - for ( const auto& lbl : metric_family.metric[i].label ) { - if ( names.count(lbl.name) == 0 ) - reporter->Warning( - "Telemetry labels must be the same across all instruments for metric family %s\n", - metric_family.name.c_str()); - } - } - } - } - } - - record_val->Assign(labels_idx, label_names_vec); + if ( metric_family.type == prometheus::MetricType::Counter ) + record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( + BifEnum::Telemetry::MetricType::COUNTER)); + if ( metric_family.type == prometheus::MetricType::Gauge ) + record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( + BifEnum::Telemetry::MetricType::GAUGE)); + if ( metric_family.type == prometheus::MetricType::Histogram ) + record_val->Assign(metric_type_idx, zeek::BifType::Enum::Telemetry::MetricType->GetEnumVal( + BifEnum::Telemetry::MetricType::HISTOGRAM)); opts_records.insert({metric_family.name, record_val}); @@ -242,8 +218,8 @@ static bool comparer(const std::optional& a, const std::optional& b, auto a_r = a->ToVal(type)->AsRecordVal(); auto b_r = b->ToVal(type)->AsRecordVal(); - auto a_labels = a_r->GetField("labels"); - auto b_labels = b_r->GetField("labels"); + auto a_labels = a_r->GetField("label_values"); + auto b_labels = b_r->GetField("label_values"); return compare_string_vectors(a_labels, b_labels); } @@ -262,8 +238,9 @@ ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view static auto string_vec_type = zeek::id::find_type("string_vec"); static auto 
metric_record_type = zeek::id::find_type("Telemetry::Metric"); static auto opts_idx = metric_record_type->FieldOffset("opts"); - static auto labels_idx = metric_record_type->FieldOffset("labels"); static auto value_idx = metric_record_type->FieldOffset("value"); + static auto label_names_idx = metric_record_type->FieldOffset("label_names"); + static auto label_values_idx = metric_record_type->FieldOffset("label_values"); static auto metric_opts_type = zeek::id::find_type("Telemetry::MetricOpts"); static auto metric_type_idx = metric_opts_type->FieldOffset("metric_type"); @@ -287,15 +264,7 @@ ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view RecordValPtr opts_record = GetMetricOptsRecord(fam); for ( const auto& inst : fam.metric ) { - auto label_values_vec = make_intrusive(string_vec_type); - for ( const auto& label : inst.label ) { - // We don't include the endpoint key/value unless it's a prometheus request - if ( label.name != "endpoint" ) - label_values_vec->Append(make_intrusive(label.value)); - } - auto r = make_intrusive(metric_record_type); - r->Assign(labels_idx, label_values_vec); r->Assign(opts_idx, opts_record); if ( fam.type == prometheus::MetricType::Counter ) @@ -303,7 +272,18 @@ ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view else if ( fam.type == prometheus::MetricType::Gauge ) r->Assign(value_idx, zeek::make_intrusive(inst.gauge.value)); - ret_val->Append(r); + auto label_names_vec = make_intrusive(string_vec_type); + auto label_values_vec = make_intrusive(string_vec_type); + + for ( const auto& lbl : inst.label ) { + label_names_vec->Append(make_intrusive(lbl.name)); + label_values_vec->Append(make_intrusive(lbl.value)); + } + + r->Assign(label_names_idx, std::move(label_names_vec)); + r->Assign(label_values_idx, std::move(label_values_vec)); + + ret_val->Append(std::move(r)); } } @@ -320,7 +300,7 @@ ValPtr Manager::CollectMetrics(std::string_view prefix_pattern, std::string_view } } - return ret_val; + return std::move(ret_val); } ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::string_view name_pattern) { @@ -328,8 +308,9 @@ ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st static auto string_vec_type = zeek::id::find_type("string_vec"); static auto double_vec_type = zeek::id::find_type("double_vec"); static auto histogram_metric_type = zeek::id::find_type("Telemetry::HistogramMetric"); - static auto labels_idx = histogram_metric_type->FieldOffset("labels"); static auto values_idx = histogram_metric_type->FieldOffset("values"); + static auto label_names_idx = histogram_metric_type->FieldOffset("label_names"); + static auto label_values_idx = histogram_metric_type->FieldOffset("label_values"); static auto observations_idx = histogram_metric_type->FieldOffset("observations"); static auto sum_idx = histogram_metric_type->FieldOffset("sum"); @@ -360,16 +341,19 @@ ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st RecordValPtr opts_record = GetMetricOptsRecord(fam); for ( const auto& inst : fam.metric ) { - auto label_values_vec = make_intrusive(string_vec_type); - for ( const auto& label : inst.label ) { - // We don't include the endpoint key/value unless it's a prometheus request - if ( label.name != "endpoint" ) - label_values_vec->Append(make_intrusive(label.value)); + auto r = make_intrusive(histogram_metric_type); + r->Assign(opts_idx, opts_record); + + auto label_names_vec = make_intrusive(string_vec_type); + auto 
label_values_vec = make_intrusive(string_vec_type); + + for ( const auto& lbl : inst.label ) { + label_names_vec->Append(make_intrusive(lbl.name)); + label_values_vec->Append(make_intrusive(lbl.value)); } - auto r = make_intrusive(histogram_metric_type); - r->Assign(labels_idx, label_values_vec); - r->Assign(opts_idx, opts_record); + r->Assign(label_names_idx, std::move(label_names_vec)); + r->Assign(label_values_idx, std::move(label_values_vec)); auto double_values_vec = make_intrusive(double_vec_type); std::vector boundaries; @@ -392,9 +376,9 @@ ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st r->Assign(sum_idx, zeek::make_intrusive(inst.histogram.sample_sum)); RecordValPtr local_opts_record = r->GetField(opts_idx); - local_opts_record->Assign(bounds_idx, bounds_vec); + local_opts_record->Assign(bounds_idx, std::move(bounds_vec)); - ret_val->Append(r); + ret_val->Append(std::move(r)); } } @@ -411,10 +395,10 @@ ValPtr Manager::CollectHistogramMetrics(std::string_view prefix_pattern, std::st } } - return ret_val; + return std::move(ret_val); } -std::string Manager::GetClusterJson() const { +void Manager::BuildClusterJson() { rapidjson::StringBuffer buffer; json::detail::NullDoubleWriter writer(buffer); @@ -423,8 +407,9 @@ std::string Manager::GetClusterJson() const { writer.Key("targets"); writer.StartArray(); - auto cluster_nodes = id::find_val("Cluster::nodes")->AsTableVal()->ToMap(); - for ( const auto& [idx, value] : cluster_nodes ) { + auto& node_val = id::find_val("Cluster::nodes"); + auto node_map = node_val->AsTableVal()->ToMap(); + for ( const auto& [idx, value] : node_map ) { auto node = value->AsRecordVal(); auto ip = node->GetField("ip"); auto port = node->GetField("metrics_port"); @@ -440,7 +425,7 @@ std::string Manager::GetClusterJson() const { writer.EndObject(); writer.EndArray(); - return buffer.GetString(); + cluster_json = buffer.GetString(); } CounterFamilyPtr Manager::CounterFamily(std::string_view prefix, std::string_view name, @@ -518,7 +503,7 @@ GaugePtr Manager::GaugeInstance(std::string_view prefix, std::string_view name, std::string_view helptext, std::string_view unit, prometheus::CollectCallbackPtr callback) { auto lbl_span = Span{labels.begin(), labels.size()}; - return GaugeInstance(prefix, name, lbl_span, helptext, unit, callback); + return GaugeInstance(prefix, name, lbl_span, helptext, unit, std::move(callback)); } HistogramFamilyPtr Manager::HistogramFamily(std::string_view prefix, std::string_view name, diff --git a/src/telemetry/Manager.h b/src/telemetry/Manager.h index 8a1deb5fc4..c4c2537f1a 100644 --- a/src/telemetry/Manager.h +++ b/src/telemetry/Manager.h @@ -2,8 +2,6 @@ #pragma once -#include -#include #include #include #include @@ -24,6 +22,11 @@ class RecordVal; using RecordValPtr = IntrusivePtr; } // namespace zeek +namespace prometheus { +class Exposer; +class Registry; +} // namespace prometheus + namespace zeek::telemetry { /** @@ -37,7 +40,7 @@ public: Manager& operator=(const Manager&) = delete; - ~Manager() = default; + ~Manager(); /** * Initialization of the manager. This is called late during Zeek's @@ -200,7 +203,7 @@ public: * @return A JSON description of the cluster configuration for reporting * to Prometheus for service discovery requests. 
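
For illustration, the returned document follows the Prometheus HTTP service-discovery shape, with targets assembled from each node's `ip` and `metrics_port` (addresses hypothetical, remaining fields elided):

    [ { "targets": [ "10.0.1.5:9091", "10.0.1.6:9092" ], ... } ]
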
*/ - std::string GetClusterJson() const; + std::string GetClusterJson() const { return cluster_json; } /** * @return The pointer to the prometheus-cpp registry used by the telemetry @@ -230,6 +233,7 @@ protected: private: RecordValPtr GetMetricOptsRecord(const prometheus::MetricFamily& metric_family); + void BuildClusterJson(); std::map> families; std::map opts_records; @@ -242,11 +246,10 @@ private: GaugePtr cpu_gauge; GaugePtr fds_gauge; - std::string endpoint_name; - std::vector export_prefixes; - std::shared_ptr prometheus_registry; std::unique_ptr prometheus_exposer; + + std::string cluster_json; }; } // namespace zeek::telemetry diff --git a/src/telemetry/ProcessStats.cc b/src/telemetry/ProcessStats.cc index c0e9d322b7..f2a0447b63 100644 --- a/src/telemetry/ProcessStats.cc +++ b/src/telemetry/ProcessStats.cc @@ -67,6 +67,19 @@ std::atomic global_page_size; namespace zeek::telemetry::detail { +int64_t count_entries_in_directory(const char* path) { + int64_t result = 0; + if ( auto dptr = opendir(path); dptr != nullptr ) { + for ( auto entry = readdir(dptr); entry != nullptr; entry = readdir(dptr) ) { + auto fname = entry->d_name; + if ( strcmp(".", fname) != 0 && strcmp("..", fname) != 0 ) + ++result; + } + closedir(dptr); + } + return result; +} + /// Caches the result from a `sysconf` call in a cache variable to avoid /// frequent syscalls. Sets `cache_var` to -1 in case of an error. Initially, /// `cache_var` must be 0 and we assume a successful syscall would always return @@ -143,9 +156,7 @@ process_stats get_process_stats() { result.vms = vmsize_bytes; result.cpu = static_cast(utime_ticks + stime_ticks) / ticks_per_second; - zeek::filesystem::path fd_path{"/proc/self/fd"}; - result.fds = - std::distance(zeek::filesystem::directory_iterator{fd_path}, zeek::filesystem::directory_iterator{}); + result.fds = count_entries_in_directory("/proc/self/fd"); } return result; diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 9bcdc925f4..5620a0bf80 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -28,6 +28,7 @@ Manager::Manager() { did_process = true; next_beat = 0; terminating = false; + terminated = false; } Manager::~Manager() { @@ -61,10 +62,18 @@ void Manager::Terminate() { all_threads.clear(); msg_threads.clear(); terminating = false; + terminated = true; } void Manager::AddThread(BasicThread* thread) { DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name()); + + // This can happen when log writers or other threads are + // created during the shutdown phase and results in unclean + // shutdowns. + if ( terminated ) + reporter->Warning("Thread %s added after threading manager terminated", thread->Name()); + all_threads.push_back(thread); if ( ! heartbeat_timer_running ) diff --git a/src/threading/Manager.h b/src/threading/Manager.h index 875e35290a..b075e6a70d 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -146,6 +146,7 @@ private: bool did_process; // True if the last Process() found some work to do. double next_beat; // Timestamp when the next heartbeat will be sent. bool terminating; // True if we are in Terminate(). + bool terminated; // True if Terminate() finished. msg_stats_list stats; diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index a1d2e5e3da..022a8ce2b4 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -165,6 +165,50 @@ bool ReporterMessage::Process() { return true; } +// This is the IO source used by MsgThread. 
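// A sketch of the wake-up hand-off implemented below:
//
//   child thread:  SendOut(msg) -> io_source->Fire()        (raises the flare FD)
//   main loop:     poll() wakes on that FD -> IOSource::Process()
//                  -> flare.Extinguish() -> thread->Process()   (drains the out-queue)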
+// +// The lifetime of the IO source is decoupled from +// the thread. The thread may be terminated prior +// to the IO source being properly unregistered and +// removed by the IO manager. +class IOSource : public iosource::IOSource { +public: + explicit IOSource(MsgThread* thread) : thread(thread) { + if ( ! iosource_mgr->RegisterFd(flare.FD(), this) ) + reporter->InternalError("Failed to register MsgThread FD with iosource_mgr"); + + SetClosed(false); + } + + ~IOSource() override { + if ( ! iosource_mgr->UnregisterFd(flare.FD(), this) ) + reporter->InternalError("Failed to unregister MsgThread FD from iosource_mgr"); + } + + void Process() override { + flare.Extinguish(); + + if ( thread ) + thread->Process(); + } + + const char* Tag() override { return thread ? thread->Name() : ""; } + + double GetNextTimeout() override { return -1; } + + + void Fire() { flare.Fire(); }; + + void Close() { + thread = nullptr; + SetClosed(true); + } + +private: + MsgThread* thread = nullptr; + zeek::detail::Flare flare; +}; + } // namespace detail ////// Methods. @@ -181,16 +225,20 @@ MsgThread::MsgThread() : BasicThread(), queue_in(this, nullptr), queue_out(nullp failed = false; thread_mgr->AddMsgThread(this); - if ( ! iosource_mgr->RegisterFd(flare.FD(), this) ) - reporter->FatalError("Failed to register MsgThread fd with iosource_mgr"); + io_source = new detail::IOSource(this); - SetClosed(false); + // Register IOSource as non-counting lifetime managed IO source. + iosource_mgr->Register(io_source, true); } MsgThread::~MsgThread() { - // Unregister this thread from the iosource manager so it doesn't wake - // up the main poll anymore. - iosource_mgr->UnregisterFd(flare.FD(), this); + // Unregister this thread from the IO source so we don't + // get Process() callbacks anymore. The IO source itself + // is life-time managed by the IO manager. + if ( io_source ) { + io_source->Close(); + io_source = nullptr; + } } void MsgThread::OnSignalStop() { @@ -253,7 +301,13 @@ void MsgThread::OnWaitForStop() { } void MsgThread::OnKill() { - SetClosed(true); + // Ensure the IO source is closed and won't call Process() on this + // thread anymore. The thread got killed, so the threading manager will + // remove it forcefully soon. + if ( io_source ) { + io_source->Close(); + io_source = nullptr; + } // Send a message to unblock the reader if its currently waiting for // input. This is just an optimization to make it terminate more @@ -345,7 +399,8 @@ void MsgThread::SendOut(BasicOutputMessage* msg, bool force) { ++cnt_sent_out; - flare.Fire(); + if ( io_source ) + io_source->Fire(); } void MsgThread::SendEvent(const char* name, const int num_vals, Value** vals) { @@ -418,8 +473,6 @@ void MsgThread::GetStats(Stats* stats) { } void MsgThread::Process() { - flare.Extinguish(); - while ( HasOut() ) { Message* msg = RetrieveOut(); assert(msg); diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 55b8f0ba1d..259e64b11f 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -26,6 +26,7 @@ class HeartbeatMessage; class FinishMessage; class FinishedMessage; class KillMeMessage; +class IOSource; } // namespace detail @@ -40,7 +41,7 @@ class KillMeMessage; * that happens, the thread stops accepting any new messages, finishes * processes all remaining ones still in the queue, and then exits. */ -class MsgThread : public BasicThread, public iosource::IOSource { +class MsgThread : public BasicThread { public: /** * Constructor. 
It automatically registers the thread with the
@@ -209,11 +210,9 @@ public:
void GetStats(Stats* stats); /**
- * Overridden from iosource::IOSource.
+ * Process() is forwarded here from detail::IOSource.
*/
- void Process() override;
- const char* Tag() override { return Name(); }
- double GetNextTimeout() override { return -1; }
+ void Process();
protected: friend class Manager;
@@ -362,7 +361,7 @@ private:
bool child_sent_finish; // Child thread asked to be finished.
bool failed; // Set to true when a command failed.
- zeek::detail::Flare flare;
+ detail::IOSource* io_source = nullptr; // IO source registered with the IO manager.
}; /**
diff --git a/src/zeek.bif b/src/zeek.bif
index 2d3d73fcda..eccc7f0292 100644
--- a/src/zeek.bif
+++ b/src/zeek.bif
@@ -5061,6 +5061,12 @@ function anonymize_addr%(a: addr, cl: IPAddrAnonymizationClass%): addr
## only_loggable: If the v value is a record this will only cause
## fields with the &log attribute to be included in the JSON.
##
+## field_escape_pattern: If the v value is a record, the given pattern is
+## matched against the field names of its type, and
+## the first match, if any, is stripped from the
+## rendered name. The default pattern strips a leading
+## underscore.
+##
## returns: a JSON formatted string.
##
## .. zeek:see:: fmt cat cat_sep string_cat print_raw from_json
diff --git a/testing/btest/Baseline.cpp/bifs.disable_analyzer-invalid-aid/out b/testing/btest/Baseline.cpp/bifs.disable_analyzer-invalid-aid/out
new file mode 100644
index 0000000000..47ad8f519f
--- /dev/null
+++ b/testing/btest/Baseline.cpp/bifs.disable_analyzer-invalid-aid/out
@@ -0,0 +1,35 @@
+### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
+XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf())
+XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf())
+XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf())
+XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf())
+XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf())
+XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf())
+XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf())
+XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed
(<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed 
(<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer TCP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek (C++), line 5: root analyzer UDP cannot be removed (<___>testing_btest__tmp_bifs_disable_analyzer_invalid_aid_disable_analyzer_invalid_aid_zeek__new_connection__zf()) diff --git a/testing/btest/Baseline.cpp/broker.store.create-failure/zeek.err b/testing/btest/Baseline.cpp/broker.store.create-failure/zeek.err index 3e3ab66b88..2e4a0a075b 100644 --- a/testing/btest/Baseline.cpp/broker.store.create-failure/zeek.err +++ b/testing/btest/Baseline.cpp/broker.store.create-failure/zeek.err @@ -1,11 +1,11 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
error in <...>/create-failure.zeek (C++), line 61: Failed to attach master store backend_failure: (<___>testing_btest__tmp_broker_store_create_failure_create_failure_zeek__zeek_init__36__zf()) error in <...>/create-failure.zeek (C++), line 61: Could not create Broker master store '../fail' (<___>testing_btest__tmp_broker_store_create_failure_create_failure_zeek__zeek_init__36__zf()) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) -error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_8863924235512554227__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) +error in <...>/create-failure.zeek, line 49: invalid Broker store handle (lambda_1087046023523666109__lb_cl() and broker::store::{}) received termination signal diff --git a/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-2/output b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-2/output new file mode 100644 index 0000000000..49d861c74c --- /dev/null +++ b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-2/output @@ -0,0 +1 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. diff --git a/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-3/output b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-3/output new file mode 100644 index 0000000000..49d861c74c --- /dev/null +++ b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn-3/output @@ -0,0 +1 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. diff --git a/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn/output b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn/output new file mode 100644 index 0000000000..010cfa58e2 --- /dev/null +++ b/testing/btest/Baseline.zam/bifs.disable_analyzer-for-conn/output @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+AllAnalyzers::ANALYZER_ANALYZER_HTTP +AllAnalyzers::ANALYZER_ANALYZER_HTTP diff --git a/testing/btest/Baseline.zam/bifs.disable_analyzer-invalid-aid/out b/testing/btest/Baseline.zam/bifs.disable_analyzer-invalid-aid/out new file mode 100644 index 0000000000..6279bf0504 --- /dev/null +++ b/testing/btest/Baseline.zam/bifs.disable_analyzer-invalid-aid/out @@ -0,0 +1,35 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be 
removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer TCP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) +XXXXXXXXXX.XXXXXX error in <...>/disable_analyzer-invalid-aid.zeek, line 12: root analyzer UDP cannot be removed (disable_analyzer(::#0, i, F, T)) diff --git a/testing/btest/Baseline.zam/bifs.from_json-5/.stderr b/testing/btest/Baseline.zam/bifs.from_json-5/.stderr index ed0056356c..d7ed338df6 100644 --- a/testing/btest/Baseline.zam/bifs.from_json-5/.stderr +++ b/testing/btest/Baseline.zam/bifs.from_json-5/.stderr @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -error in <...>/from_json.zeek, line 4: tables are not supported (from_json([], ::#0, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 4: cannot convert JSON type 'array' to Zeek type 'table' (from_json([], ::#0, from_json_default_key_mapper)) diff --git a/testing/btest/Baseline.zam/bifs.from_json-6/.stderr b/testing/btest/Baseline.zam/bifs.from_json-6/.stderr index 7a7c048f3c..b1ddbdd07a 100644 --- a/testing/btest/Baseline.zam/bifs.from_json-6/.stderr +++ b/testing/btest/Baseline.zam/bifs.from_json-6/.stderr @@ -1,2 +1,3 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-error in <...>/from_json.zeek, line 5: wrong port format, must be <...>/(tcp|udp|icmp|unknown)/ (from_json("80", ::#0, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 8: wrong port format, string must be <...>/(tcp|udp|icmp|unknown)/ (from_json("80", ::#2, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 9: wrong port format, object must have 'port' and 'proto' members (from_json({}, ::#4, from_json_default_key_mapper)) diff --git a/testing/btest/Baseline.zam/bifs.from_json-6/.stdout b/testing/btest/Baseline.zam/bifs.from_json-6/.stdout index aee95c8a8e..a4da3aa3e4 100644 --- a/testing/btest/Baseline.zam/bifs.from_json-6/.stdout +++ b/testing/btest/Baseline.zam/bifs.from_json-6/.stdout @@ -1,2 +1,4 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +[v=80/tcp, valid=T] +[v=, valid=F] [v=, valid=F] diff --git a/testing/btest/Baseline.zam/bifs.from_json/.stdout b/testing/btest/Baseline.zam/bifs.from_json/.stdout index 24f35f7b9b..584cb2ca60 100644 --- a/testing/btest/Baseline.zam/bifs.from_json/.stdout +++ b/testing/btest/Baseline.zam/bifs.from_json/.stdout @@ -5,4 +5,6 @@ aa:bb::/32, }, se={ [192.168.0.1, 80/tcp] , [2001:db8::1, 8080/udp] +}, tbl={ +[192.168.0.1, 80/tcp] = foo }], valid=T] diff --git a/testing/btest/Baseline.zam/core.file-analyzer-violation/.stderr b/testing/btest/Baseline.zam/core.file-analyzer-violation/.stderr new file mode 100644 index 0000000000..e3f6131b1d --- /dev/null +++ b/testing/btest/Baseline.zam/core.file-analyzer-violation/.stderr @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +received termination signal diff --git a/testing/btest/Baseline.zam/core.file-analyzer-violation/files.log b/testing/btest/Baseline.zam/core.file-analyzer-violation/files.log index abef87d6a6..3b26732673 100644 --- a/testing/btest/Baseline.zam/core.file-analyzer-violation/files.log +++ b/testing/btest/Baseline.zam/core.file-analyzer-violation/files.log @@ -8,3 +8,4 @@ #fields ts fuid uid id.orig_h id.orig_p id.resp_h id.resp_p source depth analyzers mime_type filename duration local_orig is_orig seen_bytes total_bytes missing_bytes overflow_bytes timedout parent_fuid #types time string string addr port addr port string count set[string] string string interval bool bool count count count count bool string XXXXXXXXXX.XXXXXX FKPuH630Tmj6UQUMP7 - - - - - ./myfile.exe 0 PE application/x-dosexec - 0.000000 - - 64 - 0 0 F - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output b/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output index 4fab29b90a..795f0f84d7 100644 --- a/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output +++ b/testing/btest/Baseline.zam/opt.ZAM-bif-tracking/output @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -551 seen BiFs, 0 unseen BiFs (), 0 new BiFs () +533 seen BiFs, 0 unseen BiFs (), 0 new BiFs () diff --git a/testing/btest/Baseline/bifs.from_json-5/.stderr b/testing/btest/Baseline/bifs.from_json-5/.stderr index 93cbb432cf..6bdd60e118 100644 --- a/testing/btest/Baseline/bifs.from_json-5/.stderr +++ b/testing/btest/Baseline/bifs.from_json-5/.stderr @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-error in <...>/from_json.zeek, line 4: tables are not supported (from_json([], to_any_coerce table_string_of_string, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 4: cannot convert JSON type 'array' to Zeek type 'table' (from_json([], to_any_coerce table_string_of_string, from_json_default_key_mapper)) diff --git a/testing/btest/Baseline/bifs.from_json-6/.stderr b/testing/btest/Baseline/bifs.from_json-6/.stderr index bafb1a49e9..0b278db5ae 100644 --- a/testing/btest/Baseline/bifs.from_json-6/.stderr +++ b/testing/btest/Baseline/bifs.from_json-6/.stderr @@ -1,2 +1,3 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -error in <...>/from_json.zeek, line 5: wrong port format, must be <...>/(tcp|udp|icmp|unknown)/ (from_json("80", to_any_coerce port_t, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 8: wrong port format, string must be <...>/(tcp|udp|icmp|unknown)/ (from_json("80", to_any_coerce port_t, from_json_default_key_mapper)) +error in <...>/from_json.zeek, line 9: wrong port format, object must have 'port' and 'proto' members (from_json({}, to_any_coerce port_t, from_json_default_key_mapper)) diff --git a/testing/btest/Baseline/bifs.from_json-6/.stdout b/testing/btest/Baseline/bifs.from_json-6/.stdout index aee95c8a8e..a4da3aa3e4 100644 --- a/testing/btest/Baseline/bifs.from_json-6/.stdout +++ b/testing/btest/Baseline/bifs.from_json-6/.stdout @@ -1,2 +1,4 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +[v=80/tcp, valid=T] +[v=, valid=F] [v=, valid=F] diff --git a/testing/btest/Baseline/bifs.from_json/.stdout b/testing/btest/Baseline/bifs.from_json/.stdout index 24f35f7b9b..584cb2ca60 100644 --- a/testing/btest/Baseline/bifs.from_json/.stdout +++ b/testing/btest/Baseline/bifs.from_json/.stdout @@ -5,4 +5,6 @@ aa:bb::/32, }, se={ [192.168.0.1, 80/tcp] , [2001:db8::1, 8080/udp] +}, tbl={ +[192.168.0.1, 80/tcp] = foo }], valid=T] diff --git a/testing/btest/Baseline/core.file-analyzer-violation/.stderr b/testing/btest/Baseline/core.file-analyzer-violation/.stderr new file mode 100644 index 0000000000..e3f6131b1d --- /dev/null +++ b/testing/btest/Baseline/core.file-analyzer-violation/.stderr @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+received termination signal diff --git a/testing/btest/Baseline/core.file-analyzer-violation/files.log b/testing/btest/Baseline/core.file-analyzer-violation/files.log index abef87d6a6..3b26732673 100644 --- a/testing/btest/Baseline/core.file-analyzer-violation/files.log +++ b/testing/btest/Baseline/core.file-analyzer-violation/files.log @@ -8,3 +8,4 @@ #fields ts fuid uid id.orig_h id.orig_p id.resp_h id.resp_p source depth analyzers mime_type filename duration local_orig is_orig seen_bytes total_bytes missing_bytes overflow_bytes timedout parent_fuid #types time string string addr port addr port string count set[string] string string interval bool bool count count count count bool string XXXXXXXXXX.XXXXXX FKPuH630Tmj6UQUMP7 - - - - - ./myfile.exe 0 PE application/x-dosexec - 0.000000 - - 64 - 0 0 F - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/coverage.init-default/missing_loads b/testing/btest/Baseline/coverage.init-default/missing_loads index fe23c7a04a..e16624e1fb 100644 --- a/testing/btest/Baseline/coverage.init-default/missing_loads +++ b/testing/btest/Baseline/coverage.init-default/missing_loads @@ -5,6 +5,7 @@ -./frameworks/cluster/nodes/proxy.zeek -./frameworks/cluster/nodes/worker.zeek -./frameworks/cluster/setup-connections.zeek +-./frameworks/cluster/supervisor.zeek -./frameworks/intel/cluster.zeek -./frameworks/netcontrol/cluster.zeek -./frameworks/openflow/cluster.zeek diff --git a/testing/btest/Baseline/language.attr-default-global-set-error/out b/testing/btest/Baseline/language.attr-default-global-set-error/out index 29a41aa656..431b2d445b 100644 --- a/testing/btest/Baseline/language.attr-default-global-set-error/out +++ b/testing/btest/Baseline/language.attr-default-global-set-error/out @@ -1,7 +1,6 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. error in <...>/attr-default-global-set-error.zeek, line 4: &default is not valid for global variables except for tables (&default=0) error in <...>/attr-default-global-set-error.zeek, line 9: &default is not valid for global variables except for tables (&default=10) -error in <...>/attr-default-global-set-error.zeek, line 9: Duplicate &default attribute is ambiguous error in <...>/attr-default-global-set-error.zeek, line 9: &default is not valid for global variables except for tables (&default=9) -error in <...>/attr-default-global-set-error.zeek, line 9: &optional is not valid for global variables (&default=9, &optional) +error in <...>/attr-default-global-set-error.zeek, line 9: &optional is not valid for global variables (&optional) error in <...>/attr-default-global-set-error.zeek, line 10: &default is not valid for global variables except for tables (&default=set()) diff --git a/testing/btest/Baseline/language.record-chain-assign/output b/testing/btest/Baseline/language.record-chain-assign/output new file mode 100644 index 0000000000..996852b152 --- /dev/null +++ b/testing/btest/Baseline/language.record-chain-assign/output @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+[a=-42.3, b=-12, c=3, d=3, e=-10012, f=1003.0, g=tail] +[a=-84.6, b=-24, c=1006, d=1006, e=-20024, f=-9039.3, g=intervening] diff --git a/testing/btest/Baseline/language.set/out b/testing/btest/Baseline/language.set/out index 51d2a72711..c53cae332c 100644 --- a/testing/btest/Baseline/language.set/out +++ b/testing/btest/Baseline/language.set/out @@ -76,18 +76,26 @@ table index membership (PASS) table index non-membership (PASS) table index reduced size (PASS) table index iteration (PASS) +table index JSON roundtrip success (PASS) +table index JSON roundtrip correct (PASS) vector index size (PASS) vector index membership (PASS) vector index non-membership (PASS) vector index reduced size (PASS) vector index iteration (PASS) +vector index JSON roundtrip success (PASS) +vector index JSON roundtrip correct (PASS) set index size (PASS) set index membership (PASS) set index non-membership (PASS) set index reduced size (PASS) set index iteration (PASS) +set index JSON roundtrip success (PASS) +set index JSON roundtrip correct (PASS) pattern index size (PASS) pattern index membership (PASS) pattern index non-membership (PASS) pattern index reduced size (PASS) pattern index iteration (PASS) +pattern index JSON roundtrip success (PASS) +pattern index JSON roundtrip correct (PASS) diff --git a/testing/btest/Baseline/language.table/out b/testing/btest/Baseline/language.table/out index 358a2f70a6..64105b72ca 100644 --- a/testing/btest/Baseline/language.table/out +++ b/testing/btest/Baseline/language.table/out @@ -53,21 +53,29 @@ table index non-membership (PASS) table index lookup (PASS) table index reduced size (PASS) table index iteration (PASS) +table index JSON roundtrip success (PASS) +table index JSON roundtrip correct (PASS) vector index size (PASS) vector index membership (PASS) vector index non-membership (PASS) vector index lookup (PASS) vector index reduced size (PASS) vector index iteration (PASS) +vector index JSON roundtrip success (PASS) +vector index JSON roundtrip (PASS) set index size (PASS) set index membership (PASS) set index non-membership (PASS) set index lookup (PASS) set index reduced size (PASS) set index iteration (PASS) +set index JSON roundtrip success (PASS) +set index JSON roundtrip (PASS) pattern index size (PASS) pattern index membership (PASS) pattern index non-membership (PASS) pattern index lookup (PASS) pattern index reduced size (PASS) pattern index iteration (PASS) +pattern index JSON roundtrip success (PASS) +pattern index JSON roundtrip (PASS) diff --git a/testing/btest/Baseline/opt.opt-no-files/.stderr b/testing/btest/Baseline/opt.opt-no-files/.stderr index ba158c1e5e..3fa6d06e85 100644 --- a/testing/btest/Baseline/opt.opt-no-files/.stderr +++ b/testing/btest/Baseline/opt.opt-no-files/.stderr @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -fatal error: no matching functions/files for C++ compilation +fatal error: no matching functions/files for script optimization diff --git a/testing/btest/Baseline/opt.opt-no-func/.stderr b/testing/btest/Baseline/opt.opt-no-func/.stderr index ba158c1e5e..3fa6d06e85 100644 --- a/testing/btest/Baseline/opt.opt-no-func/.stderr +++ b/testing/btest/Baseline/opt.opt-no-func/.stderr @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-fatal error: no matching functions/files for C++ compilation +fatal error: no matching functions/files for script optimization diff --git a/testing/btest/Baseline/opt.opt-no-func2/.stderr b/testing/btest/Baseline/opt.opt-no-func2/.stderr index ba158c1e5e..3fa6d06e85 100644 --- a/testing/btest/Baseline/opt.opt-no-func2/.stderr +++ b/testing/btest/Baseline/opt.opt-no-func2/.stderr @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -fatal error: no matching functions/files for C++ compilation +fatal error: no matching functions/files for script optimization diff --git a/testing/btest/Baseline/plugins.hooks/output b/testing/btest/Baseline/plugins.hooks/output index 297bdb3584..c3b551dc84 100644 --- a/testing/btest/Baseline/plugins.hooks/output +++ b/testing/btest/Baseline/plugins.hooks/output @@ -108,6 +108,8 @@ 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Analyzer::Logging::ignore_analyzers, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_confirmations, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_confirmations, lambda_<15261139872714441626>{ if (Analyzer::Logging::new_value) enable_event_group(Analyzer::Logging::include_confirmations)elsedisable_event_group(Analyzer::Logging::include_confirmations)return (Analyzer::Logging::new_value)}, 0)) -> +0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_disabling, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> +0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_disabling, lambda_<3084630089048204467>{ if (Analyzer::Logging::new_value) enable_event_group(Analyzer::Logging::include_disabling)elsedisable_event_group(Analyzer::Logging::include_disabling)return (Analyzer::Logging::new_value)}, 0)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Broker::peer_counts_as_iosource, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = 
Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Conn::analyzer_inactivity_timeouts, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Conn::default_extract, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) -> @@ -302,6 +304,7 @@ 0.000000 MetaHookPost CallFunction(__init_primary_bifs, , ()) -> 0.000000 MetaHookPost CallFunction(__init_secondary_bifs, , ()) -> 0.000000 MetaHookPost CallFunction(disable_event_group, , (Analyzer::Logging::include_confirmations)) -> +0.000000 MetaHookPost CallFunction(disable_event_group, , (Analyzer::Logging::include_disabling)) -> 0.000000 MetaHookPost CallFunction(enable_module_events, , (Analyzer::Logging)) -> 0.000000 MetaHookPost CallFunction(getenv, , (CLUSTER_NODE)) -> 0.000000 MetaHookPost CallFunction(getenv, , (ZEEK_DEFAULT_LISTEN_ADDRESS)) -> @@ -313,6 +316,7 @@ 0.000000 MetaHookPost CallFunction(is_packet_analyzer, , (AllAnalyzers::ANALYZER_ANALYZER_TCPSTATS)) -> 0.000000 MetaHookPost CallFunction(lambda_<15261139872714441626>, , (Analyzer::Logging::include_confirmations, F)) -> 0.000000 MetaHookPost CallFunction(lambda_<2645182068207650863>, , (Analyzer::Logging::enable, T)) -> +0.000000 MetaHookPost CallFunction(lambda_<3084630089048204467>, , (Analyzer::Logging::include_disabling, F)) -> 0.000000 MetaHookPost CallFunction(port_to_count, , (2123/udp)) -> 0.000000 MetaHookPost CallFunction(port_to_count, , (2152/udp)) -> 0.000000 MetaHookPost CallFunction(port_to_count, , (3544/udp)) -> @@ -1024,6 +1028,8 @@ 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Analyzer::Logging::ignore_analyzers, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_confirmations, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_confirmations, lambda_<15261139872714441626>{ if 
(Analyzer::Logging::new_value) enable_event_group(Analyzer::Logging::include_confirmations)elsedisable_event_group(Analyzer::Logging::include_confirmations)return (Analyzer::Logging::new_value)}, 0)) +0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_disabling, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) +0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Analyzer::Logging::include_disabling, lambda_<3084630089048204467>{ if (Analyzer::Logging::new_value) enable_event_group(Analyzer::Logging::include_disabling)elsedisable_event_group(Analyzer::Logging::include_disabling)return (Analyzer::Logging::new_value)}, 0)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Broker::peer_counts_as_iosource, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Conn::analyzer_inactivity_timeouts, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Conn::default_extract, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100)) @@ -1218,6 +1224,7 @@ 0.000000 MetaHookPre CallFunction(__init_primary_bifs, , ()) 0.000000 MetaHookPre CallFunction(__init_secondary_bifs, , ()) 0.000000 MetaHookPre CallFunction(disable_event_group, , (Analyzer::Logging::include_confirmations)) +0.000000 MetaHookPre CallFunction(disable_event_group, , (Analyzer::Logging::include_disabling)) 0.000000 MetaHookPre CallFunction(enable_module_events, , (Analyzer::Logging)) 0.000000 MetaHookPre CallFunction(getenv, , (CLUSTER_NODE)) 0.000000 MetaHookPre CallFunction(getenv, , (ZEEK_DEFAULT_LISTEN_ADDRESS)) @@ -1229,6 +1236,7 @@ 0.000000 MetaHookPre CallFunction(is_packet_analyzer, , (AllAnalyzers::ANALYZER_ANALYZER_TCPSTATS)) 0.000000 MetaHookPre CallFunction(lambda_<15261139872714441626>, , (Analyzer::Logging::include_confirmations, F)) 0.000000 MetaHookPre CallFunction(lambda_<2645182068207650863>, , (Analyzer::Logging::enable, T)) +0.000000 MetaHookPre CallFunction(lambda_<3084630089048204467>, , (Analyzer::Logging::include_disabling, F)) 0.000000 
MetaHookPre CallFunction(port_to_count, , (2123/udp)) 0.000000 MetaHookPre CallFunction(port_to_count, , (2152/udp)) 0.000000 MetaHookPre CallFunction(port_to_count, , (3544/udp)) @@ -1939,6 +1947,8 @@ 0.000000 | HookCallFunction Option::set_change_handler(Analyzer::Logging::ignore_analyzers, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(Analyzer::Logging::include_confirmations, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(Analyzer::Logging::include_confirmations, lambda_<15261139872714441626>{ if (Analyzer::Logging::new_value) enable_event_group(Analyzer::Logging::include_confirmations)elsedisable_event_group(Analyzer::Logging::include_confirmations)return (Analyzer::Logging::new_value)}, 0) +0.000000 | HookCallFunction Option::set_change_handler(Analyzer::Logging::include_disabling, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) +0.000000 | HookCallFunction Option::set_change_handler(Analyzer::Logging::include_disabling, lambda_<3084630089048204467>{ if (Analyzer::Logging::new_value) enable_event_group(Analyzer::Logging::include_disabling)elsedisable_event_group(Analyzer::Logging::include_disabling)return (Analyzer::Logging::new_value)}, 0) 0.000000 | HookCallFunction Option::set_change_handler(Broker::peer_counts_as_iosource, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(Conn::analyzer_inactivity_timeouts, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(Conn::default_extract, Config::config_option_changed{ if ( == Config::location) return (Config::new_value)Config::log = Config::Info($ts=network_time(), $id=Config::ID, 
$old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value))if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, to_any_coerce Config::log)return (Config::new_value)}, -100) @@ -2133,6 +2143,7 @@ 0.000000 | HookCallFunction __init_primary_bifs() 0.000000 | HookCallFunction __init_secondary_bifs() 0.000000 | HookCallFunction disable_event_group(Analyzer::Logging::include_confirmations) +0.000000 | HookCallFunction disable_event_group(Analyzer::Logging::include_disabling) 0.000000 | HookCallFunction enable_module_events(Analyzer::Logging) 0.000000 | HookCallFunction getenv(CLUSTER_NODE) 0.000000 | HookCallFunction getenv(ZEEK_DEFAULT_LISTEN_ADDRESS) @@ -2144,6 +2155,7 @@ 0.000000 | HookCallFunction is_packet_analyzer(AllAnalyzers::ANALYZER_ANALYZER_TCPSTATS) 0.000000 | HookCallFunction lambda_<15261139872714441626>(Analyzer::Logging::include_confirmations, F) 0.000000 | HookCallFunction lambda_<2645182068207650863>(Analyzer::Logging::enable, T) +0.000000 | HookCallFunction lambda_<3084630089048204467>(Analyzer::Logging::include_disabling, F) 0.000000 | HookCallFunction port_to_count(2123/udp) 0.000000 | HookCallFunction port_to_count(2152/udp) 0.000000 | HookCallFunction port_to_count(3544/udp) diff --git a/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-no-confirmations b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-default similarity index 80% rename from testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-no-confirmations rename to testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-default index ef8059b705..db390c9968 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-no-confirmations +++ b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-default @@ -13,6 +13,4 @@ XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 5399 XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - -XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - -XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-confirmations b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-confirmations index 87e8855b64..56c4033614 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-confirmations +++ b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-confirmations @@ -15,6 +15,4 @@ XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 5399 
XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - -XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - -XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-disabling b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-disabling new file mode 100644 index 0000000000..662d888f7c --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.analyzer.logging/analyzer.log-include-disabling @@ -0,0 +1,17 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path analyzer +#open XXXX-XX-XX-XX-XX-XX +#fields ts cause analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data +#types time string string string string string addr port addr port string string +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: out_of_bound: DCE_RPC_PDU:frag: -2665 > 31 - +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - +XXXXXXXXXX.XXXXXX violation protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 Binpac exception: binpac exception: &enforce violation : DCE_RPC_Header:rpc_vers - +XXXXXXXXXX.XXXXXX disabled protocol DCE_RPC ClEkJM2Vm5giqnMf4h - 10.0.0.55 53994 60.190.189.214 8124 - Disabled after 6 violations +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log b/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log index 6e0e60a2f8..f371070a8e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.telemetry/telemetry.log @@ -5,12 +5,12 @@ #unset_field - #path telemetry #open XXXX-XX-XX-XX-XX-XX -#fields ts peer metric_type prefix name labels 
label_values value -#types time string string string string vector[string] vector[string] double -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_stream_writes_total module,stream Conn,Conn::LOG 34.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_stream_writes_total module,stream DNS,DNS::LOG 34.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_stream_writes_total module,stream HTTP,HTTP::LOG 14.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_writer_writes_total writer,module,stream,filter-name,path default,Conn,conn,Conn::LOG,Log::WRITER_ASCII 30.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_writer_writes_total writer,module,stream,filter-name,path default,DNS,dns,DNS::LOG,Log::WRITER_ASCII 23.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_log_writer_writes_total writer,module,stream,filter-name,path default,HTTP,http,HTTP::LOG,Log::WRITER_ASCII 10.0 +#fields ts peer metric_type name labels label_values value +#types time string string string vector[string] vector[string] double +XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream Conn,Conn::LOG 34.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream DNS,DNS::LOG 34.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_stream_writes_total module,stream HTTP,HTTP::LOG 14.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total filter_name,module,path,stream,writer default,Conn,conn,Conn::LOG,Log::WRITER_ASCII 30.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total filter_name,module,path,stream,writer default,DNS,dns,DNS::LOG,Log::WRITER_ASCII 23.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_log_writer_writes_total filter_name,module,path,stream,writer default,HTTP,http,HTTP::LOG,Log::WRITER_ASCII 10.0 #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/conn.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/conn.log.cut new file mode 100644 index 0000000000..197f268773 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/conn.log.cut @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +id.orig_h id.orig_p id.resp_h id.resp_p history service +127.0.0.1 1080 127.0.0.1 8000 ^hADadFf http diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/files.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/files.log.cut new file mode 100644 index 0000000000..b0788b8a39 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/files.log.cut @@ -0,0 +1,4 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +id.orig_h id.orig_p id.resp_h id.resp_p analyzers mime_type sha1 +127.0.0.1 1080 127.0.0.1 8000 SHA1 image/png 1991cedee47909e324ac1b8bee2020d5690891e1 +127.0.0.1 1080 127.0.0.1 8000 SHA1 text/json eae909a9c2827d827ef30a6675a6388770ddc88d diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/http.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/http.log.cut new file mode 100644 index 0000000000..a8665ed118 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-orig/http.log.cut @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+id.orig_h id.orig_p id.resp_h id.resp_p host method uri version user_agent status_code status_msg +127.0.0.1 1080 127.0.0.1 8000 localhost:8000 POST / 1.1 curl/7.81.0 200 OK diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/conn.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/conn.log.cut new file mode 100644 index 0000000000..14aa69299b --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/conn.log.cut @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +id.orig_h id.orig_p id.resp_h id.resp_p history service +127.0.0.1 1080 127.0.0.1 80 ^hADadFf http diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/files.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/files.log.cut new file mode 100644 index 0000000000..d3f231e710 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/files.log.cut @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +id.orig_h id.orig_p id.resp_h id.resp_p analyzers mime_type sha1 +127.0.0.1 1080 127.0.0.1 80 SHA1 image/png 1991cedee47909e324ac1b8bee2020d5690891e1 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/http.log.cut b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/http.log.cut new file mode 100644 index 0000000000..8b6d2e26ec --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.flip-content-line-resp/http.log.cut @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +id.orig_h id.orig_p id.resp_h id.resp_p host method uri version user_agent status_code status_msg +127.0.0.1 1080 127.0.0.1 80 localhost GET /zeek.png 1.1 curl/7.81.0 200 OK diff --git a/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-client/analyzer.log b/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-client/analyzer.log new file mode 100644 index 0000000000..d96d1809f8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-client/analyzer.log @@ -0,0 +1,124 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path analyzer +#open XXXX-XX-XX-XX-XX-XX +#fields ts cause analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data +#types time string string string string string addr port addr port string string +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - 
+XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index 
for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac 
exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 
192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C1Xkzz2MaGtLrc1Tla - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CqlVyW1YwZ15RhTBc4 - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX 
violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CLNN1k2QMum1aexUK7 - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CBA8792iHmnhPLksKa - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CGLPPc35OzDQij1XX8 - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-server/analyzer.log b/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-server/analyzer.log new file mode 100644 index 0000000000..4296d41ecf --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ssh.half-duplex-server/analyzer.log @@ -0,0 +1,128 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. 
Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path analyzer +#open XXXX-XX-XX-XX-XX-XX +#fields ts cause analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data +#types time string string string string string addr port addr port string string +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CHhAvVGS1DHFjwGM9 - 192.168.1.79 51880 131.159.21.1 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH ClEkJM2Vm5giqnMf4h - 192.168.2.1 57189 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C4J4Th3PJpwUYZZ6gc - 192.168.2.1 57191 192.168.2.158 22 Binpac exception: binpac exception: invalid index for 
case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtPZjS20MLrsMUOJi2 - 192.168.2.1 56594 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CUM0KZ3MLUfNB0cl11 - 192.168.2.1 56821 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CmES5u32sYpV7JYN - 192.168.2.1 56837 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: 
binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CP5puj4I8PtEU4qzYg - 192.168.2.1 56845 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C37jN32gN3y3AZzyf6 - 192.168.2.1 56875 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C3eiCBGOLw3VtHfOj - 192.168.2.1 56878 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CwjjYJ2WqgTbAqiHl6 - 192.168.2.1 
56940 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C0LAHyvtKSQHyJxIl - 192.168.2.1 57831 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CFLRIC3zaTU1loLGxh - 192.168.2.1 59246 192.168.2.158 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9rXSW3KSpTYvPrlI1 - 192.168.1.32 41164 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation 
protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH Ck51lg1bScffFj34Ri - 192.168.1.32 33910 128.2.13.133 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C9mvWx3ezztgzcexV7 - 192.168.1.32 41268 128.2.10.238 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CNnMIj2QSd84NKf7U3 - 192.168.1.31 52294 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: 
SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH C7fIlMZDuRiqjpYbb - 192.168.1.31 51489 192.168.1.32 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CykQaM33ztNt0csB9a - 192.168.1.32 58641 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CtxTCR2Yer0FR1tIBg - 192.168.1.32 58646 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 
131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +XXXXXXXXXX.XXXXXX violation protocol SSH CpmdRlaUoJLN3uIRa - 192.168.1.32 58649 131.103.20.168 22 Binpac exception: binpac exception: invalid index for case: SSH_Key_Exchange: 3 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry.log b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry.log index d79fef633b..af06992a04 100644 --- a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry.log +++ b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry.log @@ -5,7 +5,7 @@ #unset_field - #path telemetry #open XXXX-XX-XX-XX-XX-XX -#fields ts peer metric_type prefix name labels label_values value -#types time string string string string vector[string] vector[string] double -XXXXXXXXXX.XXXXXX zeek counter btest btest_connections_total proto tcp 500.0 +#fields ts peer metric_type name labels label_values value +#types time string string string vector[string] vector[string] double +XXXXXXXXXX.XXXXXX zeek counter btest_connections_total proto tcp 500.0 #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry_histogram.log b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry_histogram.log index 79adb57972..a30298db72 100644 --- a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry_histogram.log +++ b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log-prefixes/telemetry_histogram.log @@ -5,8 +5,8 @@ #unset_field - #path telemetry_histogram #open XXXX-XX-XX-XX-XX-XX -#fields ts peer prefix name labels label_values bounds values sum observations -#types time string string string vector[string] vector[string] vector[double] vector[double] double double -XXXXXXXXXX.XXXXXX zeek btest btest_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,0.0,0.0,0.0,0.0,0.0,0.0 0.0 0.0 -XXXXXXXXXX.XXXXXX zeek btest btest_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,322.0,90.0,5.0,76.0,7.0,0.0 1650.264644 500.0 +#fields ts peer name labels label_values bounds values sum observations +#types time string string vector[string] vector[string] vector[double] vector[double] double double +XXXXXXXXXX.XXXXXX zeek btest_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,0.0,0.0,0.0,0.0,0.0,0.0 0.0 0.0 +XXXXXXXXXX.XXXXXX zeek btest_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,322.0,90.0,5.0,76.0,7.0,0.0 1650.264644 500.0 #close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered index 19b4b90ddb..c7b26a1f28 100644 --- a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered +++ b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered @@ -1,5 +1,5 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-XXXXXXXXXX.XXXXXX zeek counter zeek zeek_total_sessions_total protocol tcp 1.0 -XXXXXXXXXX.XXXXXX zeek gauge zeek zeek_active_sessions protocol tcp 1.0 -XXXXXXXXXX.XXXXXX zeek counter zeek zeek_total_sessions_total protocol tcp 500.0 -XXXXXXXXXX.XXXXXX zeek gauge zeek zeek_active_sessions protocol tcp 500.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_total_sessions_total protocol tcp 1.0 +XXXXXXXXXX.XXXXXX zeek gauge zeek_active_sessions protocol tcp 1.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_total_sessions_total protocol tcp 500.0 +XXXXXXXXXX.XXXXXX zeek gauge zeek_active_sessions protocol tcp 500.0 diff --git a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry_histogram.log.filtered b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry_histogram.log.filtered index b382cd5ca6..d47ba69d07 100644 --- a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry_histogram.log.filtered +++ b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry_histogram.log.filtered @@ -1,3 +1,3 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -XXXXXXXXXX.XXXXXX zeek zeek zeek_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,0.0,0.0,0.0,0.0,0.0,0.0 0.0 0.0 -XXXXXXXXXX.XXXXXX zeek zeek zeek_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,322.0,90.0,5.0,76.0,7.0,0.0 1650.264644 500.0 +XXXXXXXXXX.XXXXXX zeek zeek_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,0.0,0.0,0.0,0.0,0.0,0.0 0.0 0.0 +XXXXXXXXXX.XXXXXX zeek zeek_connection_duration_seconds (empty) (empty) 2.0,3.0,4.0,5.0,6.0,10.0,inf 0.0,322.0,90.0,5.0,76.0,7.0,0.0 1650.264644 500.0 diff --git a/testing/btest/Baseline/spicy.get-values/output b/testing/btest/Baseline/spicy.get-values/output new file mode 100644 index 0000000000..4e01755636 --- /dev/null +++ b/testing/btest/Baseline/spicy.get-values/output @@ -0,0 +1,6 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. + + + + + diff --git a/testing/btest/Baseline/spicy.replaces-conflicts-2/output b/testing/btest/Baseline/spicy.replaces-conflicts-2/output new file mode 100644 index 0000000000..b4f454e6e7 --- /dev/null +++ b/testing/btest/Baseline/spicy.replaces-conflicts-2/output @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +fatal error: spicy::SSH_2: file analyzer MD5 is already mapped to a different analyzer; cannot replace an analyzer multiple times diff --git a/testing/btest/Baseline/spicy.replaces-conflicts-3/output b/testing/btest/Baseline/spicy.replaces-conflicts-3/output new file mode 100644 index 0000000000..0733458438 --- /dev/null +++ b/testing/btest/Baseline/spicy.replaces-conflicts-3/output @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +fatal error: spicy::SSH_2: packet analyzer Ethernet is already mapped to a different analyzer; cannot replace an analyzer multiple times diff --git a/testing/btest/Baseline/spicy.replaces-conflicts/output b/testing/btest/Baseline/spicy.replaces-conflicts/output new file mode 100644 index 0000000000..8cf95b5195 --- /dev/null +++ b/testing/btest/Baseline/spicy.replaces-conflicts/output @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. 
Requires BTest >= 0.63. +fatal error: redefinition of protocol analyzer spicy::SSH_1 diff --git a/testing/btest/Traces/http/zeek-image-1080-80-x.pcap b/testing/btest/Traces/http/zeek-image-1080-80-x.pcap new file mode 100644 index 0000000000..a22a01f276 Binary files /dev/null and b/testing/btest/Traces/http/zeek-image-1080-80-x.pcap differ diff --git a/testing/btest/Traces/http/zeek-image-post-1080-8000-x.pcap b/testing/btest/Traces/http/zeek-image-post-1080-8000-x.pcap new file mode 100644 index 0000000000..184888de8c Binary files /dev/null and b/testing/btest/Traces/http/zeek-image-post-1080-8000-x.pcap differ diff --git a/testing/btest/bifs/from_json.zeek b/testing/btest/bifs/from_json.zeek index 9351c3b9ca..31d3438e7c 100644 --- a/testing/btest/bifs/from_json.zeek +++ b/testing/btest/bifs/from_json.zeek @@ -29,11 +29,12 @@ type Foo: record { re: pattern; su: subnet_set; se: set[addr, port]; + tbl: table[addr, port] of string; }; event zeek_init() { - local json = "{\"hello\":\"world\",\"t\":true,\"f\":false,\"se\":[[\"192.168.0.1\", \"80/tcp\"], [\"2001:db8::1\", \"8080/udp\"]],\"n\":null,\"i\":123,\"pi\":3.1416,\"a\":[\"1\",\"2\",\"3\",\"4\"],\"su\":[\"[aa:bb::0]/32\",\"192.168.0.0/16\"],\"c1\":\"A::Blue\",\"p\":\"1500/tcp\",\"it\":5000,\"ad\":\"127.0.0.1\",\"s\":\"[::1/128]\",\"re\":\"/a/\",\"ti\":1681652265.042767}"; + local json = "{\"hello\":\"world\",\"t\":true,\"f\":false,\"se\":[[\"192.168.0.1\", \"80/tcp\"], [\"2001:db8::1\", \"8080/udp\"]],\"n\":null,\"i\":123,\"pi\":3.1416,\"a\":[\"1\",\"2\",\"3\",\"4\"],\"su\":[\"[aa:bb::0]/32\",\"192.168.0.0/16\"],\"c1\":\"A::Blue\",\"p\":\"1500/tcp\",\"it\":5000,\"ad\":\"127.0.0.1\",\"s\":\"[::1/128]\",\"re\":\"/a/\",\"ti\":1681652265.042767,\"tbl\":{\"[\\\"192.168.0.1\\\",\\\"80/tcp\\\"]\":\"foo\"}}"; print from_json(json, Foo); } @@ -73,10 +74,14 @@ event zeek_init() @TEST-START-NEXT type port_t: port; -# wrong port format +# additional & incorrect port formats event zeek_init() { + # Ports can also be given as objects: + print from_json("{\"port\":80,\"proto\":\"tcp\"}", port_t); + # These are violations: print from_json("\"80\"", port_t); + print from_json("{}", port_t); } @TEST-START-NEXT diff --git a/testing/btest/btest.cfg b/testing/btest/btest.cfg index 5668992b56..b47ff2dddf 100644 --- a/testing/btest/btest.cfg +++ b/testing/btest/btest.cfg @@ -14,6 +14,7 @@ MinVersion = 0.63 [environment] ZEEKPATH=`bash -c %(testbase)s/../../%(build_dir)s/zeek-path-dev` HILTI_CXX_COMPILER_LAUNCHER=`f=%(testbase)s/../../%(build_dir)s/CMakeCache.txt && grep -q '^ENABLE_CCACHE:BOOL=true' $f && sed -n 's/^CCACHE_PROGRAM:FILEPATH=\(.*\)$/\1/p' $f` +HILTI_JIT_PARALLELISM=`bash -c 'echo ${HILTI_JIT_PARALLELISM:-1}'` ZEEK_SEED_FILE=%(testbase)s/random.seed ZEEK_PLUGIN_PATH= TZ=UTC diff --git a/testing/btest/core/file-analyzer-violation.zeek b/testing/btest/core/file-analyzer-violation.zeek index 6d73d2bfb6..70757bc8d2 100644 --- a/testing/btest/core/file-analyzer-violation.zeek +++ b/testing/btest/core/file-analyzer-violation.zeek @@ -1,20 +1,34 @@ # @TEST-DOC: Verify analyzer_violation_info is raised for an invalid PE file. 
# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff .stderr # @TEST-EXEC: btest-diff .stdout # @TEST-EXEC: btest-diff files.log @load base/frameworks/files @load base/files/pe +redef exit_only_after_terminate = T; + event analyzer_violation_info(tag: AllAnalyzers::Tag, info: AnalyzerViolationInfo) { print tag, info$reason, info$f$id, cat(info$f$info$analyzers); + terminate(); + } + +event force_terminate() + { + if ( zeek_is_terminating() ) + return; + + Reporter::error("force_terminate called - timeout?"); + terminate(); } event zeek_init() { local source: string = "./myfile.exe"; Input::add_analysis([$source=source, $name=source]); + schedule 10sec { force_terminate() }; } # This file triggers a binpac exception for PE that is reported through diff --git a/testing/btest/language/record-chain-assign.zeek b/testing/btest/language/record-chain-assign.zeek new file mode 100644 index 0000000000..ac896760e6 --- /dev/null +++ b/testing/btest/language/record-chain-assign.zeek @@ -0,0 +1,48 @@ +# @TEST-DOC: Test for correct ZAM optimization of record "chains". +# +# @TEST-REQUIRES: test "${ZEEK_USE_CPP}" != "1" +# @TEST-EXEC: zeek -b -O ZAM %INPUT >output +# @TEST-EXEC: btest-diff output + +type R: record { + a: count; + b: int; + c: double; +}; + +type Rev_R: record { + a: double; + b: int; + c: count; + + d: count; + e: int; + f: double; + + g: string; +}; + +global r1 = R($a = 3, $b = -12, $c = -42.3); +global r2 = R($a = 1003, $b = -10012, $c = -10042.3); + +global r3: Rev_R; + +r3$a = r1$c; +r3$b = r1$b; +r3$c = r1$a; +r3$d = r1$a; +r3$e = r2$b; +r3$f = r2$a; +r3$g = "tail"; + +print r3; + +r3$a += r1$c; +r3$b += r1$b; +r3$g = "intervening"; +r3$c += r2$a; +r3$d += r2$a; +r3$e += r2$b; +r3$f += r2$c; + +print r3; diff --git a/testing/btest/language/set.zeek b/testing/btest/language/set.zeek index 110856a7b4..24e2ce402c 100644 --- a/testing/btest/language/set.zeek +++ b/testing/btest/language/set.zeek @@ -186,82 +186,119 @@ function basic_functionality() test_case( "magnitude", |a_and_b| == |a_or_b|); } -function complex_index_types() +type tss_set: set[table[string] of string]; + +function complex_index_type_table() { # Initialization - local s1: set[table[string] of string] = { table(["k1"] = "v1") }; + local s: tss_set = { table(["k1"] = "v1") }; # Adding a member - add s1[table(["k2"] = "v2")]; + add s[table(["k2"] = "v2")]; # Various checks, including membership test - test_case( "table index size", |s1| == 2 ); - test_case( "table index membership", table(["k2"] = "v2") in s1 ); - test_case( "table index non-membership", table(["k2"] = "v3") !in s1 ); + test_case( "table index size", |s| == 2 ); + test_case( "table index membership", table(["k2"] = "v2") in s ); + test_case( "table index non-membership", table(["k2"] = "v3") !in s ); # Member deletion - delete s1[table(["k1"] = "v1")]; - test_case( "table index reduced size", |s1| == 1 ); + delete s[table(["k1"] = "v1")]; + test_case( "table index reduced size", |s| == 1 ); # Iteration - for ( ti in s1 ) + for ( ti in s ) { test_case( "table index iteration", to_json(ti) == to_json(table(["k2"] = "v2")) ); break; } + # JSON serialize/unserialize + local fjr = from_json(to_json(s), tss_set); + test_case( "table index JSON roundtrip success", fjr$valid ); + test_case( "table index JSON roundtrip correct", to_json(s) == to_json(fjr$v) ); +} + +type vs_set: set[vector of string]; + +function complex_index_type_vector() +{ # As above, for other index types - local s2: set[vector of string] = { vector("v1", "v2") }; + local s: vs_set = { 
vector("v1", "v2") }; - add s2[vector("v3", "v4")]; - test_case( "vector index size", |s2| == 2 ); - test_case( "vector index membership", vector("v3", "v4") in s2 ); - test_case( "vector index non-membership", vector("v4", "v5") !in s2 ); + add s[vector("v3", "v4")]; + test_case( "vector index size", |s| == 2 ); + test_case( "vector index membership", vector("v3", "v4") in s ); + test_case( "vector index non-membership", vector("v4", "v5") !in s ); - delete s2[vector("v1", "v2")]; - test_case( "vector index reduced size", |s2| == 1 ); + delete s[vector("v1", "v2")]; + test_case( "vector index reduced size", |s| == 1 ); - for ( vi in s2 ) + for ( vi in s ) { test_case( "vector index iteration", to_json(vi) == to_json(vector("v3", "v4")) ); break; } - local s3: set[set[string]] = { set("s1", "s2") }; + local fjr = from_json(to_json(s), vs_set); + test_case( "vector index JSON roundtrip success", fjr$valid ); + test_case( "vector index JSON roundtrip correct", to_json(s) == to_json(fjr$v) ); +} - add s3[set("s3", "s4")]; - test_case( "set index size", |s3| == 2 ); - test_case( "set index membership", set("s3", "s4") in s3 ); - test_case( "set index non-membership", set("s4", "s5") !in s3 ); +type ss_set: set[set[string]]; - delete s3[set("s1", "s2")]; - test_case( "set index reduced size", |s3| == 1 ); +function complex_index_type_set() +{ + local s: ss_set = { set("s1", "s2") }; - for ( si in s3 ) + add s[set("s3", "s4")]; + test_case( "set index size", |s| == 2 ); + test_case( "set index membership", set("s3", "s4") in s ); + test_case( "set index non-membership", set("s4", "s5") !in s ); + + delete s[set("s1", "s2")]; + test_case( "set index reduced size", |s| == 1 ); + + for ( si in s ) { test_case( "set index iteration", to_json(si) == to_json(set("s3", "s4")) ); break; } - local s4: set[pattern] = { /pat1/ }; + local fjr = from_json(to_json(s), ss_set); + test_case( "set index JSON roundtrip success", fjr$valid ); + test_case( "set index JSON roundtrip correct", to_json(s) == to_json(fjr$v) ); +} - add s4[/pat2/]; - test_case( "pattern index size", |s4| == 2 ); - test_case( "pattern index membership", /pat2/ in s4 ); - test_case( "pattern index non-membership", /pat3/ !in s4 ); +type p_set: set[pattern]; - delete s4[/pat1/]; - test_case( "pattern index reduced size", |s4| == 1 ); +function complex_index_type_pattern() +{ + local s: p_set = { /pat1/ }; - for ( pi in s4 ) + add s[/pat2/]; + test_case( "pattern index size", |s| == 2 ); + test_case( "pattern index membership", /pat2/ in s ); + test_case( "pattern index non-membership", /pat3/ !in s ); + + delete s[/pat1/]; + test_case( "pattern index reduced size", |s| == 1 ); + + for ( pi in s ) { test_case( "pattern index iteration", to_json(pi) == to_json(/pat2/) ); break; } + + local fjr = from_json(to_json(s), p_set); + test_case( "pattern index JSON roundtrip success", fjr$valid ); + test_case( "pattern index JSON roundtrip correct", to_json(s) == to_json(fjr$v) ); } event zeek_init() { basic_functionality(); - complex_index_types(); + complex_index_type_table(); + complex_index_type_vector(); + complex_index_type_set(); + complex_index_type_pattern(); } diff --git a/testing/btest/language/table.zeek b/testing/btest/language/table.zeek index db95f43d41..c4f73a3328 100644 --- a/testing/btest/language/table.zeek +++ b/testing/btest/language/table.zeek @@ -162,94 +162,130 @@ function basic_functionality() test_case( "!in operator", [cid, T] !in t11 ); } -function complex_index_types() +type tss_table: table[table[string] of string] of 
string; + +function complex_index_type_table() { # Initialization - local t1: table[table[string] of string] of string = { + local t: tss_table = { [table(["k1"] = "v1")] = "res1" }; # Adding a member - t1[table(["k2"] = "v2")] = "res2"; + t[table(["k2"] = "v2")] = "res2"; # Various checks, including membership test and lookup - test_case( "table index size", |t1| == 2 ); - test_case( "table index membership", table(["k2"] = "v2") in t1 ); - test_case( "table index non-membership", table(["k2"] = "v3") !in t1 ); - test_case( "table index lookup", t1[table(["k2"] = "v2")] == "res2" ); + test_case( "table index size", |t| == 2 ); + test_case( "table index membership", table(["k2"] = "v2") in t ); + test_case( "table index non-membership", table(["k2"] = "v3") !in t ); + test_case( "table index lookup", t[table(["k2"] = "v2")] == "res2" ); # Member deletion - delete t1[table(["k1"] = "v1")]; - test_case( "table index reduced size", |t1| == 1 ); + delete t[table(["k1"] = "v1")]; + test_case( "table index reduced size", |t| == 1 ); # Iteration - for ( ti in t1 ) + for ( ti in t ) { test_case( "table index iteration", to_json(ti) == to_json(table(["k2"] = "v2")) ); break; } - # As above, for other index types - local t2: table[vector of string] of string = { + # JSON serialize/unserialize + local fjr = from_json(to_json(t), tss_table); + test_case( "table index JSON roundtrip success", fjr$valid ); + test_case( "table index JSON roundtrip correct", to_json(t) == to_json(fjr$v) ); +} + +type vs_table: table[vector of string] of string; + +function complex_index_type_vector() +{ + local t: vs_table = { [vector("v1", "v2")] = "res1" }; - t2[vector("v3", "v4")] = "res2"; - test_case( "vector index size", |t2| == 2 ); - test_case( "vector index membership", vector("v3", "v4") in t2 ); - test_case( "vector index non-membership", vector("v4", "v5") !in t2 ); - test_case( "vector index lookup", t2[vector("v3", "v4")] == "res2" ); + t[vector("v3", "v4")] = "res2"; + test_case( "vector index size", |t| == 2 ); + test_case( "vector index membership", vector("v3", "v4") in t ); + test_case( "vector index non-membership", vector("v4", "v5") !in t ); + test_case( "vector index lookup", t[vector("v3", "v4")] == "res2" ); - delete t2[vector("v1", "v2")]; - test_case( "vector index reduced size", |t2| == 1 ); + delete t[vector("v1", "v2")]; + test_case( "vector index reduced size", |t| == 1 ); - for ( vi in t2 ) + for ( vi in t ) { test_case( "vector index iteration", to_json(vi) == to_json(vector("v3", "v4")) ); break; } - local t3: table[set[string]] of string = { + local fjr = from_json(to_json(t), vs_table); + test_case( "vector index JSON roundtrip success", fjr$valid ); + test_case( "vector index JSON roundtrip", to_json(t) == to_json(fjr$v) ); +} + +type ss_table: table[set[string]] of string; + +function complex_index_type_set() +{ + local t: ss_table = { [set("s1", "s2")] = "res1" }; - t3[set("s3", "s4")] = "res2"; - test_case( "set index size", |t3| == 2 ); - test_case( "set index membership", set("s3", "s4") in t3 ); - test_case( "set index non-membership", set("s4", "s5") !in t3 ); - test_case( "set index lookup", t3[set("s3", "s4")] == "res2" ); + t[set("s3", "s4")] = "res2"; + test_case( "set index size", |t| == 2 ); + test_case( "set index membership", set("s3", "s4") in t ); + test_case( "set index non-membership", set("s4", "s5") !in t ); + test_case( "set index lookup", t[set("s3", "s4")] == "res2" ); - delete t3[set("s1", "s2")]; - test_case( "set index reduced size", |t3| == 1 ); + delete 
t[set("s1", "s2")]; + test_case( "set index reduced size", |t| == 1 ); - for ( si in t3 ) + for ( si in t ) { test_case( "set index iteration", to_json(si) == to_json(set("s3", "s4")) ); break; } - local t4: table[pattern] of string = { + local fjr = from_json(to_json(t), ss_table); + test_case( "set index JSON roundtrip success", fjr$valid ); + test_case( "set index JSON roundtrip", to_json(t) == to_json(fjr$v) ); +} + +type tp_table: table[pattern] of string; + +function complex_index_type_pattern() +{ + local t: tp_table = { [/pat1/] = "res1" }; - t4[/pat2/] = "res2"; - test_case( "pattern index size", |t4| == 2 ); - test_case( "pattern index membership", /pat2/ in t4 ); - test_case( "pattern index non-membership", /pat3/ !in t4 ); - test_case( "pattern index lookup", t4[/pat2/] == "res2" ); + t[/pat2/] = "res2"; + test_case( "pattern index size", |t| == 2 ); + test_case( "pattern index membership", /pat2/ in t ); + test_case( "pattern index non-membership", /pat3/ !in t ); + test_case( "pattern index lookup", t[/pat2/] == "res2" ); - delete t4[/pat1/]; - test_case( "pattern index reduced size", |t4| == 1 ); + delete t[/pat1/]; + test_case( "pattern index reduced size", |t| == 1 ); - for ( pi in t4 ) + for ( pi in t ) { test_case( "pattern index iteration", to_json(pi) == to_json(/pat2/) ); break; } + + local fjr = from_json(to_json(t), tp_table); + test_case( "pattern index JSON roundtrip success", fjr$valid ); + test_case( "pattern index JSON roundtrip", to_json(t) == to_json(fjr$v) ); } event zeek_init() { basic_functionality(); - complex_index_types(); + complex_index_type_table(); + complex_index_type_vector(); + complex_index_type_set(); + complex_index_type_pattern(); } diff --git a/testing/btest/opt/ZAM-bif-tracking.zeek b/testing/btest/opt/ZAM-bif-tracking.zeek index 829f2d22d9..b6c027f8b1 100644 --- a/testing/btest/opt/ZAM-bif-tracking.zeek +++ b/testing/btest/opt/ZAM-bif-tracking.zeek @@ -8,557 +8,539 @@ # As new ones are added or old ones removed, attend to updating FuncInfo.cc # for ZAM, and then update the list here. 
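# A minimal sketch of how this list can be cross-checked from script land
# (illustrative only; it assumes every BiF shows up in global_ids(), and a
# real check would still need to filter out non-BiF globals):
#
#     for ( name, gid in global_ids() )
#         if ( name !in known_BiFs )
#             print fmt("untracked global, possibly a new BiF: %s", name);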
global known_BiFs = set( - "Analyzer::__disable_all_analyzers", - "Analyzer::__disable_analyzer", - "Analyzer::__enable_analyzer", - "Analyzer::__has_tag", - "Analyzer::__name", - "Analyzer::__register_for_port", - "Analyzer::__schedule_analyzer", - "Analyzer::__tag", - "Broker::__append", - "Broker::__auto_publish", - "Broker::__auto_unpublish", - "Broker::__clear", - "Broker::__close", - "Broker::__create_clone", - "Broker::__create_master", - "Broker::__data", - "Broker::__data_type", - "Broker::__decrement", - "Broker::__erase", - "Broker::__exists", - "Broker::__flush_logs", - "Broker::__forward", - "Broker::__get", - "Broker::__get_index_from_value", - "Broker::__increment", - "Broker::__insert_into_set", - "Broker::__insert_into_table", - "Broker::__is_closed", - "Broker::__keys", - "Broker::__listen", - "Broker::__node_id", - "Broker::__opaque_clone_through_serialization", - "Broker::__peer", - "Broker::__peer_no_retry", - "Broker::__peers", - "Broker::__pop", - "Broker::__publish_id", - "Broker::__push", - "Broker::__put", - "Broker::__put_unique", - "Broker::__record_assign", - "Broker::__record_create", - "Broker::__record_iterator", - "Broker::__record_iterator_last", - "Broker::__record_iterator_next", - "Broker::__record_iterator_value", - "Broker::__record_lookup", - "Broker::__record_size", - "Broker::__remove_from", - "Broker::__set_clear", - "Broker::__set_contains", - "Broker::__set_create", - "Broker::__set_insert", - "Broker::__set_iterator", - "Broker::__set_iterator_last", - "Broker::__set_iterator_next", - "Broker::__set_iterator_value", - "Broker::__set_metrics_export_endpoint_name", - "Broker::__set_metrics_export_interval", - "Broker::__set_metrics_export_prefixes", - "Broker::__set_metrics_export_topic", - "Broker::__set_metrics_import_topics", - "Broker::__set_remove", - "Broker::__set_size", - "Broker::__store_name", - "Broker::__subscribe", - "Broker::__table_clear", - "Broker::__table_contains", - "Broker::__table_create", - "Broker::__table_insert", - "Broker::__table_iterator", - "Broker::__table_iterator_last", - "Broker::__table_iterator_next", - "Broker::__table_iterator_value", - "Broker::__table_lookup", - "Broker::__table_remove", - "Broker::__table_size", - "Broker::__unpeer", - "Broker::__unsubscribe", - "Broker::__vector_clear", - "Broker::__vector_create", - "Broker::__vector_insert", - "Broker::__vector_iterator", - "Broker::__vector_iterator_last", - "Broker::__vector_iterator_next", - "Broker::__vector_iterator_value", - "Broker::__vector_lookup", - "Broker::__vector_remove", - "Broker::__vector_replace", - "Broker::__vector_size", - "Broker::make_event", - "Broker::publish", - "Cluster::publish_hrw", - "Cluster::publish_rr", - "FileExtract::__set_limit", - "Files::__add_analyzer", - "Files::__analyzer_enabled", - "Files::__analyzer_name", - "Files::__disable_analyzer", - "Files::__disable_reassembly", - "Files::__enable_analyzer", - "Files::__enable_reassembly", - "Files::__file_exists", - "Files::__lookup_file", - "Files::__remove_analyzer", - "Files::__set_reassembly_buffer", - "Files::__set_timeout_interval", - "Files::__stop", - "Input::__create_analysis_stream", - "Input::__create_event_stream", - "Input::__create_table_stream", - "Input::__force_update", - "Input::__remove_stream", - "Log::__add_filter", - "Log::__create_stream", - "Log::__delay", - "Log::__delay_finish", - "Log::__disable_stream", - "Log::__enable_stream", - "Log::__flush", - "Log::__get_delay_queue_size", - "Log::__remove_filter", - "Log::__remove_stream", - 
"Log::__set_buf", - "Log::__set_max_delay_interval", - "Log::__set_max_delay_queue_size", - "Log::__write", - "Option::any_set_to_any_vec", - "Option::set", - "Option::set_change_handler", - "PacketAnalyzer::GTPV1::remove_gtpv1_connection", - "PacketAnalyzer::TEREDO::remove_teredo_connection", - "PacketAnalyzer::__disable_analyzer", - "PacketAnalyzer::__enable_analyzer", - "PacketAnalyzer::__set_ignore_checksums_nets", - "PacketAnalyzer::register_packet_analyzer", - "PacketAnalyzer::register_protocol_detection", - "PacketAnalyzer::try_register_packet_analyzer_by_name", - "Pcap::error", - "Pcap::findalldevs", - "Pcap::get_filter_state", - "Pcap::get_filter_state_string", - "Pcap::install_pcap_filter", - "Pcap::precompile_pcap_filter", - "Reporter::conn_weird", - "Reporter::error", - "Reporter::fatal", - "Reporter::fatal_error_with_core", - "Reporter::file_weird", - "Reporter::flow_weird", - "Reporter::get_weird_sampling_duration", - "Reporter::get_weird_sampling_global_list", - "Reporter::get_weird_sampling_rate", - "Reporter::get_weird_sampling_threshold", - "Reporter::get_weird_sampling_whitelist", - "Reporter::info", - "Reporter::net_weird", - "Reporter::set_weird_sampling_duration", - "Reporter::set_weird_sampling_global_list", - "Reporter::set_weird_sampling_rate", - "Reporter::set_weird_sampling_threshold", - "Reporter::set_weird_sampling_whitelist", - "Reporter::warning", - "Spicy::__resource_usage", - "Spicy::__toggle_analyzer", - "Supervisor::__create", - "Supervisor::__destroy", - "Supervisor::__init_cluster", - "Supervisor::__is_supervised", - "Supervisor::__is_supervisor", - "Supervisor::__node", - "Supervisor::__restart", - "Supervisor::__status", - "Supervisor::__stem_pid", - "Telemetry::__collect_histogram_metrics", - "Telemetry::__collect_metrics", - "Telemetry::__dbl_counter_family", - "Telemetry::__dbl_counter_inc", - "Telemetry::__dbl_counter_metric_get_or_add", - "Telemetry::__dbl_counter_value", - "Telemetry::__dbl_gauge_dec", - "Telemetry::__dbl_gauge_family", - "Telemetry::__dbl_gauge_inc", - "Telemetry::__dbl_gauge_metric_get_or_add", - "Telemetry::__dbl_gauge_value", - "Telemetry::__dbl_histogram_family", - "Telemetry::__dbl_histogram_metric_get_or_add", - "Telemetry::__dbl_histogram_observe", - "Telemetry::__dbl_histogram_sum", - "Telemetry::__int_counter_family", - "Telemetry::__int_counter_inc", - "Telemetry::__int_counter_metric_get_or_add", - "Telemetry::__int_counter_value", - "Telemetry::__int_gauge_dec", - "Telemetry::__int_gauge_family", - "Telemetry::__int_gauge_inc", - "Telemetry::__int_gauge_metric_get_or_add", - "Telemetry::__int_gauge_value", - "Telemetry::__int_histogram_family", - "Telemetry::__int_histogram_metric_get_or_add", - "Telemetry::__int_histogram_observe", - "Telemetry::__int_histogram_sum", + "Analyzer::__disable_all_analyzers", + "Analyzer::__disable_analyzer", + "Analyzer::__enable_analyzer", + "Analyzer::__has_tag", + "Analyzer::__name", + "Analyzer::__register_for_port", + "Analyzer::__schedule_analyzer", + "Analyzer::__tag", + "Broker::__append", + "Broker::__auto_publish", + "Broker::__auto_unpublish", + "Broker::__clear", + "Broker::__close", + "Broker::__create_clone", + "Broker::__create_master", + "Broker::__data", + "Broker::__data_type", + "Broker::__decrement", + "Broker::__erase", + "Broker::__exists", + "Broker::__flush_logs", + "Broker::__forward", + "Broker::__get", + "Broker::__get_index_from_value", + "Broker::__increment", + "Broker::__insert_into_set", + "Broker::__insert_into_table", + "Broker::__is_closed", + 
"Broker::__keys", + "Broker::__listen", + "Broker::__node_id", + "Broker::__opaque_clone_through_serialization", + "Broker::__peer", + "Broker::__peer_no_retry", + "Broker::__peers", + "Broker::__pop", + "Broker::__publish_id", + "Broker::__push", + "Broker::__put", + "Broker::__put_unique", + "Broker::__record_assign", + "Broker::__record_create", + "Broker::__record_iterator", + "Broker::__record_iterator_last", + "Broker::__record_iterator_next", + "Broker::__record_iterator_value", + "Broker::__record_lookup", + "Broker::__record_size", + "Broker::__remove_from", + "Broker::__set_clear", + "Broker::__set_contains", + "Broker::__set_create", + "Broker::__set_insert", + "Broker::__set_iterator", + "Broker::__set_iterator_last", + "Broker::__set_iterator_next", + "Broker::__set_iterator_value", + "Broker::__set_remove", + "Broker::__set_size", + "Broker::__store_name", + "Broker::__subscribe", + "Broker::__table_clear", + "Broker::__table_contains", + "Broker::__table_create", + "Broker::__table_insert", + "Broker::__table_iterator", + "Broker::__table_iterator_last", + "Broker::__table_iterator_next", + "Broker::__table_iterator_value", + "Broker::__table_lookup", + "Broker::__table_remove", + "Broker::__table_size", + "Broker::__unpeer", + "Broker::__unsubscribe", + "Broker::__vector_clear", + "Broker::__vector_create", + "Broker::__vector_insert", + "Broker::__vector_iterator", + "Broker::__vector_iterator_last", + "Broker::__vector_iterator_next", + "Broker::__vector_iterator_value", + "Broker::__vector_lookup", + "Broker::__vector_remove", + "Broker::__vector_replace", + "Broker::__vector_size", + "Broker::make_event", + "Broker::publish", + "Cluster::publish_hrw", + "Cluster::publish_rr", + "FileExtract::__set_limit", + "Files::__add_analyzer", + "Files::__analyzer_enabled", + "Files::__analyzer_name", + "Files::__disable_analyzer", + "Files::__disable_reassembly", + "Files::__enable_analyzer", + "Files::__enable_reassembly", + "Files::__file_exists", + "Files::__lookup_file", + "Files::__remove_analyzer", + "Files::__set_reassembly_buffer", + "Files::__set_timeout_interval", + "Files::__stop", + "Input::__create_analysis_stream", + "Input::__create_event_stream", + "Input::__create_table_stream", + "Input::__force_update", + "Input::__remove_stream", + "Log::__add_filter", + "Log::__create_stream", + "Log::__delay", + "Log::__delay_finish", + "Log::__disable_stream", + "Log::__enable_stream", + "Log::__flush", + "Log::__get_delay_queue_size", + "Log::__remove_filter", + "Log::__remove_stream", + "Log::__set_buf", + "Log::__set_max_delay_interval", + "Log::__set_max_delay_queue_size", + "Log::__write", + "Option::any_set_to_any_vec", + "Option::set", + "Option::set_change_handler", + "PacketAnalyzer::GTPV1::remove_gtpv1_connection", + "PacketAnalyzer::TEREDO::remove_teredo_connection", + "PacketAnalyzer::__disable_analyzer", + "PacketAnalyzer::__enable_analyzer", + "PacketAnalyzer::__set_ignore_checksums_nets", + "PacketAnalyzer::register_packet_analyzer", + "PacketAnalyzer::register_protocol_detection", + "PacketAnalyzer::try_register_packet_analyzer_by_name", + "Pcap::error", + "Pcap::findalldevs", + "Pcap::get_filter_state", + "Pcap::get_filter_state_string", + "Pcap::install_pcap_filter", + "Pcap::precompile_pcap_filter", + "Reporter::conn_weird", + "Reporter::error", + "Reporter::fatal", + "Reporter::fatal_error_with_core", + "Reporter::file_weird", + "Reporter::flow_weird", + "Reporter::get_weird_sampling_duration", + "Reporter::get_weird_sampling_global_list", + 
"Reporter::get_weird_sampling_rate", + "Reporter::get_weird_sampling_threshold", + "Reporter::get_weird_sampling_whitelist", + "Reporter::info", + "Reporter::net_weird", + "Reporter::set_weird_sampling_duration", + "Reporter::set_weird_sampling_global_list", + "Reporter::set_weird_sampling_rate", + "Reporter::set_weird_sampling_threshold", + "Reporter::set_weird_sampling_whitelist", + "Reporter::warning", + "Spicy::__resource_usage", + "Spicy::__toggle_analyzer", + "Supervisor::__create", + "Supervisor::__destroy", + "Supervisor::__is_supervised", + "Supervisor::__is_supervisor", + "Supervisor::__node", + "Supervisor::__restart", + "Supervisor::__status", + "Supervisor::__stem_pid", + "Telemetry::__collect_histogram_metrics", + "Telemetry::__collect_metrics", + "Telemetry::__counter_family", + "Telemetry::__counter_inc", + "Telemetry::__counter_metric_get_or_add", + "Telemetry::__counter_value", + "Telemetry::__gauge_dec", + "Telemetry::__gauge_family", + "Telemetry::__gauge_inc", + "Telemetry::__gauge_metric_get_or_add", + "Telemetry::__gauge_value", + "Telemetry::__histogram_family", + "Telemetry::__histogram_metric_get_or_add", + "Telemetry::__histogram_observe", + "Telemetry::__histogram_sum", "WebSocket::__configure_analyzer", - "__init_primary_bifs", - "__init_secondary_bifs", - "active_file", - "addr_to_counts", - "addr_to_ptr_name", - "addr_to_subnet", - "all_set", - "anonymize_addr", - "any_set", - "backtrace", - "bare_mode", - "bloomfilter_add", - "bloomfilter_basic_init", - "bloomfilter_basic_init2", - "bloomfilter_clear", - "bloomfilter_counting_init", - "bloomfilter_decrement", - "bloomfilter_internal_state", - "bloomfilter_intersect", - "bloomfilter_lookup", - "bloomfilter_merge", - "bytestring_to_count", - "bytestring_to_double", - "bytestring_to_float", - "bytestring_to_hexstr", - "calc_next_rotate", - "cat", - "cat_sep", - "ceil", - "check_subnet", - "clean", - "clear_table", - "close", - "community_id_v1", - "compress_path", - "connection_exists", - "continue_processing", - "convert_for_pattern", - "count_substr", - "count_to_double", - "count_to_port", - "count_to_v4_addr", - "counts_to_addr", - "current_analyzer", - "current_event_time", - "current_time", - "decode_base64", - "decode_base64_conn", - "decode_netbios_name", - "decode_netbios_name_type", - "disable_analyzer", - "disable_event_group", - "disable_module_events", - "do_profiling", - "double_to_count", - "double_to_int", - "double_to_interval", - "double_to_time", - "dump_current_packet", - "dump_packet", - "dump_rule_stats", - "edit", - "enable_event_group", - "enable_module_events", - "enable_raw_output", - "encode_base64", - "ends_with", - "entropy_test_add", - "entropy_test_finish", - "entropy_test_init", - "enum_names", - "enum_to_int", - "escape_string", - "exit", - "exp", - "file_magic", - "file_mode", - "file_size", - "filter_subnet_table", - "find_all", - "find_all_ordered", - "find_entropy", + "__init_primary_bifs", + "__init_secondary_bifs", + "active_file", + "addr_to_counts", + "addr_to_ptr_name", + "addr_to_subnet", + "all_set", + "anonymize_addr", + "any_set", + "backtrace", + "bare_mode", + "bloomfilter_add", + "bloomfilter_basic_init", + "bloomfilter_basic_init2", + "bloomfilter_clear", + "bloomfilter_counting_init", + "bloomfilter_decrement", + "bloomfilter_internal_state", + "bloomfilter_intersect", + "bloomfilter_lookup", + "bloomfilter_merge", + "bytestring_to_count", + "bytestring_to_double", + "bytestring_to_float", + "bytestring_to_hexstr", + "calc_next_rotate", + "cat", + "cat_sep", + 
"ceil", + "check_subnet", + "clean", + "clear_table", + "close", + "community_id_v1", + "compress_path", + "connection_exists", + "continue_processing", + "convert_for_pattern", + "count_substr", + "count_to_double", + "count_to_port", + "count_to_v4_addr", + "counts_to_addr", + "current_analyzer", + "current_event_time", + "current_time", + "decode_base64", + "decode_base64_conn", + "decode_netbios_name", + "decode_netbios_name_type", + "disable_analyzer", + "disable_event_group", + "disable_module_events", + "do_profiling", + "double_to_count", + "double_to_int", + "double_to_interval", + "double_to_time", + "dump_current_packet", + "dump_packet", + "dump_rule_stats", + "edit", + "enable_event_group", + "enable_module_events", + "enable_raw_output", + "encode_base64", + "ends_with", + "entropy_test_add", + "entropy_test_finish", + "entropy_test_init", + "enum_names", + "enum_to_int", + "escape_string", + "exit", + "exp", + "file_magic", + "file_mode", + "file_size", + "filter_subnet_table", + "find_all", + "find_all_ordered", + "find_entropy", "find_in_zeekpath", - "find_last", - "find_str", - "floor", - "flush_all", - "fmt", - "fmt_ftp_port", - "fnv1a32", - "from_json", - "generate_all_events", - "get_broker_stats", - "get_conn_stats", - "get_conn_transport_proto", - "get_contents_file", - "get_current_conn_bytes_threshold", - "get_current_conn_duration_threshold", - "get_current_conn_packets_threshold", - "get_current_packet", - "get_current_packet_header", - "get_dns_stats", - "get_event_handler_stats", - "get_event_stats", - "get_file_analysis_stats", - "get_file_name", - "get_gap_stats", - "get_identifier_comments", - "get_identifier_declaring_script", - "get_login_state", - "get_matcher_stats", - "get_net_stats", - "get_orig_seq", - "get_package_readme", - "get_port_transport_proto", - "get_proc_stats", - "get_reassembler_stats", - "get_record_field_comments", - "get_record_field_declaring_script", - "get_reporter_stats", - "get_resp_seq", - "get_script_comments", - "get_thread_stats", - "get_timer_stats", - "getenv", - "gethostname", - "getpid", - "global_container_footprints", - "global_ids", - "global_options", - "gsub", - "has_event_group", - "has_module_events", - "have_spicy", - "have_spicy_analyzers", - "haversine_distance", - "hexdump", - "hexstr_to_bytestring", - "hll_cardinality_add", - "hll_cardinality_copy", - "hll_cardinality_estimate", - "hll_cardinality_init", - "hll_cardinality_merge_into", - "hrw_weight", - "identify_data", - "install_dst_addr_filter", - "install_dst_net_filter", - "install_src_addr_filter", - "install_src_net_filter", - "int_to_count", - "int_to_double", - "interval_to_double", - "is_alnum", - "is_alpha", - "is_ascii", - "is_file_analyzer", - "is_icmp_port", - "is_local_interface", - "is_num", - "is_packet_analyzer", - "is_processing_suspended", - "is_protocol_analyzer", - "is_remote_event", - "is_tcp_port", - "is_udp_port", - "is_v4_addr", - "is_v4_subnet", - "is_v6_addr", - "is_v6_subnet", - "is_valid_ip", - "join_string_set", - "join_string_vec", - "levenshtein_distance", - "ljust", - "ln", - "load_CPP", - "log10", - "log2", - "lookup_ID", - "lookup_addr", - "lookup_autonomous_system", - "lookup_connection", - "lookup_hostname", - "lookup_hostname_txt", - "lookup_location", - "lstrip", - "mask_addr", - "match_signatures", - "matching_subnets", - "md5_hash", - "md5_hash_finish", - "md5_hash_init", - "md5_hash_update", - "md5_hmac", - "mkdir", - "mmdb_open_asn_db", - "mmdb_open_location_db", - "network_time", - "open", - "open_for_append", - 
"order", - "packet_source", - "paraglob_equals", - "paraglob_init", - "paraglob_match", - "parse_distinguished_name", - "parse_eftp_port", - "parse_ftp_epsv", - "parse_ftp_pasv", - "parse_ftp_port", - "piped_exec", - "port_to_count", - "pow", - "preserve_prefix", - "preserve_subnet", - "print_raw", - "ptr_name_to_addr", - "rand", - "raw_bytes_to_v4_addr", - "raw_bytes_to_v6_addr", - "reading_live_traffic", - "reading_traces", - "record_fields", - "record_type_to_vector", - "remask_addr", - "remove_prefix", - "remove_suffix", - "rename", - "resize", - "reverse", - "rfind_str", - "rjust", - "rmdir", - "rotate_file", - "rotate_file_by_name", - "routing0_data_to_addrs", - "rstrip", - "safe_shell_quote", - "same_object", - "sct_verify", - "set_buf", - "set_contents_file", - "set_current_conn_bytes_threshold", - "set_current_conn_duration_threshold", - "set_current_conn_packets_threshold", - "set_file_handle", - "set_inactivity_timeout", - "set_keys", - "set_login_state", - "set_network_time", - "set_record_packets", - "set_secret", - "set_ssl_established", - "setenv", - "sha1_hash", - "sha1_hash_finish", - "sha1_hash_init", - "sha1_hash_update", - "sha256_hash", - "sha256_hash_finish", - "sha256_hash_init", - "sha256_hash_update", - "skip_further_processing", - "skip_http_entity_data", - "skip_smtp_data", - "sort", - "split_string", - "split_string1", - "split_string_all", - "split_string_n", - "sqrt", - "srand", - "starts_with", - "str_smith_waterman", - "str_split_indices", - "strcmp", - "strftime", - "string_cat", - "string_fill", - "string_to_ascii_hex", - "string_to_pattern", - "strip", - "strptime", - "strstr", - "sub", - "sub_bytes", - "subnet_to_addr", - "subnet_width", - "subst_string", - "suspend_processing", - "swap_case", - "syslog", - "system", - "system_env", - "table_keys", - "table_pattern_matcher_stats", - "table_values", - "terminate", - "time_to_double", - "to_addr", - "to_count", - "to_double", - "to_int", - "to_json", - "to_lower", - "to_port", - "to_string_literal", - "to_subnet", - "to_title", - "to_upper", - "topk_add", - "topk_count", - "topk_epsilon", - "topk_get_top", - "topk_init", - "topk_merge", - "topk_merge_prune", - "topk_size", - "topk_sum", - "type_aliases", - "type_name", - "unescape_URI", - "uninstall_dst_addr_filter", - "uninstall_dst_net_filter", - "uninstall_src_addr_filter", - "uninstall_src_net_filter", - "unique_id", - "unique_id_from", - "unlink", - "uuid_to_string", - "val_footprint", - "write_file", - "x509_check_cert_hostname", - "x509_check_hostname", - "x509_from_der", - "x509_get_certificate_string", - "x509_issuer_name_hash", - "x509_ocsp_verify", - "x509_parse", - "x509_set_certificate_cache", - "x509_set_certificate_cache_hit_callback", - "x509_spki_hash", - "x509_subject_name_hash", - "x509_verify", - "zeek_args", - "zeek_is_terminating", - "zeek_version", - "zfill", + "find_last", + "find_str", + "floor", + "flush_all", + "fmt", + "fmt_ftp_port", + "fnv1a32", + "from_json", + "generate_all_events", + "get_broker_stats", + "get_conn_stats", + "get_conn_transport_proto", + "get_contents_file", + "get_current_conn_bytes_threshold", + "get_current_conn_duration_threshold", + "get_current_conn_packets_threshold", + "get_current_packet", + "get_current_packet_header", + "get_dns_stats", + "get_event_handler_stats", + "get_event_stats", + "get_file_analysis_stats", + "get_file_name", + "get_gap_stats", + "get_identifier_comments", + "get_identifier_declaring_script", + "get_login_state", + "get_matcher_stats", + "get_net_stats", + "get_orig_seq", 
+ "get_package_readme", + "get_port_transport_proto", + "get_proc_stats", + "get_reassembler_stats", + "get_record_field_comments", + "get_record_field_declaring_script", + "get_reporter_stats", + "get_resp_seq", + "get_script_comments", + "get_thread_stats", + "get_timer_stats", + "getenv", + "gethostname", + "getpid", + "global_container_footprints", + "global_ids", + "global_options", + "gsub", + "has_event_group", + "has_module_events", + "have_spicy", + "have_spicy_analyzers", + "haversine_distance", + "hexdump", + "hexstr_to_bytestring", + "hll_cardinality_add", + "hll_cardinality_copy", + "hll_cardinality_estimate", + "hll_cardinality_init", + "hll_cardinality_merge_into", + "hrw_weight", + "identify_data", + "install_dst_addr_filter", + "install_dst_net_filter", + "install_src_addr_filter", + "install_src_net_filter", + "int_to_count", + "int_to_double", + "interval_to_double", + "is_alnum", + "is_alpha", + "is_ascii", + "is_file_analyzer", + "is_icmp_port", + "is_local_interface", + "is_num", + "is_packet_analyzer", + "is_processing_suspended", + "is_protocol_analyzer", + "is_remote_event", + "is_tcp_port", + "is_udp_port", + "is_v4_addr", + "is_v4_subnet", + "is_v6_addr", + "is_v6_subnet", + "is_valid_ip", + "join_string_set", + "join_string_vec", + "levenshtein_distance", + "ljust", + "ln", + "load_CPP", + "log10", + "log2", + "lookup_ID", + "lookup_addr", + "lookup_autonomous_system", + "lookup_connection", + "lookup_connection_analyzer_id", + "lookup_hostname", + "lookup_hostname_txt", + "lookup_location", + "lstrip", + "mask_addr", + "match_signatures", + "matching_subnets", + "md5_hash", + "md5_hash_finish", + "md5_hash_init", + "md5_hash_update", + "md5_hmac", + "mkdir", + "mmdb_open_asn_db", + "mmdb_open_location_db", + "network_time", + "open", + "open_for_append", + "order", + "packet_source", + "paraglob_equals", + "paraglob_init", + "paraglob_match", + "parse_distinguished_name", + "parse_eftp_port", + "parse_ftp_epsv", + "parse_ftp_pasv", + "parse_ftp_port", + "piped_exec", + "port_to_count", + "pow", + "preserve_prefix", + "preserve_subnet", + "print_raw", + "ptr_name_to_addr", + "rand", + "raw_bytes_to_v4_addr", + "raw_bytes_to_v6_addr", + "reading_live_traffic", + "reading_traces", + "record_fields", + "record_type_to_vector", + "remask_addr", + "remove_prefix", + "remove_suffix", + "rename", + "resize", + "reverse", + "rfind_str", + "rjust", + "rmdir", + "rotate_file", + "rotate_file_by_name", + "routing0_data_to_addrs", + "rstrip", + "safe_shell_quote", + "same_object", + "sct_verify", + "set_buf", + "set_contents_file", + "set_current_conn_bytes_threshold", + "set_current_conn_duration_threshold", + "set_current_conn_packets_threshold", + "set_file_handle", + "set_inactivity_timeout", + "set_keys", + "set_login_state", + "set_network_time", + "set_record_packets", + "set_secret", + "set_ssl_established", + "setenv", + "sha1_hash", + "sha1_hash_finish", + "sha1_hash_init", + "sha1_hash_update", + "sha256_hash", + "sha256_hash_finish", + "sha256_hash_init", + "sha256_hash_update", + "skip_further_processing", + "skip_http_entity_data", + "skip_smtp_data", + "sort", + "split_string", + "split_string1", + "split_string_all", + "split_string_n", + "sqrt", + "srand", + "starts_with", + "str_smith_waterman", + "str_split_indices", + "strcmp", + "strftime", + "string_cat", + "string_fill", + "string_to_ascii_hex", + "string_to_pattern", + "strip", + "strptime", + "strstr", + "sub", + "sub_bytes", + "subnet_to_addr", + "subnet_width", + "subst_string", + 
"suspend_processing", + "swap_case", + "syslog", + "system", + "system_env", + "table_keys", + "table_pattern_matcher_stats", + "table_values", + "terminate", + "time_to_double", + "to_addr", + "to_count", + "to_double", + "to_int", + "to_json", + "to_lower", + "to_port", + "to_string_literal", + "to_subnet", + "to_title", + "to_upper", + "topk_add", + "topk_count", + "topk_epsilon", + "topk_get_top", + "topk_init", + "topk_merge", + "topk_merge_prune", + "topk_size", + "topk_sum", + "type_aliases", + "type_name", + "unescape_URI", + "uninstall_dst_addr_filter", + "uninstall_dst_net_filter", + "uninstall_src_addr_filter", + "uninstall_src_net_filter", + "unique_id", + "unique_id_from", + "unlink", + "uuid_to_string", + "val_footprint", + "write_file", + "x509_check_cert_hostname", + "x509_check_hostname", + "x509_from_der", + "x509_get_certificate_string", + "x509_issuer_name_hash", + "x509_ocsp_verify", + "x509_parse", + "x509_set_certificate_cache", + "x509_set_certificate_cache_hit_callback", + "x509_spki_hash", + "x509_subject_name_hash", + "x509_verify", + "zeek_args", + "zeek_is_terminating", + "zeek_version", + "zfill", ); function fmt_str_set(s: set[string]): string diff --git a/testing/btest/scripts/base/frameworks/analyzer/logging.zeek b/testing/btest/scripts/base/frameworks/analyzer/logging.zeek index 6f8be11f24..7e6be8e6be 100644 --- a/testing/btest/scripts/base/frameworks/analyzer/logging.zeek +++ b/testing/btest/scripts/base/frameworks/analyzer/logging.zeek @@ -1,11 +1,19 @@ # @TEST-EXEC: zeek -r ${TRACES}/socks.trace %INPUT -# @TEST-EXEC: mv analyzer.log analyzer.log-no-confirmations -# @TEST-EXEC: btest-diff analyzer.log-no-confirmations +# @TEST-EXEC: mv analyzer.log analyzer.log-default +# @TEST-EXEC: btest-diff analyzer.log-default # @TEST-EXEC: zeek -r ${TRACES}/socks.trace %INPUT Analyzer::Logging::include_confirmations=T # @TEST-EXEC: mv analyzer.log analyzer.log-include-confirmations # @TEST-EXEC: btest-diff analyzer.log-include-confirmations +# @TEST-EXEC: zeek -r ${TRACES}/socks.trace %INPUT Analyzer::Logging::include_disabling=T +# @TEST-EXEC: mv analyzer.log analyzer.log-include-disabling +# @TEST-EXEC: btest-diff analyzer.log-include-disabling + @load base/protocols/conn @load base/protocols/dns @load base/protocols/socks + +# DCE RPC violations are ignored by default. Consider violations for this +# test so that the analyzer will be disabled eventually. 
+redef DPD::ignore_violations -= { Analyzer::ANALYZER_DCE_RPC }; diff --git a/testing/btest/scripts/base/frameworks/logging/telemetry.zeek b/testing/btest/scripts/base/frameworks/logging/telemetry.zeek index a71ffd7d00..43c1ab641e 100644 --- a/testing/btest/scripts/base/frameworks/logging/telemetry.zeek +++ b/testing/btest/scripts/base/frameworks/logging/telemetry.zeek @@ -8,6 +8,9 @@ @load policy/frameworks/telemetry/log +# Force telemetry output to be sorted for test determinism +redef running_under_test = T; + global http_logs = 0; hook HTTP::log_policy(rec: HTTP::Info, id: Log::ID, filter: Log::Filter) { @@ -28,7 +31,7 @@ hook Log::log_stream_policy(rec: any, id: Log::ID) hook Telemetry::log_policy(rec: Telemetry::Info, id: Log::ID, filter: Log::Filter) { - if ( rec$prefix != "zeek" || /^zeek_log_/ !in rec$name ) + if ( /^zeek_log_/ !in rec$name ) break; if ( /HTTP|DNS|Conn/ !in cat(rec$label_values) ) diff --git a/testing/btest/scripts/base/frameworks/telemetry/basic.zeek b/testing/btest/scripts/base/frameworks/telemetry/basic.zeek index 72b675dc0a..0592bff684 100644 --- a/testing/btest/scripts/base/frameworks/telemetry/basic.zeek +++ b/testing/btest/scripts/base/frameworks/telemetry/basic.zeek @@ -15,7 +15,7 @@ global btest_a_cf = Telemetry::register_counter_family([ $name="a_test", $unit="", $help_text="A btest metric", - $labels=vector("x", "y") + $label_names=vector("x", "y") ]); global btest_b_cf = Telemetry::register_counter_family([ @@ -23,7 +23,7 @@ global btest_b_cf = Telemetry::register_counter_family([ $name="b_test", $unit="", $help_text="Another btest metric", - $labels=vector("x", "y") + $label_names=vector("x", "y") ]); global btest_c_cf = Telemetry::register_counter_family([ @@ -31,7 +31,7 @@ global btest_c_cf = Telemetry::register_counter_family([ $name="c_test", $unit="", $help_text="The last btest metric", - $labels=vector("x", "y") + $label_names=vector("x", "y") ]); global system_sensor_temp_gf = Telemetry::register_gauge_family([ @@ -39,7 +39,7 @@ global system_sensor_temp_gf = Telemetry::register_gauge_family([ $name="sensor_temperature", $unit="celsius", $help_text="Temperatures reported by sensors in the system", - $labels=vector("name") + $label_names=vector("name") ]); global btest_sample_histogram_hf = Telemetry::register_histogram_family([ @@ -48,7 +48,7 @@ global btest_sample_histogram_hf = Telemetry::register_histogram_family([ $unit="", $help_text="A sample histogram that is not returned by Telemetry::collect_metrics", $bounds=vector(1.0, 2.0, 3.0, 4.0, 5.0), - $labels=vector("dim") + $label_names=vector("dim") ]); function print_metrics(what: string, metrics: vector of Telemetry::Metric) @@ -57,7 +57,7 @@ function print_metrics(what: string, metrics: vector of Telemetry::Metric) for (i in metrics) { local m = metrics[i]; - print m$opts$metric_type, m$opts$prefix, m$opts$name, m$opts$labels, m$labels, m$value; + print m$opts$metric_type, m$opts$prefix, m$opts$name, m$label_names, m$label_values, m$value; } } @@ -67,7 +67,7 @@ function print_histogram_metrics(what: string, metrics: vector of Telemetry::His for (i in metrics) { local m = metrics[i]; - print m$opts$metric_type, m$opts$prefix, m$opts$name, m$opts$bounds, m$opts$labels, m$labels, m$values, m$sum, m$observations; + print m$opts$metric_type, m$opts$prefix, m$opts$name, m$opts$bounds, m$label_names, m$label_values, m$values, m$sum, m$observations; } } diff --git a/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek 
b/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek index b892f5740c..3f01d9ddf3 100644 --- a/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek +++ b/testing/btest/scripts/base/frameworks/telemetry/conn-duration-histogram.zeek @@ -18,10 +18,10 @@ global connection_duration_hf = Telemetry::register_histogram_family([ global realistic_connection_duration_hf = Telemetry::register_histogram_family([ $prefix="zeek", $name="realistic_connection_duration", - $labels=vector("proto"), + $label_names=vector("proto"), $unit="seconds", $help_text="Monitored connection durations by protocol", - $bounds=vector(0.1, 1.0, 10.0, 30.0, 60.0, 120.0, 300, 900.0, 1800.0) + $bounds=vector(0.1, 1.0, 10.0, 30.0, 60.0, 120.0, 300, 900.0, 1800.0), ]); global connection_duration_h = Telemetry::histogram_with(connection_duration_hf); @@ -42,8 +42,8 @@ event zeek_done() &priority=-100 { local hm = histogram_metrics[i]; print hm$opts$metric_type, hm$opts$prefix, hm$opts$name; - print hm$opts$labels; - print hm$labels; + print hm$label_names; + print hm$label_values; print hm$opts$bounds; print hm$values; print hm$observations, hm$sum; diff --git a/testing/btest/scripts/base/frameworks/telemetry/event-handler-invocations.zeek b/testing/btest/scripts/base/frameworks/telemetry/event-handler-invocations.zeek index 5060c357a8..c0a9c73b2d 100644 --- a/testing/btest/scripts/base/frameworks/telemetry/event-handler-invocations.zeek +++ b/testing/btest/scripts/base/frameworks/telemetry/event-handler-invocations.zeek @@ -16,7 +16,7 @@ event zeek_done() &priority=-100 local ms = Telemetry::collect_metrics("zeek", "event_handler_invocations"); for ( _, m in ms ) { - if ( /zeek_.*|connection_.*/ in cat(m$labels)) - print m$opts$prefix, m$opts$name, m$labels, m$value; + if ( /zeek_.*|connection_.*/ in cat(m$label_values)) + print m$opts$prefix, m$opts$name, m$label_values, m$value; } } diff --git a/testing/btest/scripts/base/protocols/http/flip-content-line-orig.zeek b/testing/btest/scripts/base/protocols/http/flip-content-line-orig.zeek new file mode 100644 index 0000000000..320462fbd4 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/flip-content-line-orig.zeek @@ -0,0 +1,19 @@ +# @TEST-DOC: Flipping roles of a HTTP connection didn't flip the content line analyzers, resulting in inconsistent deliveries. 
Regression test for #3789 + +# Pcap contains a POST of the Zeek logo, expecting SHA1 1991cedee47909e324ac1b8bee2020d5690891e1 in files.log +# @TEST-EXEC: zeek -b -r $TRACES/http/zeek-image-post-1080-8000-x.pcap %INPUT +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p history service < conn.log > conn.log.cut +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p host method uri version user_agent status_code status_msg < http.log > http.log.cut +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p analyzers mime_type sha1 < files.log > files.log.cut +# @TEST-EXEC: btest-diff conn.log.cut +# @TEST-EXEC: btest-diff http.log.cut +# @TEST-EXEC: btest-diff files.log.cut + +@load base/protocols/conn +@load base/protocols/http +@load base/files/hash + +event file_new(f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_SHA1); + } diff --git a/testing/btest/scripts/base/protocols/http/flip-content-line-resp.zeek b/testing/btest/scripts/base/protocols/http/flip-content-line-resp.zeek new file mode 100644 index 0000000000..4123a34321 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/flip-content-line-resp.zeek @@ -0,0 +1,19 @@ +# @TEST-DOC: Flipping roles of a HTTP connection didn't flip the content line analyzers, resulting in inconsistent deliveries. Regression test for #3789 + +# Pcap contains a download of the Zeek logo, expecting SHA1 1991cedee47909e324ac1b8bee2020d5690891e1 in files.log +# @TEST-EXEC: zeek -b -r $TRACES/http/zeek-image-1080-80-x.pcap %INPUT +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p history service < conn.log > conn.log.cut +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p host method uri version user_agent status_code status_msg < http.log > http.log.cut +# @TEST-EXEC: zeek-cut -m id.orig_h id.orig_p id.resp_h id.resp_p analyzers mime_type sha1 < files.log > files.log.cut +# @TEST-EXEC: btest-diff conn.log.cut +# @TEST-EXEC: btest-diff http.log.cut +# @TEST-EXEC: btest-diff files.log.cut + +@load base/protocols/conn +@load base/protocols/http +@load base/files/hash + +event file_new(f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_SHA1); + } diff --git a/testing/btest/scripts/base/protocols/ssh/half-duplex-client.zeek b/testing/btest/scripts/base/protocols/ssh/half-duplex-client.zeek index 4657a44107..6e1242f286 100644 --- a/testing/btest/scripts/base/protocols/ssh/half-duplex-client.zeek +++ b/testing/btest/scripts/base/protocols/ssh/half-duplex-client.zeek @@ -2,7 +2,7 @@ # analyzer.log output. # @TEST-EXEC: zeek -r $TRACES/ssh/ssh.client-side-half-duplex.pcap %INPUT -# @TEST-EXEC: test ! -e analyzer.log +# @TEST-EXEC: btest-diff analyzer.log # @TEST-EXEC: btest-diff ssh.log # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff .stdout diff --git a/testing/btest/scripts/base/protocols/ssh/half-duplex-server.zeek b/testing/btest/scripts/base/protocols/ssh/half-duplex-server.zeek index 423148950d..232aa251fb 100644 --- a/testing/btest/scripts/base/protocols/ssh/half-duplex-server.zeek +++ b/testing/btest/scripts/base/protocols/ssh/half-duplex-server.zeek @@ -2,7 +2,7 @@ # analyzer.log output. # @TEST-EXEC: zeek -r $TRACES/ssh/ssh.server-side-half-duplex.pcap %INPUT -# @TEST-EXEC: test ! 
-e analyzer.log
+# @TEST-EXEC: btest-diff analyzer.log
# @TEST-EXEC: btest-diff ssh.log
# @TEST-EXEC: btest-diff conn.log
# @TEST-EXEC: btest-diff .stdout
diff --git a/testing/btest/scripts/base/utils/json.test b/testing/btest/scripts/base/utils/json.test
index 30e8e201ba..da741102fe 100644
--- a/testing/btest/scripts/base/utils/json.test
+++ b/testing/btest/scripts/base/utils/json.test
@@ -141,7 +141,7 @@ event zeek_init()
$name="btest_testing_gauge",
$unit="",
$help_text="Btest testing",
- $labels=vector("dim_1"),
+ $label_names=vector("dim_1"),
]);
local gauge = Telemetry::gauge_with(gauge_family, vector("dim_1_value"));
print to_json(gauge);
@@ -152,7 +152,7 @@ event zeek_init()
$name="btest_testing_counter",
$unit="",
$help_text="Btest testing",
- $labels=vector("dim_1"),
+ $label_names=vector("dim_1"),
]);
local counter = Telemetry::counter_with(counter_family, vector("dim_1_value"));
print to_json(counter);
diff --git a/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek b/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek
index 8c208fd9b5..0752c605e2 100644
--- a/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek
+++ b/testing/btest/scripts/policy/frameworks/telemetry/log-prefixes.zeek
@@ -13,7 +13,7 @@ global connections_by_proto_cf = Telemetry::register_counter_family([
$name="connections",
$unit="",
$help_text="Total number of monitored connections",
- $labels=vector("proto")
+ $label_names=vector("proto")
]);
global connection_duration_hf = Telemetry::register_histogram_family([
diff --git a/testing/btest/scripts/policy/frameworks/telemetry/prometheus.zeek b/testing/btest/scripts/policy/frameworks/telemetry/prometheus.zeek
index ff7155b267..0d6e7794b0 100644
--- a/testing/btest/scripts/policy/frameworks/telemetry/prometheus.zeek
+++ b/testing/btest/scripts/policy/frameworks/telemetry/prometheus.zeek
@@ -2,6 +2,7 @@
# Not compilable to C++ due to globals being initialized to a record that
# has an opaque type as a field.
# @TEST-REQUIRES: test "${ZEEK_USE_CPP}" != "1" +# @TEST-REQUIRES: which jq # # @TEST-PORT: BROKER_PORT1 # @TEST-PORT: BROKER_PORT2 diff --git a/testing/btest/spicy/double-types.zeek b/testing/btest/spicy/double-types.zeek index a67b0c5ef5..08fd1f178b 100644 --- a/testing/btest/spicy/double-types.zeek +++ b/testing/btest/spicy/double-types.zeek @@ -32,7 +32,7 @@ protocol analyzer spicy::dtest over TCP: on dtest::Message -> event dtest_message(self.func); -on dtest::Message -> event dtest_result(self.sub.result); +on dtest::Message -> event dtest_result(self.sub.result_); on dtest::Message -> event dtest_result_tuple(dtest::bro_result(self)); @@ -56,11 +56,11 @@ public type Message = unit { }; public type SubMessage = unit { - result: uint8 &convert=RESULT($$); + result_: uint8 &convert=RESULT($$); }; public function bro_result(entry: Message) : tuple { - return (entry.func, entry.sub.result); + return (entry.func, entry.sub.result_); } # @TEST-END-FILE diff --git a/testing/btest/spicy/export-protocol-enum.zeek b/testing/btest/spicy/export-protocol-enum.zeek index a021dccbd5..532d3da8de 100644 --- a/testing/btest/spicy/export-protocol-enum.zeek +++ b/testing/btest/spicy/export-protocol-enum.zeek @@ -41,7 +41,7 @@ import spicy; public type Message = unit { sswitch: uint8; - result: uint8; + result_: uint8; var p_tcp: spicy::Protocol = spicy::Protocol::TCP; var p_udp: spicy::Protocol = spicy::Protocol::UDP; diff --git a/testing/btest/spicy/get-values.spicy b/testing/btest/spicy/get-values.spicy new file mode 100644 index 0000000000..e0e5ab6170 --- /dev/null +++ b/testing/btest/spicy/get-values.spicy @@ -0,0 +1,99 @@ +# @TEST-REQUIRES: have-spicy +# +# @TEST-EXEC: spicyz -d -o test.hlto %INPUT +# @TEST-EXEC: zeek globals.zeek test.hlto Spicy::enable_print=T >output +# @TEST-EXEC: btest-diff output +# +# @TEST-DOC: Test access to Zeek-side globals. 
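+#
+# The asserts below exercise both access styles the zeek module provides:
+# typed getters such as zeek::get_count(), and the generic zeek::get_value()
+# combined with the corresponding zeek::as_*() casts.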
+
+module Foo;
+
+import zeek;
+
+assert zeek::get_address("Bar::address_") == 1.2.3.4;
+assert zeek::get_bool("Bar::bool_") == True;
+assert zeek::get_count("Bar::count_") == 42;
+assert zeek::get_double("Bar::double_") == 42.0;
+assert zeek::get_enum("Bar::enum_") == "Foo";
+assert zeek::get_int("Bar::int_") == 42;
+assert zeek::get_interval("Bar::interval_") == interval(42);
+assert zeek::get_port("Bar::port_") == 42/tcp;
+assert zeek::get_string("Bar::string_") == b"xxx";
+assert zeek::get_subnet("Bar::subnet_") == 1.2.3.4/16;
+assert zeek::get_time("Bar::time_") == time(42.0);
+
+assert zeek::as_address(zeek::get_value("Bar::address_")) == 1.2.3.4;
+assert zeek::as_bool(zeek::get_value("Bar::bool_")) == True;
+assert zeek::as_count(zeek::get_value("Bar::count_")) == 42;
+assert zeek::as_double(zeek::get_value("Bar::double_")) == 42.0;
+assert zeek::as_enum(zeek::get_value("Bar::enum_")) == "Foo";
+assert zeek::as_int(zeek::get_value("Bar::int_")) == 42;
+assert zeek::as_interval(zeek::get_value("Bar::interval_")) == interval(42);
+assert zeek::as_port(zeek::get_value("Bar::port_")) == 42/tcp;
+assert zeek::as_string(zeek::get_value("Bar::string_")) == b"xxx";
+assert zeek::as_subnet(zeek::get_value("Bar::subnet_")) == 1.2.3.4/16;
+assert zeek::as_time(zeek::get_value("Bar::time_")) == time(42.0);
+
+assert zeek::as_string(zeek::record_field("Bar::record_", "x")) == b"foo";
+assert zeek::as_int(zeek::record_field("Bar::record_", "y")) == 42;
+assert zeek::as_int(zeek::record_field(zeek::get_record("Bar::record_"), "y")) == 42;
+assert zeek::record_has_value("Bar::record_", "x");
+assert zeek::record_has_value(zeek::get_record("Bar::record_"), "y");
+assert zeek::record_has_value("Bar::record_", "y");
+assert ! zeek::record_has_value("Bar::record_", "z");
+assert zeek::record_has_field("Bar::record_", "x");
+assert ! zeek::record_has_field("Bar::record_", "z");
+assert-exception zeek::record_field("Bar::record_", "z"); # not set
+
+assert zeek::set_contains("Bar::set_", "foo");
+assert ! zeek::set_contains("Bar::set_", "xxx");
+assert zeek::set_contains(zeek::get_set("Bar::set_"), "foo");
+
+assert zeek::table_contains("Bar::table_", "foo");
+assert ! zeek::table_contains("Bar::table_", "xxx");
+assert zeek::table_contains(zeek::get_table("Bar::table_"), "foo");
+assert zeek::as_string(*zeek::table_lookup("Bar::table_", "foo")) == b"bar";
+assert zeek::as_string(*zeek::table_lookup(zeek::get_table("Bar::table_"), "foo")) == b"bar";
+assert ! zeek::table_lookup("Bar::table_", "does-not-exist");
+
+assert zeek::as_count(zeek::vector_index("Bar::vector_", 2)) == 2;
+assert zeek::as_count(zeek::vector_index(zeek::get_vector("Bar::vector_"), 2)) == 2;
+
+assert-exception zeek::get_bool("Bar::does_not_exist");
+assert-exception zeek::get_bool("Bar::string_");
+
+# Test stringification.
+print zeek::get_value("Bar::bool_"); +print zeek::get_record("Bar::record_"); +print zeek::get_set("Bar::set_"); +print zeek::get_table("Bar::table_"); +print zeek::get_vector("Bar::vector_"); + +# @TEST-START-FILE globals.zeek +module Bar; + +type Record: record { + x: string; + y: int &default=42; + z: bool &optional; +}; + +type Enum: enum { Foo, Bar }; + +const address_: addr = 1.2.3.4; +const bool_: bool = T; +const count_: count = 42; +const double_: double = 42.0; +const enum_: Enum = Foo; +const int_: int = 42; +const interval_: interval = 42sec; +const port_: port = 42/tcp; +const record_: Record = [$x="foo"]; +const set_: set[string] = set("foo", "bar"); +const string_: string = "xxx"; +const subnet_: subnet = 1.2.3.4/16; +const table_: table[string] of string = table(["foo"] = "bar"); +const time_: time = double_to_time(42.0); +const vector_: vector of count = vector(0, 1, 2); + +# @TEST-END-FILE diff --git a/testing/btest/spicy/multiple-enum.zeek b/testing/btest/spicy/multiple-enum.zeek index ff6508d715..e077db70fe 100644 --- a/testing/btest/spicy/multiple-enum.zeek +++ b/testing/btest/spicy/multiple-enum.zeek @@ -22,10 +22,10 @@ protocol analyzer spicy::dtest over TCP: parse originator with dtest::Message; on dtest::Message if ( self.sswitch == 83 ) - -> event dtest_one(self.result); + -> event dtest_one(self.result_); on dtest::Message if ( self.sswitch != 83 ) - -> event dtest_two(self.result); + -> event dtest_two(self.result_); # @TEST-END-FILE # @TEST-START-FILE dtest.spicy @@ -38,7 +38,7 @@ public type RESULT = enum { public type Message = unit { sswitch: uint8; - result: uint8 &convert=RESULT($$); + result_: uint8 &convert=RESULT($$); }; # @TEST-END-FILE diff --git a/testing/btest/spicy/replaces-conflicts.evt b/testing/btest/spicy/replaces-conflicts.evt new file mode 100644 index 0000000000..dbca6d637e --- /dev/null +++ b/testing/btest/spicy/replaces-conflicts.evt @@ -0,0 +1,42 @@ +# @TEST-REQUIRES: have-spicy +# +# @TEST-EXEC: spicyz -d -o ssh.hlto ssh.spicy %INPUT +# @TEST-EXEC-FAIL: zeek ssh.hlto >output 2>&1 +# @TEST-EXEC: btest-diff output + +# @TEST-START-FILE ssh.spicy +module SSH; + +import zeek; + +public type Banner = unit {}; +# @TEST-END-FILE + +protocol analyzer spicy::SSH_1 over TCP: + parse with SSH::Banner, + replaces SSH; + +protocol analyzer spicy::SSH_1 over UDP: + parse with SSH::Banner, + replaces SSH; + +# @TEST-START-NEXT + +file analyzer spicy::SSH_1: + parse with SSH::Banner, + replaces MD5; + +file analyzer spicy::SSH_2: + parse with SSH::Banner, + replaces MD5; + +# @TEST-START-NEXT + +packet analyzer spicy::SSH_1: + parse with SSH::Banner, + replaces Ethernet; + +packet analyzer spicy::SSH_2: + parse with SSH::Banner, + replaces Ethernet; + diff --git a/testing/coverage/lcov_html.sh b/testing/coverage/lcov_html.sh index 18898048b4..ba8c8a37df 100755 --- a/testing/coverage/lcov_html.sh +++ b/testing/coverage/lcov_html.sh @@ -36,14 +36,14 @@ Usage: $0 repo. Options: - --help Display this output. - --html DIR This is the default mode, but this argument can be passed - to make it explicit. It also can be used to pass an optional - destination directory for the HTML output. - --coveralls TOKEN Report coverage data to Coveralls.io using the specified - repo token. Enabling this option disables the HTML report. - This option requires the coveralls-lcov Ruby gem to be - installed. + --help Display this output. + --html DIR This is the default mode, but this argument can be passed + to make it explicit. 
It also can be used to pass an optional + destination directory for the HTML output. + --coveralls [TOKEN] Report coverage data to Coveralls.io using the specified + repo token. Enabling this option disables the HTML report. + This option requires the coveralls-lcov Ruby gem to be + installed. If TOKEN is empty, uses --dry-run mode. " echo "${usage}" @@ -63,15 +63,14 @@ while (("$#")); do fi ;; --coveralls) - if [ ${#2} -eq 0 ]; then - echo "ERROR: Coveralls repo token must be passed with --coveralls argument." - echo - usage - fi - HTML_REPORT=0 - COVERALLS_REPO_TOKEN=$2 - shift 2 + if [ ${#2} -eq 0 ]; then + echo "WARN: No coveralls token, running coveralls-lcov --dry-run." + shift 1 + else + COVERALLS_REPO_TOKEN=$2 + shift 2 + fi ;; --help) usage @@ -131,19 +130,19 @@ if [ $HTML_REPORT -eq 1 ]; then echo -n "Creating HTML files... " verify_run "genhtml --ignore-errors empty -o $COVERAGE_HTML_DIR $COVERAGE_FILE" else - if [ "${CIRRUS_BRANCH}" != "master" ]; then - echo "Coverage upload skipped for non-master branches" - exit 0 - fi - # The data we send to coveralls has a lot of duplicate files in it because of the # zeek symlink in the src directory. Run a script that cleans that up. echo -n "Cleaning coverage data for Coveralls..." COVERAGE_FILE_CLEAN="${COVERAGE_FILE}.clean" verify_run "testing/coverage/coverage_cleanup.py ${COVERAGE_FILE} > ${COVERAGE_FILE_CLEAN} 2>&1" - echo -n "Reporting to Coveralls..." - coveralls_cmd="coveralls-lcov -t ${COVERALLS_REPO_TOKEN}" + if [ "${CIRRUS_BRANCH}" == "master" ] && [ -n "${COVERALLS_REPO_TOKEN}" ]; then + echo -n "Reporting to Coveralls..." + coveralls_cmd="coveralls-lcov -t ${COVERALLS_REPO_TOKEN}" + else + echo "Reporting to Coveralls in --dry-run mode" + coveralls_cmd="coveralls-lcov --dry-run" + fi # If we're being called by Cirrus, add some additional information to the output. if [ -n "${CIRRUS_BUILD_ID}" ]; then diff --git a/testing/external/commit-hash.zeek-testing-cluster b/testing/external/commit-hash.zeek-testing-cluster index 6c5a060b06..8b8bfdc2e4 100644 --- a/testing/external/commit-hash.zeek-testing-cluster +++ b/testing/external/commit-hash.zeek-testing-cluster @@ -1 +1 @@ -216efb97832e412ae0197dc8e36069d8c35fd81c +45582671c6715e719d91c8afde7ffb480c602441 diff --git a/testing/external/commit-hash.zeek-testing-private b/testing/external/commit-hash.zeek-testing-private index 169d80e658..b41c8fda5c 100644 --- a/testing/external/commit-hash.zeek-testing-private +++ b/testing/external/commit-hash.zeek-testing-private @@ -1 +1 @@ -8dd88e9b33da35feaae860b158bc91586ff17136 +3df94cb39ab9c0079e82a7f2cd5edb561c2ec07b
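# Usage sketch for the reworked --coveralls handling in lcov_html.sh above
# (illustrative only; paths assume invocation from the repository root):
#
#     ./testing/coverage/lcov_html.sh --coveralls          # no token: coveralls-lcov --dry-run
#     ./testing/coverage/lcov_html.sh --coveralls "$TOKEN" # real upload, only on the master branch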