diff --git a/.cirrus.yml b/.cirrus.yml index b96146f230..446116bb5b 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -14,9 +14,9 @@ config: &CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WOR no_spicy_config: &NO_SPICY_CONFIG --build-type=release --disable-broker-tests --disable-spicy --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror static_config: &STATIC_CONFIG --build-type=release --disable-broker-tests --enable-static-broker --enable-static-binpac --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror binary_config: &BINARY_CONFIG --prefix=$CIRRUS_WORKING_DIR/install --libdir=$CIRRUS_WORKING_DIR/install/lib --binary-package --enable-static-broker --enable-static-binpac --disable-broker-tests --build-type=Release --ccache --enable-werror -asan_sanitizer_config: &ASAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=address --enable-fuzzers --enable-coverage --disable-spicy --ccache -ubsan_sanitizer_config: &UBSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=undefined --enable-fuzzers --disable-spicy --ccache --enable-werror -tsan_sanitizer_config: &TSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=thread --enable-fuzzers --disable-spicy --ccache --enable-werror +asan_sanitizer_config: &ASAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=address --enable-fuzzers --enable-coverage --ccache --enable-werror +ubsan_sanitizer_config: &UBSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=undefined --enable-fuzzers --ccache --enable-werror +tsan_sanitizer_config: &TSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=thread --enable-fuzzers --ccache --enable-werror resources_template: &RESOURCES_TEMPLATE cpu: *CPUS @@ -35,8 +35,7 @@ macos_environment: &MACOS_ENVIRONMENT freebsd_resources_template: &FREEBSD_RESOURCES_TEMPLATE cpu: 8 - # Not allowed to request less than 8GB for an 8 CPU FreeBSD VM. - memory: 8GB + memory: *MEMORY # For greediness, see https://medium.com/cirruslabs/introducing-greedy-container-instances-29aad06dc2b4 greedy: true diff --git a/CHANGES b/CHANGES index e8abed6565..a176d02e60 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,421 @@ +7.1.0-dev.141 | 2024-08-12 11:07:32 +0200 + + * spicyz: Add back message about removed support for port / ports in evt (Arne Welzel, Corelight) + + spicy-dhcp, spicy-http and spicy-dns all have this still in their .evt files, + so it seems popular. Be more helpful than "unexpected token" to users. + +7.1.0-dev.139 | 2024-08-09 20:57:23 +0200 + + * rule-parse: Remove id_to_str() lookup to squelch coverity warning (Arne Welzel, Corelight) + + Coverity didn't like that id_to_str() allocates memory and we didn't + free it. Remove its usage wholesale. + + * GH-3774: btest: Skip core.script-args under TSAN (Arne Welzel, Corelight) + + TSAN may re-execute the executable when the memory layout doesn't + fulfill requirements, causing argument confusion when that happens. + + Closes #3774.
+ + * Update zeekctl submodule [nomail] (Tim Wojtulewicz, Corelight) + +7.1.0-dev.133 | 2024-08-08 10:44:31 +0200 + + * Update zeekctl submodule (Tim Wojtulewicz) + + * Add note to NEWS about the removal of OpaqueVal::DoSerialize and OpaqueVal::DoUnserialize (Tim Wojtulewicz, Corelight) + +7.1.0-dev.129 | 2024-08-07 12:20:22 -0700 + + * Remove deprecated port/ports fields for spicy analyzers (Tim Wojtulewicz, Corelight) + + * Remove deprecated Cluster::Node::interface field (Tim Wojtulewicz, Corelight) + + * Remove deprecated signature definition format (Tim Wojtulewicz, Corelight) + + * Return an error if GLOBAL:: prefix is used (Tim Wojtulewicz, Corelight) + + * Remove deprecated BloomFilter serialization methods (Tim Wojtulewicz, Corelight) + + * Remove deprecated OpaqueVal serialization methods (Tim Wojtulewicz, Corelight) + + * Remove deprecated DECLARE_OPAQUE_VALUE macro (Tim Wojtulewicz, Corelight) + + * Make TypePtr::Capture member variables private (Tim Wojtulewicz, Corelight) + + The public versions were marked as deprecated for 7.0, and accessors + should be used to manage them now. + + * Remove deprecated Trigger constructor (Tim Wojtulewicz, Corelight) + + * Remove deprecated Controller::auto_assign_ports and Controller::auto_assign_start_port (Tim Wojtulewicz, Corelight) + + * Remove deprecated load-balancing policy script (Tim Wojtulewicz, Corelight) + + * Remove deprecated prometheus telemetry policy script (Tim Wojtulewicz, Corelight) + + * Remove deprecated policy/tuning/default package (Tim Wojtulewicz, Corelight) + + * Remove deprecated time machine settings (Tim Wojtulewicz, Corelight) + + * Remove deprecated json NullDoubleWriter class (Tim Wojtulewicz, Corelight) + + * Remove deprecated modbus event definitions (Tim Wojtulewicz, Corelight) + + * Remove Connection::AppendAddl (Tim Wojtulewicz, Corelight) + + * Remove STMT_ANY statement type (Tim Wojtulewicz, Corelight) + + * Remove EventRegistry::Used and EventRegistry::SetUsed (Tim Wojtulewicz, Corelight) + +7.1.0-dev.109 | 2024-08-07 14:10:54 +0200 + + * ldap: Promote uint8 to uint64 before shifting (Arne Welzel, Corelight) + + Relates to zeek/spicy#1829 + +7.1.0-dev.107 | 2024-08-07 11:43:15 +0200 + + * ci/ubuntu-24.04: Use ccache 4.10.2 (Arne Welzel, Corelight) + + The ccache version shipped with Ubuntu 24.04 does not yet recognize + --fprofile-update=atomic; install one that does. + + Now that the asan_sanitizer build also includes building Spicy and + running the spicyz test suite, ccache is quite important. + + Reference ccache/ccache#1408 and zeek/zeek#3777. + +7.1.0-dev.105 | 2024-08-07 10:54:10 +0200 + + * telemetry/Manager: Check RegisterFd() return value (Arne Welzel, Corelight) + + Please coverity. + + * telemetry/Manager: Track sent_in and sent_out totals without callback (Arne Welzel, Corelight) + + For terminated threads, the totals would go down once the threads are + removed, which isn't great. Move tracking of sent in and sent out + messages from callback to explicit `Inc()` calls. + + Also fixes total_messages_in_metric being initialized twice rather + than total_messages_out_metric. + + * threading/Manager: Switch inf bucket from infinity() to max() (Arne Welzel, Corelight) + + For uint64_t, std::numeric_limits::has_infinity is false and infinity() + actually returns 0. Use uint64_t's max() instead. We could cast to double + and use the double infinity, but this seems reasonable, too.
+ + This was found while trying to provoke some pending messages and being + confused why all but the "inf" bucket increased. + + * threading/Manager: "lt" to "le" and do not break (Arne Welzel, Corelight) + + The buckets are specified as lower-equal (changed from lower-than now), + which means we shouldn't break: The larger "le" bucket contains all previous + buckets, too. The "inf" bucket represents the current number of threads. + + For example, with a total of 10 threads, 5 threads with 0 messages pending, + another 4 threads with 50 messages, and one with 2000 messages, the metrics + would end up as follows: + + pending_buckets{le=1} = 5 + pending_buckets{le=10} = 5 + pending_buckets{le=100} = 9 + pending_buckets{le=1000} = 9 + pending_buckets{le=10000} = 10 + pending_buckets{le=inf} = 10 + + This might be strange initially, but aligns with the Prometheus + histogram approach (though we're using gauges here). + +7.1.0-dev.99 | 2024-08-06 20:08:37 +0200 + + * Bump auxil/spicy to latest development snapshot (Arne Welzel, Corelight) + + * spicy/runtime-support: Switch ParameterMismatch::_fmt to static (Arne Welzel, Corelight) + + UBSAN's vptr sanitizer isn't happy with the call to _fmt() + in its member initializer list. + + $ zeek -r Traces/ssh/single-conn.trace .tmp/spicy.event-args-mismatch/test.hlto .tmp/spicy.event-args-mismatch/event-args-mismatch.zeek + <...>/src/include/zeek/spicy/runtime-support.h:80:29: runtime error: member call on address 0x511000369540 which does not point to an object of type 'zeek::spicy::rt::ParameterMismatch' + 0x511000369540: note: object has invalid vptr + 00 00 00 00 be be be be be be be be be be be be be be be be be be be be be be be be be be be be + ^~~~~~~~~~~~~~~~~~~~~~~ + invalid vptr + #0 0x7f9c9977b019 in zeek::spicy::rt::ParameterMismatch::ParameterMismatch(std::basic_string_view>, zeek::IntrusivePtr const&, std::basic_string_view>) <...>/src/include/zeek/spicy/runtime-support.h:80:29 + #1 0x7f9c9977a6a2 in zeek::spicy::rt::to_val(hilti::rt::Bytes const&, zeek::IntrusivePtr const&) <...>/src/include/zeek/spicy/runtime-support.h:562:15 + + * coverage/lcov_html: Ignore testing/btest/.tmp (Arne Welzel, Corelight) + + gcda/gcno files in the btest/.tmp directory are from .hlto files + referencing ephemeral cc files. No need to include these. + + * cirrus: Do not disable Spicy for sanitizer builds (Arne Welzel, Corelight) + + * ldap: Avoid unset m$opcode (Arne Welzel, Corelight) + + Initial fuzzing caused a bind response to arrive before a bind request, + resulting in an unset field expression error: + + expression error in base/protocols/ldap/main.zeek, line 270: field value missing (LDAP::m$opcode) + + Prevent this by ensuring m$opcode is set and raising a weird instead. + + * GH-3860: fuzzers: Add LDAP fuzzing (Arne Welzel, Corelight) + + LDAP supports both UDP and TCP, as separate analyzers. The corpus + is identical, however. Started to hit the TLS analyzer fairly + quickly, too. + + Closes #3860 + +7.1.0-dev.92 | 2024-08-06 09:01:40 -0700 + + * Don't install empty ZAM directories (Tim Wojtulewicz, Corelight) + + * btest/spicy: Make replaces-conflicts trigger replaces code path (Arne Welzel, Corelight) + + The current test attempts to instantiate two spicy::SSH_1 protocol + analyzers in the .evt file. The intention likely was to use two + distinct protocol analyzers, both trying to replace the builtin SSH + analyzer.
+ + Coincidentally, fixing this happens to work around TSAN errors tickled + by the FatalError() call while loading the .hlto with two identically + named analyzers. + + $ cat .tmp/spicy.replaces-conflicts/output + error: redefinition of protocol analyzer spicy::SSH_1 + ThreadSanitizer: main thread finished with ignores enabled + One of the following ignores was not ended (in order of probability) + Ignore was enabled at: + #0 __llvm_gcov_init __linker___d192e45c25d5ee23-484d3e0fc2caf5b4.cc (ssh.hlto+0x34036) (BuildId: 091934ca4da885e7) + #1 __llvm_gcov_init __linker___d192e45c25d5ee23-484d3e0fc2caf5b4.cc (ssh.hlto+0x34036) (BuildId: 091934ca4da885e7) + ... + + I was tempted to replace FatalError() with Error() and rely on + zeek-setup.cc's early exiting on any reporter errors, but this + seems easier for now. + + Relates to #3865. + +7.1.0-dev.87 | 2024-08-05 14:39:56 -0700 + + * Remove some unnecessary #includes (Tim Wojtulewicz, Corelight) + + * Avoid capturing 'this' for callback in telemetry::Manager (Tim Wojtulewicz, Corelight) + + * Make telemetry metrics out of MsgThread statistics (Tim Wojtulewicz, Corelight) + + * Split cpu time metric into user/system components like prof.log (Tim Wojtulewicz, Corelight) + + The total can be calculated from the two parts via Prometheus/Grafana + if desired, so it's more informative to pass them as separate parts. + + * Move broker statistics to be telemetry metrics (Tim Wojtulewicz, Corelight) + + * Add timer counts as telemetry metrics (Tim Wojtulewicz, Corelight) + + * Move thread manager stats to telemetry metric (Tim Wojtulewicz, Corelight) + + * Add extra metrics to session_mgr (Tim Wojtulewicz, Corelight) + + - Sessions killed by activity + - Current number of sessions across all types + + * Move dns_mgr stats to telemetry instruments (Tim Wojtulewicz, Corelight) + + * Move trigger stats to telemetry instruments (Tim Wojtulewicz, Corelight) + + * Move pulling of global state inside 'expensive' check for stats (Tim Wojtulewicz, Corelight) + +7.1.0-dev.74 | 2024-08-02 15:49:40 -0700 + + * Process metric callbacks from the main-loop thread (Tim Wojtulewicz, Corelight) + + This keeps the callbacks from being processed on the worker thread + spawned by Civetweb. It fixes data race issues with lookups involving + global variables, amongst other threading issues. + + * CI: Use 16GB of memory for FreeBSD builds (Tim Wojtulewicz, Corelight) + +7.1.0-dev.68 | 2024-07-26 10:09:48 -0700 + + * Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight) + + This in particular pulls in a fix for zeek/spicy#1808. + +7.1.0-dev.66 | 2024-07-26 15:14:36 +0200 + + * GH-3853: ldap: Recognize SASL+SPNEGO+NTLMSSP (Arne Welzel, Corelight) + + The ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap file was harvested + from the CTU-SME-11 (Experiment-VM-Microsoft-Windows7AD-1) dataset + at https://zenodo.org/records/7958259 (DOI 10.5281/zenodo.7958258). + + Closes #3853 + +7.1.0-dev.63 | 2024-07-25 09:19:20 -0700 + + * Use accessor to reach into X509_ALGOR (Theo Buehler) + + Despite already having an accessor, X509_ALGOR wasn't made opaque + during OpenSSL 1.1.0 development. It would be nice if this could be + fixed at some point, so avoid reaching into that struct by using the + accessor. + +7.1.0-dev.61 | 2024-07-25 12:02:51 +0200 + + * mysql: Simplify length computation (Arne Welzel, Corelight) + + Thanks Tim!
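Regarding the X509_ALGOR accessor entry above (7.1.0-dev.63): the pattern being described is to go through OpenSSL's X509_ALGOR_get0() accessor instead of dereferencing the struct's members directly. The following is only a hedged sketch of that pattern, not Zeek's actual code; the helper name is made up, while X509_ALGOR_get0() and OBJ_obj2nid() are real OpenSSL APIs.

    #include <openssl/objects.h>
    #include <openssl/x509.h>

    // Hypothetical helper (not Zeek's code): obtain the algorithm's NID via
    // the X509_ALGOR_get0() accessor rather than reading alg->algorithm
    // directly, so the code keeps working if X509_ALGOR ever becomes opaque.
    static int algorithm_nid(const X509_ALGOR* alg) {
        const ASN1_OBJECT* aobj = nullptr;
        int ptype = 0;
        const void* pval = nullptr;
        X509_ALGOR_get0(&aobj, &ptype, &pval, alg); // OpenSSL >= 1.1.0 const signature
        return aobj ? OBJ_obj2nid(aobj) : NID_undef;
    }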
+ + * mysql: Improve date and time parsing (Fupeng Zhao) + + * btest/mysql: Clean query-attr.pcapng (Arne Welzel, Corelight) + + It contained some unrelated IntelliJ traffic. + + * mysql: Support non-string query attributes (Arne Welzel, Corelight) + + The query attributes aren't exposed to the script layer right now, but this + should at least parse over them once encountered, plus some fixups. + + * btest/mysql: Add pcap with non-string query attributes (Arne Welzel, Corelight) + + The pcap was generated as follows. Even Wireshark doesn't seem to parse + this properly right now. + + with common.get_connection() as c: + with c.cursor() as cur: + date1 = datetime.date(1987, 10, 18) + datetime1 = datetime.datetime(1990, 9, 26, 12, 13, 14) + cur.add_attribute("number1", 42) + cur.add_attribute("string1", "a string") + cur.add_attribute("date1", date1) + cur.add_attribute("datetime1", datetime1) + cur.execute("SELECT version()") + result = cur.fetchall() + print("result", result) + + * mysql: Introduce mysql_ssl_request event (Arne Welzel, Corelight) + + This should've been added with fa48c885 for completeness. Do it now. + The MySQL spec calls it SSLRequest packet, so keep SSL in the name for + consistency. + + * mysql: Fix EOFIfLegacyThenResultSet (Arne Welzel, Corelight) + + Only expect a result next if an EOF was consumed. + + * mysql: Add data parameter to mysql_auth_plugin (Arne Welzel, Corelight) + + This may contain salt from the server or a hashed password from the client. + + * mysql: Add mysql_auth_plugin, mysql_auth_more_data and mysql_auth_switch_request events (Arne Welzel, Corelight) + + Remove caching_sha2_password parsing/state from the analyzer and implement + the generic events. If we actually want to peek into the authentication + mechanism, we could write a separate analyzer for it. For now, treat it + as opaque values that are exposed to script land. + + The added tests show the --get-server-public-key in use where + mysql_auth_more_data contains an RSA public key. + + * mysql: AuthSwitchRequest: &enforce a 0xfe / 254 status (Arne Welzel, Corelight) + + * mysql: Make auth_plugin_ a std::string (Arne Welzel, Corelight) + + * mysql: Fix auth_plugin_data_part2 length computation (Arne Welzel, Corelight) + + * Refactored connection phase state handling (Fupeng Zhao) + + Added `ConnectionExpected` enum for expected packet types during the connection phase. + + * Add support for "auth switch" and "query attrs" (Fupeng Zhao) + + Also fix the issue where Resultset could not correctly distinguish between EOF_Packet and OK_Packet.
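As background for the MySQL length-computation and query-attribute entries above: query attribute values and many other MySQL protocol fields are framed with length-encoded integers. The snippet below is a minimal, illustrative decoder based on the publicly documented MySQL client/server wire format; it is an assumption-labeled sketch, not the analyzer's actual binpac grammar.

    #include <cstddef>
    #include <cstdint>
    #include <optional>

    // Decode a MySQL length-encoded integer from `buf` (illustrative sketch).
    // First byte < 0xfb: the value itself. 0xfc/0xfd/0xfe: a 2-, 3- or 8-byte
    // little-endian integer follows. 0xfb marks NULL in row/attribute data.
    static std::optional<uint64_t> decode_lenenc(const uint8_t* buf, size_t len, size_t* used) {
        if ( len == 0 )
            return std::nullopt;
        uint8_t tag = buf[0];
        if ( tag < 0xfb ) {
            *used = 1;
            return tag;
        }
        size_t n = (tag == 0xfc) ? 2 : (tag == 0xfd) ? 3 : (tag == 0xfe) ? 8 : 0;
        if ( n == 0 || len < 1 + n )
            return std::nullopt; // 0xfb (NULL), 0xff (not a lenenc int), or truncated input
        uint64_t v = 0;
        for ( size_t i = 0; i < n; ++i )
            v |= uint64_t(buf[1 + i]) << (8 * i);
        *used = 1 + n;
        return v;
    }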
+ + * Add support for parsing the "caching_sha2_password" auth plugin (Fupeng Zhao) + +7.1.0-dev.45 | 2024-07-24 15:28:59 -0700 + + * Update binpac submodule to better format output code [nomail] (Tim Wojtulewicz, Corelight) + +7.1.0-dev.43 | 2024-07-24 13:25:52 -0700 + + * Update 7.0 NEWS with blurb about multi-PDU parsing causing increased load [nomail] [skip ci] (Tim Wojtulewicz, Corelight) + + * Fix handling of zero-length SMB2 error responses (Tim Wojtulewicz, Corelight) + +7.1.0-dev.40 | 2024-07-24 11:18:03 +0200 + + * minor optimization of boolean comparisons (Vern Paxson, Corelight) + + * GH-3839: fix & regression test for GH-3839 (spurious warnings for "when" constructs) (Vern Paxson, Corelight) + +7.1.0-dev.37 | 2024-07-23 19:18:37 -0700 + + * Bump zeek-testing-cluster to reflect deprecation of prometheus.zeek (Christian Kreibich, Corelight) + + * Update doc submodule [nomail] [skip ci] (zeek-bot) + +7.1.0-dev.34 | 2024-07-23 08:54:23 -0700 + + * Update Mozilla CA list and CT list (Johanna Amann, Corelight) + +7.1.0-dev.32 | 2024-07-23 08:51:11 -0700 + + * Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight) + +7.1.0-dev.30 | 2024-07-23 12:38:54 +0200 + + * ldap: Add heuristic for wrap tokens (Arne Welzel, Corelight) + + Instead of dissecting the GSSAPI handshake, add another heuristic + into MaybeEncrypted to check for the WRAP token identifier. + + After this change, the pcap on the following ticket is processed + nicely: https://gitlab.com/wireshark/migration-test/-/issues/9398 + + * ldap: Ignore ec/rrc for sealed wrap tokens (Arne Welzel, Corelight) + + It shouldn't matter for the encrypted payload that we'll + just consume and ignore. + + * ldap: Add LDAP sample with SASL-SRP mechanism (Arne Welzel, Corelight) + + This is what @dopheide-esnet actually saw. Produced with a custom + cyrus-sasl and openldap build :-( + + * ldap: Reintroduce encryption after SASL heuristic (Arne Welzel, Corelight) + + @dopheide-esnet provided sample captures where SASL SRP is used as + a SASL mechanism and the follow-up LDAP messages are encrypted. It's + not clear how to determine whether encryption will or will not happen, + so re-add a heuristic to determine this based on the first byte of + the first message *after* the successful bindResponse handshake. If + that byte is 0x30, assume cleartext. + + I haven't been able to produce such pcaps, unfortunately, but the + cleartext path is tested via the existing sasl-ntlm.pcap. + + * ldap: Fix assuming GSS-SPNEGO for all bindResponses (Arne Welzel, Corelight) + + In retrospect that's an obvious bug. + + * ldap: Implement extended request/response and StartTLS support (Arne Welzel, Corelight) + + PCAP was produced with a local OpenLDAP server configured to support StartTLS. + + This puts the Zeek calls into a separate ldap_zeek.spicy file/module + to separate it from LDAP. 
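The post-bind cleartext heuristic described in the LDAP entries above comes down to inspecting the first byte of the first message after a successful bindResponse: every cleartext LDAPMessage is a BER SEQUENCE and therefore starts with tag 0x30, and anything else is assumed to be an encrypted SASL buffer. A hedged sketch of just that decision logic follows; the real implementation lives in the Spicy grammar, and this C++ fragment is illustrative only.

    #include <cstddef>
    #include <cstdint>

    enum class LdapFraming { Cleartext, SaslWrapped };

    // Heuristic sketch (not the actual Spicy code): after a successful SASL
    // bind, peek at the first byte of the next message. 0x30 (BER SEQUENCE)
    // means cleartext LDAP; otherwise treat the stream as SASL-encrypted.
    static LdapFraming classify_post_bind(const uint8_t* data, size_t len) {
        if ( len > 0 && data[0] == 0x30 )
            return LdapFraming::Cleartext;
        return LdapFraming::SaslWrapped;
    }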
+ 7.1.0-dev.23 | 2024-07-23 10:02:52 +0200 * telemetry: Deprecate prometheus.zeek policy script (Arne Welzel, Corelight) diff --git a/CMakeLists.txt b/CMakeLists.txt index c4c7aa9990..42dc4f0a63 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -340,7 +340,6 @@ add_zeek_dynamic_plugin_build_interface_include_directories( ${PROJECT_SOURCE_DIR}/auxil/binpac/lib ${PROJECT_SOURCE_DIR}/auxil/broker/libbroker ${PROJECT_SOURCE_DIR}/auxil/paraglob/include - ${PROJECT_SOURCE_DIR}/auxil/rapidjson/include ${PROJECT_SOURCE_DIR}/auxil/prometheus-cpp/core/include ${CMAKE_BINARY_DIR}/src ${CMAKE_BINARY_DIR}/src/include @@ -348,10 +347,6 @@ add_zeek_dynamic_plugin_build_interface_include_directories( ${CMAKE_BINARY_DIR}/auxil/broker/libbroker ${CMAKE_BINARY_DIR}/auxil/prometheus-cpp/core/include) -# threading/formatters/JSON.h includes rapidjson headers and may be used -# by external plugins, extend the include path. -target_include_directories(zeek_dynamic_plugin_base SYSTEM - INTERFACE $) target_include_directories( zeek_dynamic_plugin_base SYSTEM INTERFACE $) @@ -1010,9 +1005,6 @@ include(BuiltInSpicyAnalyzer) include_directories(BEFORE ${PCAP_INCLUDE_DIR} ${BIND_INCLUDE_DIR} ${BinPAC_INCLUDE_DIR} ${ZLIB_INCLUDE_DIR} ${JEMALLOC_INCLUDE_DIR}) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/rapidjson/include/rapidjson - DESTINATION include/zeek/3rdparty/rapidjson/include) - install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/filesystem/include/ghc DESTINATION include/zeek/3rdparty/) diff --git a/NEWS b/NEWS index 02e9a035e4..bfb2deb0a4 100644 --- a/NEWS +++ b/NEWS @@ -9,11 +9,22 @@ Zeek 7.1.0 Breaking Changes ---------------- +* The ``OpaqueVal::DoSerialize`` and ``OpaqueVal::DoUnserialize`` methods were + marked as deprecated in v7.0 and have now been removed as per the Zeek + deprecation policy. Plugins that were overriding these methods and were not + updated will fail to compile. Those plugins should be updated to override the + new ``OpaqueVal::DoSerializeData`` and ``OpaqueVal::DoUnserializeData`` + methods. + New Functionality ----------------- * The LDAP analyzer now supports handling of non-sealed GSS-API WRAP tokens. +* StartTLS support was added to the LDAP analyzer. The SSL analyzer is enabled + for connections where client and server negotiate to TLS through the extended + request/response mechanism. + Changed Functionality --------------------- @@ -21,6 +32,10 @@ Changed Functionality made more strict and predictable. Please provide input if this results in less visibility in your environment. +* The MySQL analyzer has been improved to better support plugin authentication + mechanisms, like caching_sha2_password, as well as recognizing MySQL query + attributes. + Removed Functionality --------------------- @@ -109,7 +124,8 @@ New Functionality environment variable configures the addition. - SMB2 packets containing multiple PDUs now correctly parse all of the headers, - instead of just the first one and ignoring the rest. + instead of just the first one and ignoring the rest. This may cause increased + CPU load on SMB2-heavy networks. - The new built-in function ``lookup_connection_analyzer_id()`` retrieves the numeric identifier of an analyzer associated with a connection. 
This enables diff --git a/VERSION index 01d768baf7..e2bf3bdfd6 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -7.1.0-dev.23 +7.1.0-dev.141 diff --git a/auxil/binpac b/auxil/binpac index a5c8f19fb4..6e494ed5b3 160000 --- a/auxil/binpac +++ b/auxil/binpac @@ -1 +1 @@ -Subproject commit a5c8f19fb49c60171622536fa6d369fa168f19e0 +Subproject commit 6e494ed5b3d0a121cd1e1dd18b18e40d7e937675 diff --git a/auxil/prometheus-cpp b/auxil/prometheus-cpp index 2fec7205d1..4649065e2a 160000 --- a/auxil/prometheus-cpp +++ b/auxil/prometheus-cpp @@ -1 +1 @@ -Subproject commit 2fec7205d1a9cb4829b86c943d599696d53de85c +Subproject commit 4649065e2a1dd21c81e41cd6007dce5486b77fc0 diff --git a/auxil/spicy b/auxil/spicy index 4a1b43ef07..7cddc357ff 160000 --- a/auxil/spicy +++ b/auxil/spicy @@ -1 +1 @@ -Subproject commit 4a1b43ef07d1305a7e88a4f0866068dc49de9d06 +Subproject commit 7cddc357ff83175984e19037f1f8062a69cf2030 diff --git a/auxil/zeekctl b/auxil/zeekctl index 39c0ee1e17..7e1a844808 160000 --- a/auxil/zeekctl +++ b/auxil/zeekctl @@ -1 +1 @@ -Subproject commit 39c0ee1e1742bb28dff57632ee4620f905b892e7 +Subproject commit 7e1a8448083ef0013f15e67ce001836e680589a2 diff --git a/ci/ubuntu-24.04/Dockerfile b/ci/ubuntu-24.04/Dockerfile index ab7ad2cf25..f4f98fa272 100644 --- a/ci/ubuntu-24.04/Dockerfile +++ b/ci/ubuntu-24.04/Dockerfile @@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles" # A version field to invalidate Cirrus's build cache when needed, as suggested in # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822 -ENV DOCKERFILE_VERSION 20240528 +ENV DOCKERFILE_VERSION 20240807 RUN apt-get update && apt-get -y install \ bc \ @@ -41,3 +41,25 @@ RUN apt-get update && apt-get -y install \ RUN pip3 install --break-system-packages junit2html RUN gem install coveralls-lcov + +# Download a newer pre-built ccache version that recognizes -fprofile-update=atomic +# which is used when building with --coverage. +# +# This extracts the tarball into /opt/ccache-${CCACHE_VERSION}-${CCACHE_PLATFORM} and +# symlinks the executable to /usr/local/bin/ccache. +# +# See: https://ccache.dev/download.html +ENV CCACHE_VERSION=4.10.2 +ENV CCACHE_PLATFORM=linux-x86_64 +ENV CCACHE_URL=https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}-${CCACHE_PLATFORM}.tar.xz +ENV CCACHE_SHA256=80cab87bd510eca796467aee8e663c398239e0df1c4800a0b5dff11dca0b4f18 +RUN cd /opt \ && if [ "$(uname -p)" != "x86_64" ]; then echo "cannot use ccache pre-built for x86_64!" >&2; exit 1 ; fi \ && curl -L --fail --max-time 30 $CCACHE_URL -o ccache.tar.xz \ && sha256sum ./ccache.tar.xz >&2 \ && echo "${CCACHE_SHA256} ccache.tar.xz" | sha256sum -c - \ && tar xvf ./ccache.tar.xz \ && ln -s $(pwd)/ccache-${CCACHE_VERSION}-${CCACHE_PLATFORM}/ccache /usr/local/bin/ccache \ && test "$(command -v ccache)" = "/usr/local/bin/ccache" \ && test "$(ccache --print-version)" = "${CCACHE_VERSION}" \ && rm ./ccache.tar.xz diff --git a/doc b/doc index f65820ff0f..f450f803d3 160000 --- a/doc +++ b/doc @@ -1 +1 @@ -Subproject commit f65820ff0faf2887799fe691a443b5db39eeed54 +Subproject commit f450f803d3e69cb2fd474a919b7a6c6885f1f433 diff --git a/scripts/base/frameworks/cluster/main.zeek b/scripts/base/frameworks/cluster/main.zeek index e3e45a0cbc..f112e9669c 100644 --- a/scripts/base/frameworks/cluster/main.zeek +++ b/scripts/base/frameworks/cluster/main.zeek @@ -40,10 +40,6 @@ export { ## worker nodes in a cluster. Used with broker-enabled cluster communication.
const worker_topic = "zeek/cluster/worker" &redef; - ## The topic name used for exchanging messages that are relevant to - ## time machine nodes in a cluster. Used with broker-enabled cluster communication. - const time_machine_topic = "zeek/cluster/time_machine" &redef &deprecated="Remove in v7.1: Unused."; - ## A set of topic names to be used for broadcasting messages that are ## relevant to all nodes in a cluster. Currently, there is not a common ## topic to broadcast to, because enabling implicit Broker forwarding would @@ -53,9 +49,6 @@ export { manager_topic, proxy_topic, worker_topic, -@pragma push ignore-deprecations - time_machine_topic, -@pragma pop ignore-deprecations }; ## The topic prefix used for exchanging messages that are relevant to @@ -169,10 +162,6 @@ export { PROXY, ## The node type doing all the actual traffic analysis. WORKER, - ## A node acting as a traffic recorder using the - ## `Time Machine `_ - ## software. - TIME_MACHINE &deprecated="Remove in v7.1: Unused.", }; ## Record type to indicate a node in a cluster. @@ -187,12 +176,8 @@ export { ## The port that this node will listen on for peer connections. ## A value of ``0/unknown`` means the node is not pre-configured to listen. p: port &default=0/unknown; - ## Identifier for the interface a worker is sniffing. - interface: string &optional &deprecated="Remove in v7.1: interface is not required and not set consistently on workers. Replace usages with packet_source() or keep a separate worker-to-interface mapping in a global table."; ## Name of the manager node this node uses. For workers and proxies. manager: string &optional; - ## Name of a time machine node with which this node connects. - time_machine: string &optional &deprecated="Remove in v7.1: Unused."; ## A unique identifier assigned to the node by the broker framework. ## This field is only set while a node is connected. 
id: string &optional; diff --git a/scripts/base/frameworks/cluster/setup-connections.zeek b/scripts/base/frameworks/cluster/setup-connections.zeek index 5cd3e310fd..ba3010129c 100644 --- a/scripts/base/frameworks/cluster/setup-connections.zeek +++ b/scripts/base/frameworks/cluster/setup-connections.zeek @@ -94,11 +94,6 @@ event zeek_init() &priority=-10 case WORKER: Broker::subscribe(Cluster::worker_topic); break; -@pragma push ignore-deprecations - case TIME_MACHINE: - Broker::subscribe(Cluster::time_machine_topic); - break; -@pragma pop ignore-deprecations default: Reporter::error(fmt("Unhandled cluster node type: %s", self$node_type)); return; @@ -121,11 +116,6 @@ event zeek_init() &priority=-10 case MANAGER: connect_peers_with_type(LOGGER); -@pragma push ignore-deprecations - if ( self?$time_machine ) - connect_peer(TIME_MACHINE, self$time_machine); -@pragma pop ignore-deprecations - break; case PROXY: connect_peers_with_type(LOGGER); @@ -141,11 +131,6 @@ event zeek_init() &priority=-10 if ( self?$manager ) connect_peer(MANAGER, self$manager); -@pragma push ignore-deprecations - if ( self?$time_machine ) - connect_peer(TIME_MACHINE, self$time_machine); -@pragma pop ignore-deprecations - break; } } diff --git a/scripts/base/frameworks/cluster/supervisor.zeek b/scripts/base/frameworks/cluster/supervisor.zeek index ba0d676c6a..cea4a6f96c 100644 --- a/scripts/base/frameworks/cluster/supervisor.zeek +++ b/scripts/base/frameworks/cluster/supervisor.zeek @@ -43,10 +43,6 @@ function __init_cluster_nodes(): bool typ = rolemap[endp$role]; cnode = [$node_type=typ, $ip=endp$host, $p=endp$p]; -@pragma push ignore-deprecations - if ( endp?$interface ) - cnode$interface = endp$interface; -@pragma pop ignore-deprecations if ( |manager_name| > 0 && cnode$node_type != Cluster::MANAGER ) cnode$manager = manager_name; if ( endp?$metrics_port ) diff --git a/scripts/base/init-bare.zeek b/scripts/base/init-bare.zeek index 30b49def26..9ac3870e8b 100644 --- a/scripts/base/init-bare.zeek +++ b/scripts/base/init-bare.zeek @@ -5256,12 +5256,6 @@ const dpd_ignore_ports = F &redef; ## connection if it misses the initial handshake. const likely_server_ports: set[port] &redef; -## If true, output profiling for Time-Machine queries. -const time_machine_profiling = F &redef &deprecated="Remove in v7.1. Unused."; - -## If true, warns about unused event handlers at startup. -const check_for_unused_event_handlers = F &redef &deprecated="Remove in v7.1. This has been replaced by usage analyzer functionality."; - ## Holds the filename of the trace file given with ``-w`` (empty if none). ## ## .. zeek:see:: record_all_packets @@ -5883,6 +5877,13 @@ export { type MetricVector : vector of Metric; type HistogramMetricVector : vector of HistogramMetric; + + ## Maximum amount of time for CivetWeb HTTP threads to + ## wait for metric callbacks to complete on the IO loop. + const callback_timeout: interval = 5sec &redef; + + ## Number of CivetWeb threads to use. 
+ const civetweb_threads: count = 2 &redef; } module GLOBAL; diff --git a/scripts/base/protocols/ldap/consts.zeek b/scripts/base/protocols/ldap/consts.zeek index bbd378c7e8..5b29fd22e4 100644 --- a/scripts/base/protocols/ldap/consts.zeek +++ b/scripts/base/protocols/ldap/consts.zeek @@ -120,4 +120,11 @@ export { "searching", [ LDAP::SearchDerefAlias_DEREF_FINDING_BASE ] = "finding", [ LDAP::SearchDerefAlias_DEREF_ALWAYS ] = "always", } &default="unknown"; + + const EXTENDED_REQUESTS = { + # StartTLS, https://datatracker.ietf.org/doc/html/rfc4511#section-4.14.1 + [ "1.3.6.1.4.1.1466.20037" ] = "StartTLS", + # whoami, https://datatracker.ietf.org/doc/html/rfc4532#section-2 + [ "1.3.6.1.4.1.4203.1.11.3" ] = "whoami", + } &default="unknown" &redef; } diff --git a/scripts/base/protocols/ldap/main.zeek b/scripts/base/protocols/ldap/main.zeek index 93c301a65a..1e23c7bf84 100644 --- a/scripts/base/protocols/ldap/main.zeek +++ b/scripts/base/protocols/ldap/main.zeek @@ -229,6 +229,10 @@ event LDAP::message(c: connection, fmt("%s: %s -> %s", message_id, m$opcode, opcode_str), "LDAP"); } + m$opcode = opcode_str; + } else if ( ! m?$opcode ) { + # This can happen if we see a bind response before the bind request. + Reporter::conn_weird("LDAP_bind_without_opcode", c, fmt("%s: %s", message_id, opcode_str), "LDAP"); m$opcode = opcode_str; } @@ -258,6 +262,9 @@ event LDAP::message(c: connection, } m$object = object; + + if ( opcode == LDAP::ProtocolOpcode_EXTENDED_REQUEST ) + m$object += fmt(" (%s)", EXTENDED_REQUESTS[object]); } if ( argument != "" ) { diff --git a/scripts/base/protocols/ldap/spicy-events.zeek b/scripts/base/protocols/ldap/spicy-events.zeek index fa670f3456..baa00ba548 100644 --- a/scripts/base/protocols/ldap/spicy-events.zeek +++ b/scripts/base/protocols/ldap/spicy-events.zeek @@ -98,3 +98,44 @@ global LDAP::search_result_entry: event ( message_id: int, object_name: string ); + +## Event generated for each ExtendedRequest in LDAP messages. +## +## c: The connection. +## +## message_id: The messageID element. +## +## request_name: The name of the extended request. +## +## request_value: The value of the extended request (empty if missing). +global LDAP::extended_request: event ( + c: connection, + message_id: int, + request_name: string, + request_value: string +); + +## Event generated for each ExtendedResponse in LDAP messages. +## +## c: The connection. +## +## message_id: The messageID element. +## +## result: The result code of the response. +## +## response_name: The name of the extended response (empty if missing). +## +## response_value: The value of the extended response (empty if missing). +global LDAP::extended_response: event ( + c: connection, + message_id: int, + result: LDAP::ResultCode, + response_name: string, + response_value: string +); + +## Event generated when a plaintext LDAP connection switched to TLS. +## +## c: The connection. +## +global LDAP::starttls: event(c: connection); diff --git a/scripts/base/protocols/ssl/ct-list.zeek b/scripts/base/protocols/ssl/ct-list.zeek index c7c2a87ec9..036374d946 100644 --- a/scripts/base/protocols/ssl/ct-list.zeek +++ b/scripts/base/protocols/ssl/ct-list.zeek @@ -1,9 +1,9 @@ # # Do not edit this file. 
This file is automatically generated by gen-ct-list.pl -# File generated at Fri Feb 23 11:37:01 2024 +# File generated at Tue Jul 23 16:04:45 2024 # File generated from https://www.gstatic.com/ct/log_list/v3/log_list.json -# Source file generated at: 2024-02-22T12:56:21Z -# Source file version: 32.9 +# Source file generated at: 2024-07-23T13:06:08Z +# Source file version: 39.1 # @load base/protocols/ssl @@ -12,21 +12,32 @@ redef ct_logs += { ["\xee\xcd\xd0\x64\xd5\xdb\x1a\xce\xc5\x5c\xb7\x9d\xb4\xcd\x13\xa2\x32\x87\x46\x7c\xbc\xec\xde\xc3\x51\x48\x59\x46\x71\x1f\xb5\x9b"] = CTInfo($description="Google 'Argon2024' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x1d\xb9\x6c\xa9\xcb\x69\x94\xc5\x5c\xe6\xb6\xa6\x03\xbb\xd2\xb8\xdc\x54\x43\x17\x28\x99\x0c\x06\x01\x50\x1d\x9d\x64\xc0\x59\x46\x2b\xdc\xc8\x03\x1d\x05\xb4\x2d\xa8\x09\xf7\x99\x41\xed\x04\xfb\xe5\x57\xba\x26\x04\xf6\x11\x52\xce\x14\x65\x3b\x2f\x76\x2b\xc0"), ["\x4e\x75\xa3\x27\x5c\x9a\x10\xc3\x38\x5b\x6c\xd4\xdf\x3f\x52\xeb\x1d\xf0\xe0\x8e\x1b\x8d\x69\xc0\xb1\xfa\x64\xb1\x62\x9a\x39\xdf"] = CTInfo($description="Google 'Argon2025h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x20\x82\xa1\xf9\x67\x68\xa8\xe4\xdb\x94\x98\xe2\xe1\x68\x87\xe4\x09\x6d\x20\x35\x33\x38\x3c\xaf\x14\xaa\xd7\x08\x18\xf0\xfd\x16\x9b\xd3\xff\x7c\x27\x82\xd4\x87\xb7\x4e\x24\x46\x3b\xfb\xae\xbe\xc8\x23\x52\x20\x2b\xaa\x44\x05\xfe\x54\xf9\xd5\xf1\x1d\x45\x9a"), ["\x12\xf1\x4e\x34\xbd\x53\x72\x4c\x84\x06\x19\xc3\x8f\x3f\x7a\x13\xf8\xe7\xb5\x62\x87\x88\x9c\x6d\x30\x05\x84\xeb\xe5\x86\x26\x3a"] = CTInfo($description="Google 'Argon2025h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xaf\xe4\xf3\x94\x2c\xdf\xa6\x27\xb5\xfe\xb2\x61\x83\x19\xc8\x21\x3a\x23\xa8\xa9\x3d\x54\xaf\xbc\x31\x9a\x1c\xd3\xc1\xe3\xb6\xc2\xf3\x0f\xc7\xb9\xca\x3b\x1d\x79\x65\x61\x22\x25\x82\x56\x4e\x98\xe8\xaa\x26\x29\x36\x1e\x28\x60\x6f\xeb\x15\x6e\xf7\x7c\xd0\xba"), +["\x0e\x57\x94\xbc\xf3\xae\xa9\x3e\x33\x1b\x2c\x99\x07\xb3\xf7\x90\xdf\x9b\xc2\x3d\x71\x32\x25\xdd\x21\xa9\x25\xac\x61\xc5\x4e\x21"] = CTInfo($description="Google 'Argon2026h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x07\xfc\x1e\xe8\x63\x8e\xff\x1c\x31\x8a\xfc\xb8\x1e\x19\x2b\x60\x50\x00\x3e\x8e\x9e\xda\x77\x37\xe3\xa5\xa8\xda\x8d\x94\xf8\x6b\xe8\x3d\x64\x8f\x27\x3f\x75\xb3\xfc\x6b\x12\xf0\x37\x06\x4f\x64\x58\x75\x14\x5d\x56\x52\xe6\x6a\x2b\x14\x4c\xec\x81\xd1\xea\x3e"), +["\xd7\x6d\x7d\x10\xd1\xa7\xf5\x77\xc2\xc7\xe9\x5f\xd7\x00\xbf\xf9\x82\xc9\x33\x5a\x65\xe1\xd0\xb3\x01\x73\x17\xc0\xc8\xc5\x69\x77"] = CTInfo($description="Google 'Argon2026h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2026h2/", $maximum_merge_delay=86400, 
$key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2a\x3a\x67\x8b\xfe\xba\x0c\x86\x2b\x4a\x51\x8a\xe9\x17\xfe\x7b\xa1\x76\x73\xfd\xbc\x65\x4b\xc3\x27\xbf\x4d\xf3\x5f\xa0\xca\x29\x80\x11\x20\x32\x78\xd6\x7e\xf9\x34\x60\x8c\x75\xa0\xf5\x35\x50\x9c\xa1\xd3\x49\x4d\x13\xd5\x3b\x6a\x0e\xea\x45\x9d\x24\x13\x22"), ["\x76\xff\x88\x3f\x0a\xb6\xfb\x95\x51\xc2\x61\xcc\xf5\x87\xba\x34\xb4\xa4\xcd\xbb\x29\xdc\x68\x42\x0a\x9f\xe6\x67\x4c\x5a\x3a\x74"] = CTInfo($description="Google 'Xenon2024' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xb9\x60\xe0\x34\x1e\x35\xe4\x65\x00\x93\x4f\x90\x09\xbd\x5a\xec\x44\xdd\x8c\x0f\xce\xed\x11\x3e\x2a\x59\x46\x9a\x31\xb6\xc7\x99\xf7\xdc\xef\x3d\xcd\x8f\x86\xc2\x35\xa5\x3e\xdc\x29\xba\xbb\xf2\x54\xe2\xa8\x0c\x83\x08\x51\x06\xde\x21\x6d\x36\x50\x8e\x38\x4d"), ["\xcf\x11\x56\xee\xd5\x2e\x7c\xaf\xf3\x87\x5b\xd9\x69\x2e\x9b\xe9\x1a\x71\x67\x4a\xb0\x17\xec\xac\x01\xd2\x5b\x77\xce\xcc\x3b\x08"] = CTInfo($description="Google 'Xenon2025h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x82\xe2\xce\x90\x40\x3f\x81\x0e\xdf\xea\xe1\x20\x2b\x5e\x2e\x30\x54\x46\x81\xb9\x58\xed\xaf\xbd\xff\x36\xa7\x9e\x0b\x5f\x6a\x6b\x91\xa5\xc1\x98\xe1\xf2\xcd\xeb\x17\x20\x70\xca\x2a\x12\xe6\x54\x78\x50\xdc\xff\x6d\xfd\x1c\xa7\xb6\x3a\x1f\xf9\x26\xa9\x1b\xbd"), ["\xdd\xdc\xca\x34\x95\xd7\xe1\x16\x05\xe7\x95\x32\xfa\xc7\x9f\xf8\x3d\x1c\x50\xdf\xdb\x00\x3a\x14\x12\x76\x0a\x2c\xac\xbb\xc8\x2a"] = CTInfo($description="Google 'Xenon2025h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x6b\xe0\xaf\xed\x06\x7c\x3d\xef\xd9\x0e\xe4\x58\x4b\x04\xd8\x2a\x47\x99\x90\x89\x7a\xb9\x36\xa5\x75\xc8\x04\xb8\xcb\xe2\xaa\x2b\xb5\x68\x9d\x88\x29\xa2\xa5\xcf\xce\x2b\x9a\x15\x9b\xa0\x3e\x9d\x94\x1c\xb2\xb7\x4a\xf2\x51\xec\x40\xed\x62\x47\xa4\x03\x49\x86"), +["\x96\x97\x64\xbf\x55\x58\x97\xad\xf7\x43\x87\x68\x37\x08\x42\x77\xe9\xf0\x3a\xd5\xf6\xa4\xf3\x36\x6e\x46\xa4\x3f\x0f\xca\xa9\xc6"] = CTInfo($description="Google 'Xenon2026h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x3a\x1f\xc8\xbb\xce\xd5\x90\x47\x34\xca\xca\x01\x04\x27\x21\x1c\xe2\x29\x3d\x92\xbb\x91\x45\xc7\x5a\x3e\xa5\xd4\xf2\x12\xe6\xe8\xe6\x43\xba\xf3\x7b\xc2\x38\xaf\xfc\x23\x8a\x05\x56\xeb\x03\x0a\x30\xcc\x63\x6c\xd9\x3c\xbe\xf5\x7b\x94\xba\x94\xd3\xbf\x88\x4c"), +["\xd8\x09\x55\x3b\x94\x4f\x7a\xff\xc8\x16\x19\x6f\x94\x4f\x85\xab\xb0\xf8\xfc\x5e\x87\x55\x26\x0f\x15\xd1\x2e\x72\xbb\x45\x4b\x14"] = CTInfo($description="Google 'Xenon2026h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2026h2/", $maximum_merge_delay=86400, 
$key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe5\x77\x78\x95\x71\x28\xb3\x95\xc9\xa5\xcc\x7a\x4c\xe8\x32\x03\x96\x7b\xfc\x2e\x1d\xb9\xa4\xdb\x43\xa0\xbd\x69\x72\xf9\x45\xba\x9a\xc3\xe9\x96\xd5\x70\xe7\x0d\x7e\xc9\x95\x15\x27\x8a\x72\x30\x65\x86\x43\x53\xdc\x11\x44\x18\x49\x98\x25\x68\xa7\x3c\x05\xbf"), ["\xda\xb6\xbf\x6b\x3f\xb5\xb6\x22\x9f\x9b\xc2\xbb\x5c\x6b\xe8\x70\x91\x71\x6c\xbb\x51\x84\x85\x34\xbd\xa4\x3d\x30\x48\xd7\xfb\xab"] = CTInfo($description="Cloudflare 'Nimbus2024' Log", $operator="Cloudflare", $url="https://ct.cloudflare.com/logs/nimbus2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x77\xb1\x9b\x7b\x8f\xe6\x8b\x35\xfe\x3a\x92\x29\x2d\xac\x8a\x8d\x51\x8a\x25\xfc\x93\xb6\xd7\xa0\x8b\x29\x37\x71\x1d\x33\xca\xcc\x33\xea\x28\xb9\x1f\xe2\xac\xc3\xa9\x5d\xdd\x97\xbe\xf6\x9e\x94\x25\xdd\x36\x81\xd1\xeb\x5d\x29\xc3\x2b\x44\xf1\x5b\xca\x15\x48"), ["\xcc\xfb\x0f\x6a\x85\x71\x09\x65\xfe\x95\x9b\x53\xce\xe9\xb2\x7c\x22\xe9\x85\x5c\x0d\x97\x8d\xb6\xa9\x7e\x54\xc0\xfe\x4c\x0d\xb0"] = CTInfo($description="Cloudflare 'Nimbus2025'", $operator="Cloudflare", $url="https://ct.cloudflare.com/logs/nimbus2025/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x1a\x80\x1a\x15\x19\x19\x23\x79\xb4\xfa\xa0\x79\x8e\x8d\xd5\xc1\xdc\xc2\xb5\x96\x92\x7e\x94\xe0\xc3\x7e\x14\x7c\x0a\x0d\x2d\x46\xa8\x9d\x1b\xb1\x41\x65\x0c\x5f\x98\xc4\x5a\x17\x79\x81\x5b\x4a\x14\x41\xec\xaf\xa9\x5d\x0e\xab\x12\x19\x71\xcd\x43\xef\xbb\x97"), ["\x48\xb0\xe3\x6b\xda\xa6\x47\x34\x0f\xe5\x6a\x02\xfa\x9d\x30\xeb\x1c\x52\x01\xcb\x56\xdd\x2c\x81\xd9\xbb\xbf\xab\x39\xd8\x84\x73"] = CTInfo($description="DigiCert Yeti2024 Log", $operator="DigiCert", $url="https://yeti2024.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x57\xb8\xc1\x6f\x30\xa4\x7f\x2e\xe4\xf0\xd0\xd9\x60\x62\x13\x95\xe3\x7a\xe3\x4e\x53\xc3\xb3\xb8\x73\x85\xc1\x18\x0d\x23\x0e\x58\x84\xd2\x78\xef\x9b\xb3\x1e\x2c\x1a\xde\xc1\x8f\x81\x1b\x19\x44\x58\xb7\x00\x77\x60\x20\x1a\x72\xd8\x82\xde\xae\x9e\xb1\xc6\x4b"), ["\x7d\x59\x1e\x12\xe1\x78\x2a\x7b\x1c\x61\x67\x7c\x5e\xfd\xf8\xd0\x87\x5c\x14\xa0\x4e\x95\x9e\xb9\x03\x2f\xd9\x0e\x8c\x2e\x79\xb8"] = CTInfo($description="DigiCert Yeti2025 Log", $operator="DigiCert", $url="https://yeti2025.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xdf\x95\x00\x5e\x10\xc1\x01\xf7\x37\xe3\x10\x74\xd1\xff\xb2\xca\x90\xed\x32\x99\x5f\x0c\x39\xfe\xa1\xd1\x13\x11\xac\xd1\xb3\x73\x93\x20\xc2\x13\x3c\x4c\xb5\x7a\x52\x86\x86\x3d\xe3\x95\x24\x7c\xd8\x91\x98\x48\x3b\xf0\xf0\xdf\x21\xf1\xb0\x81\x5a\x59\x25\x43"), ["\x73\xd9\x9e\x89\x1b\x4c\x96\x78\xa0\x20\x7d\x47\x9d\xe6\xb2\xc6\x1c\xd0\x51\x5e\x71\x19\x2a\x8c\x6b\x80\x10\x7a\xc1\x77\x72\xb5"] = CTInfo($description="DigiCert Nessie2024 Log", $operator="DigiCert", $url="https://nessie2024.ct.digicert.com/log/", $maximum_merge_delay=86400, 
$key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2d\xfc\xa2\x7b\x36\xbf\x56\x91\xe9\xfe\x3f\xe8\x3d\xfc\xc3\xa7\xe0\x61\x52\xea\x2c\xe9\x05\xa3\x9f\x27\x17\x81\x05\x70\x6b\x81\x61\x44\x8a\xf8\x3b\x10\x80\x42\xed\x03\x2f\x00\x50\x21\xfc\x41\x54\x84\xa3\x54\xd5\x2e\xb2\x7a\x16\x4b\x2a\x1f\x2b\x66\x04\x2b"), ["\xe6\xd2\x31\x63\x40\x77\x8c\xc1\x10\x41\x06\xd7\x71\xb9\xce\xc1\xd2\x40\xf6\x96\x84\x86\xfb\xba\x87\x32\x1d\xfd\x1e\x37\x8e\x50"] = CTInfo($description="DigiCert Nessie2025 Log", $operator="DigiCert", $url="https://nessie2025.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xf2\xf0\xf0\xa7\x8b\x81\x2e\x09\x39\x3b\x9f\x42\xda\x38\x44\x5f\xb4\xcc\xed\x36\xbb\xd8\x43\x7f\x16\x49\x57\x87\x04\x7f\xa5\x01\x34\xf7\xe8\x68\x3f\xb7\x78\x1f\x60\x66\x2d\x67\x9a\x75\x80\xb7\x53\xa7\x85\xd5\xbc\xab\x47\x06\x55\xdb\xb5\xdf\x88\xa1\x6f\x38"), +["\xb6\x9d\xdc\xbc\x3c\x1a\xbd\xef\x6f\x9f\xd6\x0c\x88\xb1\x06\x7b\x77\xf0\x82\x68\x8b\x2d\x78\x65\xd0\x4b\x39\xab\xe9\x27\xa5\x75"] = CTInfo($description="DigiCert 'Wyvern2024h1' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2024h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x68\xa6\x79\x14\xd1\x58\xe7\xab\xaa\x29\x69\x7f\x60\xed\x68\xe8\x10\xf6\x07\x84\xc0\xfb\x59\x04\x5a\x09\xc9\x1d\xe1\x4b\xfb\xcd\xdc\x03\xf3\xa8\x2a\x46\xb9\x84\x4d\x69\x30\xec\x23\x35\xc1\x8e\xfc\x9f\xb4\x20\x24\xd7\x15\xac\x87\xf7\x1e\xc1\x0b\x3c\x76\x1a"), +["\x0c\x2a\xef\x2c\x4a\x5b\x98\x83\xd4\xdd\xa3\x82\xfe\x50\xfb\x51\x88\xb3\xe9\x73\x33\xa1\xec\x53\xa0\x9d\xc9\xa7\x9d\x0d\x08\x20"] = CTInfo($description="DigiCert 'Wyvern2024h2' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2024h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa8\x73\x12\x9c\x54\xd0\x7a\x7d\xc5\xb5\x17\x2b\x71\x52\x89\x04\x90\xbb\x42\xf1\x9d\xf8\x1c\xde\x4c\xcf\x82\x3c\xbd\x37\x1b\x74\x4c\x3c\xc7\xa3\x13\x87\x01\x51\x13\x14\xda\xa2\x12\x98\x84\xce\x1c\xbe\xcf\x4f\x7a\xef\x15\xfa\xd0\xee\xed\xed\x07\xad\x71\x6d"), +["\x73\x20\x22\x0f\x08\x16\x8a\xf9\xf3\xc4\xa6\x8b\x0a\xb2\x6a\x9a\x4a\x00\xee\xf5\x77\x85\x8a\x08\x4d\x05\x00\xd4\xa5\x42\x44\x59"] = CTInfo($description="DigiCert 'Wyvern2025h1' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\xcb\x80\x61\x86\x1b\x1f\xb5\xab\x2b\x20\x76\x59\x83\x66\x0e\xce\xae\xb8\x6f\x3b\x88\x02\xeb\x43\xf4\x87\x90\xcb\x8b\xda\xac\x0e\x19\x50\xe0\xf9\x24\x0e\xab\x26\x93\x8c\x3f\x9e\x0d\x96\x58\x44\x9d\x3b\x8a\x80\xc5\xc8\xbe\xe1\x89\x46\x6b\x48\x4c\xd6\x09"), +["\xed\x3c\x4b\xd6\xe8\x06\xc2\xa4\xa2\x00\x57\xdb\xcb\x24\xe2\x38\x01\xdf\x51\x2f\xed\xc4\x86\xc5\x70\x0f\x20\xdd\xb7\x3e\x3f\xe0"] = CTInfo($description="DigiCert 'Wyvern2025h2' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2025h2/", $maximum_merge_delay=86400, 
$key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe0\xdb\x41\xef\xe4\x04\xbd\xcb\x6b\x2e\x4c\xcc\xf1\x6c\xde\x41\x58\x7f\xfe\x94\xf6\x7a\xf6\x60\xed\x8b\x76\x72\xa3\xa2\x1c\x31\x13\x32\x35\xa1\xf2\x08\xd2\x68\xc5\x34\xa7\x56\x08\x1c\x63\xde\x95\xe2\x81\x69\x97\x8d\x1e\xa8\xb7\x66\x51\x25\x75\x4d\x78\x2e"), +["\xdb\x07\x6c\xde\x6a\x8b\x78\xec\x58\xd6\x05\x64\x96\xeb\x6a\x26\xa8\xc5\x9e\x72\x12\x93\xe8\xac\x03\x27\xdd\xde\x89\xdb\x5a\x2a"] = CTInfo($description="DigiCert 'Sphinx2024h1' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2024h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xc6\xe4\x29\x69\x98\xfe\x28\x92\x57\x12\x4d\x9e\xed\x0e\xe7\x32\xa2\xe6\x9c\x27\x78\xa4\x29\x7c\x99\xd5\xdb\xfa\x22\xc1\xdd\x5e\xa7\xf4\xd8\xea\xc8\xd7\x44\x8d\xe0\xf1\x8c\x0a\x01\x1d\xd8\x22\xa8\xd3\xeb\xc9\x22\x8e\x36\xfb\x4a\xb1\x70\x9c\x5d\xc1\xe8\x33"), +["\xdc\xc9\x5e\x6f\xa2\x99\xb9\xb0\xfd\xbd\x6c\xa6\xa3\x6e\x1d\x72\xc4\x21\x2f\xdd\x1e\x0f\x47\x55\x3a\x36\xd6\xcf\x1a\xd1\x1d\x8d"] = CTInfo($description="DigiCert 'Sphinx2024h2' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2024h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xdb\x09\x41\x84\xe7\xd1\xf1\x5b\x25\x09\x7b\xe8\xc6\x98\x51\x5e\x29\x85\xfd\x81\xde\x89\xd7\xd0\x86\xa4\xb0\xe5\x15\xec\x5d\x7b\x17\x55\x5f\xc9\x79\x8d\xe4\x22\x36\xe7\xe9\xbf\x38\x3f\xd1\xe9\xd4\x09\x84\x81\xbe\xb6\xc1\xed\x1b\x17\xea\x26\x97\xba\xe9\x9a"), +["\xde\x85\x81\xd7\x50\x24\x7c\x6b\xcd\xcb\xaf\x56\x37\xc5\xe7\x81\xc6\x4c\xe4\x6e\xd6\x17\x63\x9f\x8f\x34\xa7\x26\xc9\xe2\xbd\x37"] = CTInfo($description="DigiCert 'Sphinx2025h1' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe3\x2f\x1f\x4d\x89\x05\x75\x29\x78\xbb\x22\x3d\x07\x62\x51\x14\x70\x94\xe7\x3c\xea\xf5\xee\xae\xa6\x48\x9a\x86\x52\x4e\x9e\x5c\xe3\x95\x97\x28\xbb\x52\x4b\x2a\xfd\xc8\xc9\x89\x4e\x45\x31\x17\xd3\x8d\xf2\xe7\xce\x18\x11\x58\x98\x2c\x60\x6f\x58\x20\x36\x6e"), +["\xa4\x42\xc5\x06\x49\x60\x61\x54\x8f\x0f\xd4\xea\x9c\xfb\x7a\x2d\x26\x45\x4d\x87\xa9\x7f\x2f\xdf\x45\x59\xf6\x27\x4f\x3a\x84\x54"] = CTInfo($description="DigiCert 'Sphinx2025h2' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x41\x8c\x50\x13\x54\xb1\x19\x05\xb7\x7f\x4a\x20\x6e\xa3\x75\x63\xca\x34\xf4\xcc\x74\xea\x32\x3b\xb6\x8b\x03\x14\xa8\x52\x7f\x32\x87\x5e\x59\x9e\x0f\xab\x18\x9e\x29\x6c\xb5\x72\x77\x1a\x27\x54\x85\x5d\xc1\x7b\x24\xa8\x34\xe3\xcd\x88\xce\xd4\x50\x1b\xbe\x69"), ["\x55\x81\xd4\xc2\x16\x90\x36\x01\x4a\xea\x0b\x9b\x57\x3c\x53\xf0\xc0\xe4\x38\x78\x70\x25\x08\x17\x2f\xa3\xaa\x1d\x07\x13\xd3\x0c"] = CTInfo($description="Sectigo 'Sabre' CT log", $operator="Sectigo", $url="https://sabre.ct.comodo.com/", $maximum_merge_delay=86400, 
$key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xf2\x6f\xd2\x89\x0f\x3f\xc5\xf8\x87\x1e\xab\x65\xb3\xd9\xbb\x17\x23\x8c\x06\x0e\x09\x55\x96\x3d\x0a\x08\xa2\xc5\x71\xb3\xd1\xa9\x2f\x28\x3e\x83\x10\xbf\x12\xd0\x44\x66\x15\xef\x54\xe1\x98\x80\xd0\xce\x24\x6d\x3e\x67\x9a\xe9\x37\x23\xce\x52\x93\x86\xda\x80"), ["\xa2\xe2\xbf\xd6\x1e\xde\x2f\x2f\x07\xa0\xd6\x4e\x6d\x37\xa7\xdc\x65\x43\xb0\xc6\xb5\x2e\xa2\xda\xb7\x8a\xf8\x9a\x6d\xf5\x17\xd8"] = CTInfo($description="Sectigo 'Sabre2024h1'", $operator="Sectigo", $url="https://sabre2024h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2c\x01\xf6\xce\x31\xbc\xaa\x14\x61\x51\xfe\x6b\x7a\x87\xae\xa6\xd3\x9b\xc7\x87\x2d\x0a\x5a\xc8\x4f\xb5\x54\xdc\xc9\x93\xa0\x00\xee\xca\x1c\xb9\xa7\xb6\x7b\x47\x3b\xe5\x4f\xaa\x6c\x16\x1c\x70\x2e\xc8\xec\x53\x5a\x4c\x21\x4c\x7e\x27\x0b\x13\x14\x5e\xfc\x85"), ["\x19\x98\x10\x71\x09\xf0\xd6\x52\x2e\x30\x80\xd2\x9e\x3f\x64\xbb\x83\x6e\x28\xcc\xf9\x0f\x52\x8e\xee\xdf\xce\x4a\x3f\x16\xb4\xca"] = CTInfo($description="Sectigo 'Sabre2024h2'", $operator="Sectigo", $url="https://sabre2024h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x7a\x10\x4c\x8a\xe7\x22\x7b\x6d\x2a\xba\x8e\xfa\x6b\x4a\x81\xd5\x85\xae\x03\xef\xff\x4b\xfc\x4d\x53\x3d\xb7\x8c\xbb\x75\x09\xc9\xea\x16\x7e\xc1\x77\x16\xd2\xc2\x45\x74\x6d\x8d\xc4\xe1\x88\x37\xdf\xd4\xf3\x60\x65\xfc\xa0\x75\xf0\x20\x66\x8e\x4a\xcc\x19\xda"), ["\xe0\x92\xb3\xfc\x0c\x1d\xc8\xe7\x68\x36\x1f\xde\x61\xb9\x96\x4d\x0a\x52\x78\x19\x8a\x72\xd6\x72\xc4\xb0\x4d\xa5\x6d\x6f\x54\x04"] = CTInfo($description="Sectigo 'Sabre2025h1'", $operator="Sectigo", $url="https://sabre2025h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x7e\x2f\x39\xf1\xe8\x23\x8e\xb3\x32\x04\xaf\x4d\x57\xf6\xdb\xc5\x74\xa4\x7a\x6d\x3b\x07\x51\x0c\x5a\xfb\x80\x30\x05\xc6\x5a\x0c\xc4\x76\xd6\x06\xa8\x57\x4d\xfb\xdf\xe4\x82\x90\xc2\x41\xae\x70\xb3\x31\xa2\xe3\xfa\x3d\x5f\x2c\x5d\x04\xcd\xb4\x9d\x55\xab\x41"), ["\x1a\x04\xff\x49\xd0\x54\x1d\x40\xaf\xf6\xa0\xc3\xbf\xf1\xd8\xc4\x67\x2f\x4e\xec\xee\x23\x40\x68\x98\x6b\x17\x40\x2e\xdc\x89\x7d"] = CTInfo($description="Sectigo 'Sabre2025h2'", $operator="Sectigo", $url="https://sabre2025h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x85\x13\x11\x2d\x7b\xf3\x93\x81\xe4\xb9\x7c\xd9\x64\x3b\xe7\xb5\x83\x99\x66\x79\x59\x47\x6a\x42\x5e\xd6\xbd\x63\x2e\xb7\x91\x4b\xae\xbc\x56\xc4\xc5\x6e\x09\xa0\xd7\x64\x1a\xc8\xc1\xaf\x89\x8b\xf5\x58\xd8\xba\xeb\x7b\x83\x52\xe9\xf4\xe0\xa5\xcd\xcd\x92\xcc"), -["\x6f\x53\x76\xac\x31\xf0\x31\x19\xd8\x99\x00\xa4\x51\x15\xff\x77\x15\x1c\x11\xd9\x02\xc1\x00\x29\x06\x8d\xb2\x08\x9a\x37\xd9\x13"] = CTInfo($description="Sectigo 'Mammoth' CT log", $operator="Sectigo", $url="https://mammoth.ct.comodo.com/", $maximum_merge_delay=86400, 
$key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xef\xe4\x7d\x74\x2e\x15\x15\xb6\xe9\xbb\x23\x8b\xfb\x2c\xb5\xe1\xc7\x80\x98\x47\xfb\x40\x69\x68\xfc\x49\xad\x61\x4e\x83\x47\x3c\x1a\xb7\x8d\xdf\xff\x7b\x30\xb4\xba\xff\x2f\xcb\xa0\x14\xe3\xad\xd5\x85\x3f\x44\x59\x8c\x8c\x60\x8b\xd7\xb8\xb1\xbf\xae\x8c\x67"), ["\x29\xd0\x3a\x1b\xb6\x74\xaa\x71\x1c\xd3\x03\x5b\x65\x57\xc1\x4f\x8a\xa7\x8b\x4f\xe8\x38\x94\x49\xec\xa4\x53\xf9\x44\xbd\x24\x68"] = CTInfo($description="Sectigo 'Mammoth2024h1'", $operator="Sectigo", $url="https://mammoth2024h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa4\x59\x90\xf3\x71\x24\x24\xf7\xc3\x55\x27\x56\x9c\xa3\x59\x1e\xf7\xb7\x9f\xce\xab\x4e\x19\x66\x4d\xd0\x8a\xfa\x9d\x62\xa4\x24\xf0\x3b\x20\xe4\x1d\x14\x67\xc8\xfc\xe4\x37\xf2\x4b\x38\x54\x5a\xcf\x9f\x6b\x07\x90\xd0\x0e\x7e\x3d\x4c\x87\xb2\xe8\x3f\x07\xcc"), ["\x50\x85\x01\x58\xdc\xb6\x05\x95\xc0\x0e\x92\xa8\x11\x02\xec\xcd\xfe\x3f\x6b\x78\x58\x42\x9f\x57\x98\x35\x38\xc9\xda\x52\x50\x63"] = CTInfo($description="Sectigo 'Mammoth2024h1b'", $operator="Sectigo", $url="https://mammoth2024h1b.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa3\xd5\x07\x28\x7a\x04\x34\xae\xca\xbe\x80\x79\x4f\x3e\xf6\x41\xf4\x24\x04\xe1\xd6\x36\x5a\x1a\x09\xf2\xd1\xba\x84\x17\xae\x1e\xa1\x7c\x00\x1d\x54\x73\x90\x75\x21\xa8\xd1\xda\x5e\x10\xe1\x8c\xec\xb2\x8a\x8c\xc8\xe7\xdd\xcd\xe2\x07\xf0\x4e\x16\x02\x57\x37"), ["\xdf\xe1\x56\xeb\xaa\x05\xaf\xb5\x9c\x0f\x86\x71\x8d\xa8\xc0\x32\x4e\xae\x56\xd9\x6e\xa7\xf5\xa5\x6a\x01\xd1\xc1\x3b\xbe\x52\x5c"] = CTInfo($description="Sectigo 'Mammoth2024h2'", $operator="Sectigo", $url="https://mammoth2024h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x85\x66\x22\x24\x6e\xbe\x52\x62\x0a\xa0\xaf\xc3\x25\x1a\x36\x2e\xa7\x60\x89\xa2\x65\xbf\xa4\x5f\xbd\x85\x6a\x94\x05\x81\x35\x90\x54\x31\x95\xe7\x11\x9e\xa3\x2e\x0f\x85\xef\xa7\x88\x57\x8b\x63\x1a\x81\xc1\x41\x9d\x7d\xec\x01\x3a\xdb\xb9\xc1\x27\xf4\x65\x1e"), @@ -39,4 +50,6 @@ redef ct_logs += { ["\x87\x4f\xb5\x0d\xc0\x29\xd9\x93\x1d\xe5\x73\xe9\xf2\x89\x9e\x8e\x45\x33\xb3\x92\xd3\x8b\x0a\x46\x25\x74\xbf\x0f\xee\xb2\xfc\x1e"] = CTInfo($description="Trust Asia Log2024-2", $operator="TrustAsia", $url="https://ct2024.trustasia.com/log2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\x64\xe2\x79\x81\x3f\x61\xd7\xec\xc6\xf8\x65\x28\x1d\xa0\xb4\x66\x33\xc3\x25\xd5\x0a\x95\x78\x9c\x8f\xfe\xa4\x2a\xd8\x8f\x7e\x72\xe0\xfe\xa8\x7f\xf8\xb1\x2d\x85\xc0\x8e\x12\x74\x0d\x2f\x8c\xab\xd7\x7f\x7a\x1e\xd9\x84\x33\x39\xe8\xfd\x89\x5f\x96\x48\x08"), ["\x28\xe2\x81\x38\xfd\x83\x21\x45\xe9\xa9\xd6\xaa\x75\x37\x6d\x83\x77\xa8\x85\x12\xb3\xc0\x7f\x72\x41\x48\x21\xdc\xbd\xe9\x8c\x66"] = CTInfo($description="TrustAsia Log2025a", $operator="TrustAsia", $url="https://ct2025-a.trustasia.com/log2025a/", $maximum_merge_delay=86400, 
$key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x70\xe5\xb1\xa4\x09\x79\x2b\x9d\xf8\xa3\xa0\xdf\x18\xef\x95\x5d\x03\x6c\x7b\xa1\x91\xa9\xb8\x80\x7d\xec\x5c\x02\x08\xe2\x6e\x2f\x7c\x32\x70\xbd\x96\x84\x5f\xa6\x62\xe9\x65\xb5\x7c\x90\x58\xba\x22\xd5\xf9\xf5\x69\x54\xb7\xa8\x94\x4e\x32\x09\xae\x26\x11\x4d"), ["\x28\x2c\x8b\xdd\x81\x0f\xf9\x09\x12\x0a\xce\x16\xd6\xe0\xec\x20\x1b\xea\x82\xa3\xa4\xaf\x19\xd9\xef\xfb\x59\xe8\x3f\xdc\x42\x68"] = CTInfo($description="TrustAsia Log2025b", $operator="TrustAsia", $url="https://ct2025-b.trustasia.com/log2025b/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xaa\xa0\x8b\xdb\x67\x14\x5d\x97\x89\x1d\x08\x8d\x06\xd7\xc1\x94\x8e\xb0\xfa\x4c\x46\xd5\x53\x08\x78\x2b\x04\x53\x6c\xf3\xde\xb1\xd1\x53\x40\xda\x90\x57\xe6\x1a\x9e\x3c\xc7\x03\xb8\xbd\x2f\xa9\xcf\xe8\x7b\x5e\xe1\x4b\x60\xe5\x38\x43\x60\x97\xc1\x5b\x2f\x65"), +["\x74\xdb\x9d\x58\xf7\xd4\x7e\x9d\xfd\x78\x7a\x16\x2a\x99\x1c\x18\xcf\x69\x8d\xa7\xc7\x29\x91\x8c\x9a\x18\xb0\x45\x0d\xba\x44\xbc"] = CTInfo($description="TrustAsia 'log2026a'", $operator="TrustAsia", $url="https://ct2026-a.trustasia.com/log2026a/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\x4e\x7a\xc9\xa6\x07\xf9\xff\x74\xec\x98\xcb\x49\xe1\x00\x24\xb3\x59\x2e\x83\xfd\xc0\x70\x35\x33\x4c\x63\xca\x74\x83\xc0\x3c\x5b\x53\x40\x7c\x31\x1f\x35\xa4\x5f\x0f\xe4\xee\x4f\x89\x17\xe8\x5b\x2e\xc5\xac\x00\x05\xc9\x76\x37\x45\x97\x03\x15\xff\x60\x59"), +["\x25\xb7\xef\xde\xa1\x13\x01\x93\xed\x93\x07\x97\x70\xaa\x32\x2a\x26\x62\x0d\xe3\x5a\xc8\xaa\x7c\x75\x19\x7d\xe0\xb1\xa9\xe0\x65"] = CTInfo($description="TrustAsia 'log2026b'", $operator="TrustAsia", $url="https://ct2026-b.trustasia.com/log2026b/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x0f\x12\x8c\xa9\xe6\xe3\xec\x62\xee\xdf\x58\xc8\x50\xe6\x26\x70\x76\x10\xb7\x04\x39\xb3\xa7\xf8\x4c\x73\x3b\xc3\x38\x5a\x12\x00\x4c\xe0\xda\x0e\x16\x8a\x45\x32\x0a\x31\xaa\x22\xc7\x9d\x7d\x05\x53\xc7\x9e\x94\xea\x9b\x57\x46\xbf\x4f\xa4\x7e\xfb\xdf\xfa\x85"), }; diff --git a/scripts/base/protocols/ssl/mozilla-ca-list.zeek b/scripts/base/protocols/ssl/mozilla-ca-list.zeek index 1206908a10..e308f1fb75 100644 --- a/scripts/base/protocols/ssl/mozilla-ca-list.zeek +++ b/scripts/base/protocols/ssl/mozilla-ca-list.zeek @@ -1,6 +1,6 @@ # Don't edit! This file is automatically generated. 
-# Generated at: 2024-02-23 11:28:07 +0000 -# Generated from: NSS 3.98 +# Generated at: 2024-07-23 16:04:06 +0100 +# Generated from: NSS 3.102 # # The original source file comes with this licensing statement: # @@ -158,4 +158,5 @@ redef root_certs += { ["CN=CommScope Public Trust RSA Root-02,O=CommScope,C=US"] = "\x30\x82\x05\x6C\x30\x82\x03\x54\xA0\x03\x02\x01\x02\x02\x14\x54\x16\xBF\x3B\x7E\x39\x95\x71\x8D\xD1\xAA\x00\xA5\x86\x0D\x2B\x8F\x7A\x05\x4E\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x30\x4E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x12\x30\x10\x06\x03\x55\x04\x0A\x0C\x09\x43\x6F\x6D\x6D\x53\x63\x6F\x70\x65\x31\x2B\x30\x29\x06\x03\x55\x04\x03\x0C\x22\x43\x6F\x6D\x6D\x53\x63\x6F\x70\x65\x20\x50\x75\x62\x6C\x69\x63\x20\x54\x72\x75\x73\x74\x20\x52\x53\x41\x20\x52\x6F\x6F\x74\x2D\x30\x32\x30\x1E\x17\x0D\x32\x31\x30\x34\x32\x38\x31\x37\x31\x36\x34\x33\x5A\x17\x0D\x34\x36\x30\x34\x32\x38\x31\x37\x31\x36\x34\x32\x5A\x30\x4E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x12\x30\x10\x06\x03\x55\x04\x0A\x0C\x09\x43\x6F\x6D\x6D\x53\x63\x6F\x70\x65\x31\x2B\x30\x29\x06\x03\x55\x04\x03\x0C\x22\x43\x6F\x6D\x6D\x53\x63\x6F\x70\x65\x20\x50\x75\x62\x6C\x69\x63\x20\x54\x72\x75\x73\x74\x20\x52\x53\x41\x20\x52\x6F\x6F\x74\x2D\x30\x32\x30\x82\x02\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x02\x0F\x00\x30\x82\x02\x0A\x02\x82\x02\x01\x00\xE1\xFA\x0E\xFB\x68\x00\x12\xC8\x4D\xD5\xAC\x22\xC4\x35\x01\x3B\xC5\x54\xE5\x59\x76\x63\xA5\x7F\xEB\xC1\xC4\x6A\x98\xBD\x32\x8D\x17\x80\xEB\x5D\xBA\xD1\x62\x3D\x25\x23\x19\x35\x14\xE9\x7F\x89\xA7\x1B\x62\x3C\xD6\x50\xE7\x34\x95\x03\x32\xB1\xB4\x93\x22\x3D\xA7\xE2\xB1\xED\xE6\x7B\x4E\x2E\x87\x9B\x0D\x33\x75\x0A\xDE\xAA\x35\xE7\x7E\xE5\x36\x98\xA2\xAE\x25\x9E\x95\xB3\x32\x96\xA4\x2B\x58\x1E\xEF\x3F\xFE\x62\x34\x48\x51\xD1\xB4\x8D\x42\xAD\x60\xDA\x49\x6A\x95\x70\xDD\xD2\x00\xE2\xCC\x57\x63\x02\x7B\x96\xDD\x49\x97\x5B\x92\x4E\x95\xD3\xF9\xCB\x29\x1F\x18\x4A\xF8\x01\x2A\xD2\x63\x09\x6E\x24\xE9\x89\xD2\xE5\xC7\x22\x4C\xDC\x73\x86\x47\x00\xAA\x0D\x88\x8E\xAE\x85\x7D\x4A\xE9\xBB\x33\x4F\x0E\x52\x70\x9D\x95\xE3\x7C\x6D\x96\x5B\x2D\x3D\x5F\xA1\x83\x46\x5D\xB6\xE3\x25\xB8\x7C\xA7\x19\x80\x1C\xEA\x65\x43\xDC\x91\x79\x36\x2C\x74\x7C\xF2\x67\x06\xC9\x89\xC9\xDB\xBF\xDA\x68\xBF\x23\xED\xDC\x6B\xAD\x28\x83\x79\x2F\xEC\x38\xA5\x0D\x37\x01\x67\x27\x9A\xE9\x33\xD9\x33\x5F\x37\xA1\xC5\xF0\xAB\x3D\xFA\x78\xB0\xE7\x2C\x9F\xF6\x3E\x9F\x60\xE0\xEF\x48\xE9\x90\x45\x1E\x05\x51\x78\x1A\x2C\x12\x2C\x5C\x28\xAC\x0D\xA2\x23\x9E\x34\x8F\x05\xE6\xA2\x33\xCE\x11\x77\x13\xD4\x0E\xA4\x1E\x42\x1F\x86\xCD\x70\xFE\xD9\x2E\x15\x3D\x1D\xBB\xB8\xF2\x53\x57\xDB\xCC\xC6\x74\x29\x9C\x18\xB3\x36\x75\x38\x2E\x0F\x54\xA1\xF8\x92\x1F\x89\x96\x4F\xBB\xD4\xEE\x9D\xE9\x3B\x36\x42\xB5\x0A\x3B\x2A\xD4\x64\x79\x36\x10\xE1\xF9\x91\x03\x2B\x7B\x20\x54\xCD\x0D\x19\x1A\xC8\x41\x32\x34\xD1\xB0\x99\xE1\x90\x1E\x01\x40\x36\xB5\xB7\xFA\xA9\xE5\x77\x75\xA4\x22\x81\x5D\xB0\x8B\xE4\x27\x12\x0F\x54\x88\xC6\xDB\x85\x74\xE6\xB7\xC0\xD7\xA6\x29\xFA\xDB\xDE\xF3\x93\x97\x27\x04\x55\x2F\x0A\x6F\x37\xC5\x3D\x13\xAF\x0A\x00\xA9\x2C\x8B\x1C\x81\x28\xD7\xEF\x86\x31\xA9\xAE\xF2\x6E\xB8\xCA\x6A\x2C\x54\x47\xD8\x2A\x88\x2E\xAF\xC1\x07\x10\x78\xAC\x11\xA2\x2F\x42\xF0\x37\xC5\xF2\xB8\x56\xDD\x0E\x62\x2D\xCE\x2D\x56\x7E\x55\xF2\xA7\x44\xF6\x2B\x32\xF4\x23\xA8\x47\xE8\xD4\x2A\x01\x78\xCF\x6A\xC3\x37\xA8\x9E\x65\xD2\x2C\xE5\xFA\xBA\x33\xC1\x06\x44\xF6\xE6\xCF\xA5\x0D\xA7\x66\x08\x34\x8A\x2C\xF3\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03
\x01\x01\xFF\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x47\xD0\xE7\xB1\x22\xFF\x9D\x2C\xF5\xD9\x57\x60\xB3\xB1\xB1\x70\x95\xEF\x61\x7A\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0B\x05\x00\x03\x82\x02\x01\x00\x86\x69\xB1\x4D\x2F\xE9\x9F\x4F\x22\x93\x68\x8E\xE4\x21\x99\xA3\xCE\x45\x53\x1B\x73\x44\x53\x00\x81\x61\xCD\x31\xE3\x08\xBA\x81\x28\x28\x7A\x92\xB9\xB6\xA8\xC8\x43\x9E\xC7\x13\x26\x4D\xC2\xD8\xE5\x55\x9C\x92\x5D\x50\xD8\xC2\x2B\xDB\xFE\xE6\xA8\x97\xCF\x52\x3A\x24\xC3\x65\x64\x5C\x47\x31\xA3\x65\x35\x13\xC3\x93\xB9\xF7\xF9\x51\x97\xBB\xA4\xF0\x62\x87\xC5\xD6\x06\xD3\x97\x83\x20\xA9\x7E\xBB\xB6\x21\xC2\xA5\x0D\x84\x00\xE1\xF2\x27\x10\x83\xBA\xDD\x03\x81\xD5\xDD\x68\xC3\x66\x10\xC8\xD1\x76\xB4\xB3\x6F\x29\x9E\x00\xF9\xC2\x29\xF5\xB1\x93\x19\x52\x69\x1A\x2C\x4C\xA0\x8B\xE0\x15\x9A\x31\x2F\xD3\x88\x95\x59\x6E\xE5\xC4\xB3\x50\xC8\x14\x08\x4A\x9B\x8B\x13\x83\xB1\xA4\x72\xB2\x3B\x76\x33\x41\xDC\xDC\xAA\xA6\x07\x6F\x1D\x24\x12\x9F\xC8\x76\xBD\x2F\xD9\x8E\xF4\x2C\xEE\xB7\xD2\x38\x10\x24\x36\x51\x2F\xE3\x5C\x5D\x81\x21\xA7\xDA\xBB\x4E\xFF\xE6\x07\xA8\xFE\xB9\x0D\x27\x6C\xBB\x70\x5A\x55\x7A\x13\xE9\xF1\x2A\x49\x69\xC7\x5F\x87\x57\x4C\x43\x79\x6D\x3A\x65\xE9\x30\x5C\x41\xEE\xEB\x77\xA5\x73\x12\x88\xE8\xBF\x7D\xAE\xE5\xC4\xA8\x1F\x0D\x8E\x1C\x6D\x50\x02\x4F\x26\x18\x43\xDE\x8F\x55\x85\xB1\x0B\x37\x05\x60\xC9\x55\x39\x12\x04\xA1\x2A\xCF\x71\x16\x9F\x36\x51\x49\xBF\x70\x3B\x9E\x67\x9C\xFB\x7B\x79\xC9\x39\x1C\x78\xAC\x77\x91\x54\x9A\xB8\x75\x0A\x81\x52\x97\xE3\x66\x61\x6B\xED\x3E\x38\x1E\x96\x61\x55\xE1\x91\x54\x8C\xED\x8C\x24\x1F\x81\xC9\x10\x9A\x73\x99\x2B\x16\x4E\x72\x00\x3F\x54\x1B\xF8\x8D\xBA\x8B\xE7\x14\xD6\xB6\x45\x4F\x60\xEC\x96\xAE\xC3\x2F\x02\x4E\x5D\x9D\x96\x49\x72\x00\xB2\xAB\x75\x5C\x0F\x68\x5B\x1D\x65\xC2\x5F\x33\x0F\x1E\x0F\xF0\x3B\x86\xF5\xB0\x4E\xBB\x9C\xF7\xEA\x25\x05\xDC\xAD\xA2\x9B\x4B\x17\x01\xBE\x42\xDF\x35\x21\x1D\xAD\xAB\xAE\xF4\xBF\xAE\x1F\x1B\xD3\xE2\x3B\xFC\xB3\x72\x73\x1C\x9B\x28\x90\x89\x13\x3D\x1D\xC1\x00\x47\x09\x96\x9A\x38\x1B\xDD\xB1\xCF\x0D\xC2\xB4\x44\xF3\x96\x95\xCE\x32\x3A\x8F\x34\x9C\xE0\x17\xC7\x5E\xCE\xAE\x0D\xDB\x87\x38\xE5\x3F\x5B\xFD\x9B\x19\xE1\x31\x41\x7A\x70\xAA\x23\x6B\x01\xE1\x45\x4C\xCD\x94\xCE\x3B\x9E\x2D\xE7\x88\x02\x22\xF4\x6E\xE8\xC8\xEC\xD6\x3C\xF3\xB9\xB2\xD7\x77\x7A\xAC\x7B", ["CN=Telekom Security TLS ECC Root 2020,O=Deutsche Telekom Security GmbH,C=DE"] = 
"\x30\x82\x02\x42\x30\x82\x01\xC9\xA0\x03\x02\x01\x02\x02\x10\x36\x3A\x96\x8C\xC9\x5C\xB2\x58\xCD\xD0\x01\x5D\xC5\xE5\x57\x00\x30\x0A\x06\x08\x2A\x86\x48\xCE\x3D\x04\x03\x03\x30\x63\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x45\x31\x27\x30\x25\x06\x03\x55\x04\x0A\x0C\x1E\x44\x65\x75\x74\x73\x63\x68\x65\x20\x54\x65\x6C\x65\x6B\x6F\x6D\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x47\x6D\x62\x48\x31\x2B\x30\x29\x06\x03\x55\x04\x03\x0C\x22\x54\x65\x6C\x65\x6B\x6F\x6D\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x54\x4C\x53\x20\x45\x43\x43\x20\x52\x6F\x6F\x74\x20\x32\x30\x32\x30\x30\x1E\x17\x0D\x32\x30\x30\x38\x32\x35\x30\x37\x34\x38\x32\x30\x5A\x17\x0D\x34\x35\x30\x38\x32\x35\x32\x33\x35\x39\x35\x39\x5A\x30\x63\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x45\x31\x27\x30\x25\x06\x03\x55\x04\x0A\x0C\x1E\x44\x65\x75\x74\x73\x63\x68\x65\x20\x54\x65\x6C\x65\x6B\x6F\x6D\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x47\x6D\x62\x48\x31\x2B\x30\x29\x06\x03\x55\x04\x03\x0C\x22\x54\x65\x6C\x65\x6B\x6F\x6D\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x54\x4C\x53\x20\x45\x43\x43\x20\x52\x6F\x6F\x74\x20\x32\x30\x32\x30\x30\x76\x30\x10\x06\x07\x2A\x86\x48\xCE\x3D\x02\x01\x06\x05\x2B\x81\x04\x00\x22\x03\x62\x00\x04\xCE\xBF\xFE\x57\xA8\xBF\xD5\xAA\xF7\x10\x9A\xCD\xBC\xD1\x11\xA2\xBD\x67\x42\xCC\x90\xEB\x15\x18\x90\xD9\xA2\xCD\x0C\x2A\x25\xEB\x3E\x4F\xCE\xB5\xD2\x8F\x0F\xF3\x35\xDA\x43\x8B\x02\x80\xBE\x6F\x51\x24\x1D\x0F\x6B\x2B\xCA\x9F\xC2\x6F\x50\x32\xE5\x37\x20\xB6\x20\xFF\x88\x0D\x0F\x6D\x49\xBB\xDB\x06\xA4\x87\x90\x92\x94\xF4\x09\xD0\xCF\x7F\xC8\x80\x0B\xC1\x97\xB3\xBB\x35\x27\xC9\xC2\x1B\xA3\x42\x30\x40\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\xE3\x72\xCC\x6E\x95\x99\x47\xB1\xE6\xB3\x61\x4C\xD1\xCB\xAB\xE3\xBA\xCD\xDE\x9F\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0A\x06\x08\x2A\x86\x48\xCE\x3D\x04\x03\x03\x03\x67\x00\x30\x64\x02\x30\x75\x52\x8B\xB7\xA4\x10\x4F\xAE\x4A\x10\x8B\xB2\x84\x5B\x42\xE1\xE6\x2A\x36\x02\xDA\xA0\x6E\x19\x3F\x25\xBF\xDA\x59\x32\x8E\xE4\xFB\x90\xDC\x93\x64\xCE\xAD\xB4\x41\x47\x60\xE2\xCF\xA7\xCB\x1E\x02\x30\x37\x41\x8C\x66\xDF\x41\x6B\xD6\x83\x00\x41\xFD\x2F\x5A\xF7\x50\xB4\x67\xD1\x2C\xA8\x71\xD7\x43\xCA\x9C\x27\x24\x91\x83\x48\x0D\xCF\xCD\xF7\x54\x81\xAF\xEC\x7F\xE4\x67\xDB\xB8\x90\xEE\xDD\x25", ["CN=Telekom Security TLS RSA Root 2023,O=Deutsche Telekom Security GmbH,C=DE"] = 
"\x30\x82\x05\xB3\x30\x82\x03\x9B\xA0\x03\x02\x01\x02\x02\x10\x21\x9C\x54\x2D\xE8\xF6\xEC\x71\x77\xFA\x4E\xE8\xC3\x70\x57\x97\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0C\x05\x00\x30\x63\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x45\x31\x27\x30\x25\x06\x03\x55\x04\x0A\x0C\x1E\x44\x65\x75\x74\x73\x63\x68\x65\x20\x54\x65\x6C\x65\x6B\x6F\x6D\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x47\x6D\x62\x48\x31\x2B\x30\x29\x06\x03\x55\x04\x03\x0C\x22\x54\x65\x6C\x65\x6B\x6F\x6D\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x54\x4C\x53\x20\x52\x53\x41\x20\x52\x6F\x6F\x74\x20\x32\x30\x32\x33\x30\x1E\x17\x0D\x32\x33\x30\x33\x32\x38\x31\x32\x31\x36\x34\x35\x5A\x17\x0D\x34\x38\x30\x33\x32\x37\x32\x33\x35\x39\x35\x39\x5A\x30\x63\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x44\x45\x31\x27\x30\x25\x06\x03\x55\x04\x0A\x0C\x1E\x44\x65\x75\x74\x73\x63\x68\x65\x20\x54\x65\x6C\x65\x6B\x6F\x6D\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x47\x6D\x62\x48\x31\x2B\x30\x29\x06\x03\x55\x04\x03\x0C\x22\x54\x65\x6C\x65\x6B\x6F\x6D\x20\x53\x65\x63\x75\x72\x69\x74\x79\x20\x54\x4C\x53\x20\x52\x53\x41\x20\x52\x6F\x6F\x74\x20\x32\x30\x32\x33\x30\x82\x02\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x02\x0F\x00\x30\x82\x02\x0A\x02\x82\x02\x01\x00\xED\x35\xA1\x81\x80\xF3\xCB\x4A\x69\x5B\xC2\xFB\x51\x83\xAE\x26\xFD\xE1\x6E\xF3\x81\x12\x7D\x71\x40\xFF\x87\x75\x42\x29\x21\xED\x81\x52\x2C\xDF\x12\xC1\x19\x84\x89\xC1\xBD\xC5\x28\xD5\xD5\x4B\x6C\x44\xD6\x4C\xDB\x07\x96\x4A\x55\x7A\xCA\x36\x82\x04\x36\xA8\xA5\xFC\x27\xF6\x49\xF1\xD5\x72\x9E\x91\xF9\x23\xD6\x70\x7B\xBB\xF5\x9B\xC1\xEC\x93\xCF\x19\xEA\x65\x7E\x88\x70\xA0\x73\xFC\xF6\xFF\xB5\x56\x62\xE1\x73\x6A\x34\x98\x3E\x82\xB8\xAC\x95\x53\xF4\x01\xA0\x27\x07\x72\xA3\x00\x53\xA0\xE4\xB2\xAB\x83\x38\x57\x33\x25\x94\x9F\xBE\x48\x1D\x98\xE1\xA3\xBA\x9E\x5C\xCD\x04\x71\x51\x7D\x75\x78\xAB\xF3\x59\xAA\xC4\xE0\x60\xBE\x8F\x83\x52\xB8\x75\x1A\x41\x35\xED\xBC\xF3\x3A\x63\xE9\xA9\x14\x45\xD7\xE6\x52\xD1\x6E\xD2\xDE\xBC\xE3\xF5\x0B\x3B\xE6\xE0\xC4\xBD\x43\x64\x13\xA6\xCE\xF4\x98\x37\x6C\x8A\x95\xA8\x97\xC8\x47\x0F\xF0\x5E\x10\x8B\xE7\x1D\x1C\xFE\xB1\x3B\xA0\x05\x33\x68\x05\x41\x82\xC1\x03\x2B\x01\xC8\xE7\x8F\x4D\xAB\xE8\xB5\xF6\xCD\x6B\x44\xB5\xE7\xDD\x8B\xEC\xEA\x25\xB4\x00\x22\x57\x4D\xB0\xB1\xB2\x31\xC1\x16\xCE\xFF\xFD\x14\x84\xB7\x47\xFA\xB2\xF1\x70\xDE\xDB\x8B\x6C\x36\x58\xA4\x7C\xB3\x11\xD1\xC3\x77\x7F\x5F\xB6\x25\xE0\x0D\xC5\xD2\xB3\xF9\xB8\xB8\x77\xDB\x37\x71\x71\x47\xE3\x60\x18\x4F\x24\xB6\x75\x37\x78\xB9\xA3\x62\xAF\xBD\xC9\x72\x8E\x2F\xCC\xBB\xAE\xDB\xE4\x15\x52\x19\x07\x33\xFB\x6A\xB7\x2D\x4B\x90\x28\x82\x73\xFE\x18\x8B\x35\x8D\xDB\xA7\x04\x6A\xBE\xEA\xC1\x4D\x36\x3B\x16\x36\x91\x32\xEF\xB6\x40\x89\x91\x43\xE0\xF2\xA2\xAB\x04\x2E\xE6\xF2\x4C\x0E\x16\x34\x20\xAC\x87\xC1\x2D\x7E\xC9\x66\x47\x17\x14\x11\xA4\xF3\xF7\xA1\x24\x89\xAB\xD8\x1A\xC8\xA1\x5C\xB1\xA3\xF7\x8C\x6D\xC8\x01\xC9\x4F\xC9\xEC\xC4\xFC\xAC\x51\x33\xD1\xC8\x83\xD1\xC9\x9F\x1D\xD4\x47\x34\x29\x3E\xCB\xB0\x0E\xFA\x83\x0B\x28\x58\xE5\x29\xDC\x3F\x7C\xA8\x9F\xC9\xB6\x0A\xBB\xA6\xE8\x46\x16\x0F\x96\xE5\x7B\xE4\x6A\x7A\x48\x6D\x76\x98\x05\xA5\xDC\x6D\x1E\x42\x1E\x42\xDA\x1A\xE0\x52\xF7\xB5\x83\xC0\x1A\x7B\x78\x35\x2C\x38\xF5\x1F\xFD\x49\xA3\x2E\xD2\x59\x63\xBF\x80\xB0\x8C\x93\x73\xCB\x35\xA6\x99\x95\x22\x61\x65\x03\x60\xFB\x2F\x93\x4B\xFA\x9A\x9C\x80\x3B\x02\x03\x01\x00\x01\xA3\x63\x30\x61\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\xB6\xA7\x97\x82\x3D\x74\x85\x9B\xF7\x3C\x9F\x93\x9A\x95\x79\x75\x52\x8C\x6D\x47\x30\x0F\x06\x03\x55\x1D\x13\x
01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1F\x06\x03\x55\x1D\x23\x04\x18\x30\x16\x80\x14\xB6\xA7\x97\x82\x3D\x74\x85\x9B\xF7\x3C\x9F\x93\x9A\x95\x79\x75\x52\x8C\x6D\x47\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x0C\x05\x00\x03\x82\x02\x01\x00\xA8\xCC\x61\xA6\xBE\x75\x9E\x15\x50\xA4\x6B\xFB\xA8\x70\x45\x7C\xBA\x7E\xB1\x5A\xFC\x5B\x23\xFA\x0A\x77\xF8\x98\x71\x82\x0C\x6D\xE0\x5E\x46\xAA\x93\xF4\x1E\xA0\xC3\xE1\x93\xDB\x4B\xAD\xB2\xA6\x5D\xAB\xB0\xD4\x62\xCB\x5E\xBB\x66\xF5\x2D\xEE\x97\x40\x3C\x62\xEB\x5E\xD6\x14\xD6\x8C\xE2\x96\x8B\x41\x69\x93\x35\xE6\xB9\x99\x6B\x62\xB4\xA1\x17\x66\x34\xA6\x6B\x63\xC6\xB9\x4E\xF2\x22\xE9\x58\x0D\x56\x41\xD1\xFA\x0C\x4A\xF0\x33\xCD\x3B\xBB\x6D\x21\x3A\xAE\x8E\x72\xB5\xC3\x4A\xFB\xE9\x7D\xE5\xB1\x9B\x86\xEE\xE2\xE0\x7D\xB4\xF7\x32\xFD\x22\x84\xF1\x85\xC9\x37\x79\xE9\xB5\x3F\xBF\x5C\xE4\x74\xB2\x8F\x11\x62\x00\xDD\x18\x66\xA1\xD9\x7B\x23\x5F\xF1\x8E\xD5\x67\xE8\x54\xDA\x5B\x3A\x6B\x36\x6F\xF9\x81\xB1\x33\x47\x33\x77\x40\xF9\x52\xAA\xDD\xD4\x83\xCF\x85\x78\x99\x9A\x93\xB9\x73\x67\x42\x46\x11\x21\xEA\xFE\x0A\xA9\x1B\x1A\x65\x69\xB3\x8F\xAE\x16\xB6\xF6\x4B\x56\xB2\x2D\xF9\xA5\xC8\xEC\x3B\x62\xA3\xED\x6B\xD0\x4E\xD5\x40\x09\xA4\x1F\x98\xD7\x3A\xA5\x92\x59\x20\xE4\xB0\x7D\xCD\x5B\x73\x68\xBD\x6D\xC4\xA2\x13\x0E\x67\x19\xB8\x8D\x42\x7E\x6C\x0C\x9A\x6E\xA0\x24\x2D\xD5\x45\x1B\xDC\xC4\x02\x14\xFE\x85\x5B\x65\x97\xCA\x4E\x90\x50\x08\x7A\x42\x35\xF9\xEA\xC2\x66\xD4\xF8\x01\xAE\x1E\xB4\xBE\xC3\xA8\xEF\xFE\x76\x9A\xA2\xA6\x1F\x46\xF6\x84\xED\xFC\xDB\xCE\xC4\x02\xCE\x77\x48\x2C\x8C\xB2\xEC\xC3\x00\xA3\xEC\x2C\x55\x18\xC1\x7E\x19\xEE\xE1\x2F\xF2\xAD\x83\x9B\x9E\xAB\x19\xDF\xC6\x8A\x2F\x8C\x77\xE5\xB7\x05\xEC\x3B\xC1\xEC\xBE\x86\xB3\x86\xBC\xC0\xF7\xDC\xE7\xEA\x5B\xAE\xB2\xCC\xB5\x35\x86\x4B\xD0\xE2\x3F\xB6\xD8\xF8\x0E\x00\xEE\x5D\xE3\xF7\x8D\x58\xFF\xCF\x8B\x37\xE9\x63\x5F\x6E\xF7\x09\x71\x36\xC2\x12\x5D\x57\xF2\xC8\xB4\xCD\xF3\xEE\x02\xDF\x11\xDC\x6A\xB9\x57\x84\x1D\x59\x4D\x8C\xCE\xC8\x0E\x23\xC2\xB7\x26\x9A\x10\x14\x71\xFE\x93\xB2\x8A\xB8\x80\xF0\x0E\x10\x9E\xD3\xA8\x50\x0C\x37\x82\x2F\xEA\xE0\x8A\x9D\xE1\x2C\x39\xFF\xB5\xB4\x73\x00\xE4\xF7\x48\xA6\x73\xAC\xBF\xB2\xDE\x77\x04\x87\xB4\xA3\xCD\x9B\x35\x24\x37\xFA\x90\x93\x13\x81\x42\xC6\x98\x26\x75\x37\x66\x41\x10\xAC\xBB\xF5\x94\xE3\xC2\x31\x2B\xAD\xE7\x23\x56\xCC\x35\x25\x92\xB3\x50", + ["CN=FIRMAPROFESIONAL CA ROOT-A WEB,organizationIdentifier=VATES-A62634068,O=Firmaprofesional SA,C=ES"] = 
"\x30\x82\x02\x7A\x30\x82\x02\x00\xA0\x03\x02\x01\x02\x02\x10\x31\x97\x21\xED\xAF\x89\x42\x7F\x35\x41\x87\xA1\x67\x56\x4C\x6D\x30\x0A\x06\x08\x2A\x86\x48\xCE\x3D\x04\x03\x03\x30\x6E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x45\x53\x31\x1C\x30\x1A\x06\x03\x55\x04\x0A\x0C\x13\x46\x69\x72\x6D\x61\x70\x72\x6F\x66\x65\x73\x69\x6F\x6E\x61\x6C\x20\x53\x41\x31\x18\x30\x16\x06\x03\x55\x04\x61\x0C\x0F\x56\x41\x54\x45\x53\x2D\x41\x36\x32\x36\x33\x34\x30\x36\x38\x31\x27\x30\x25\x06\x03\x55\x04\x03\x0C\x1E\x46\x49\x52\x4D\x41\x50\x52\x4F\x46\x45\x53\x49\x4F\x4E\x41\x4C\x20\x43\x41\x20\x52\x4F\x4F\x54\x2D\x41\x20\x57\x45\x42\x30\x1E\x17\x0D\x32\x32\x30\x34\x30\x36\x30\x39\x30\x31\x33\x36\x5A\x17\x0D\x34\x37\x30\x33\x33\x31\x30\x39\x30\x31\x33\x36\x5A\x30\x6E\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x45\x53\x31\x1C\x30\x1A\x06\x03\x55\x04\x0A\x0C\x13\x46\x69\x72\x6D\x61\x70\x72\x6F\x66\x65\x73\x69\x6F\x6E\x61\x6C\x20\x53\x41\x31\x18\x30\x16\x06\x03\x55\x04\x61\x0C\x0F\x56\x41\x54\x45\x53\x2D\x41\x36\x32\x36\x33\x34\x30\x36\x38\x31\x27\x30\x25\x06\x03\x55\x04\x03\x0C\x1E\x46\x49\x52\x4D\x41\x50\x52\x4F\x46\x45\x53\x49\x4F\x4E\x41\x4C\x20\x43\x41\x20\x52\x4F\x4F\x54\x2D\x41\x20\x57\x45\x42\x30\x76\x30\x10\x06\x07\x2A\x86\x48\xCE\x3D\x02\x01\x06\x05\x2B\x81\x04\x00\x22\x03\x62\x00\x04\x47\x53\xEA\x2C\x11\xA4\x77\xC7\x2A\xEA\xF3\xD6\x5F\x7B\xD3\x04\x91\x5C\xFA\x88\xC6\x22\xB9\x83\x10\x62\x77\x84\x33\x2D\xE9\x03\x88\xD4\xE0\x33\xF7\xED\x77\x2C\x4A\x60\xEA\xE4\x6F\xAD\x6D\xB4\xF8\x4C\x8A\xA4\xE4\x1F\xCA\xEA\x4F\x38\x4A\x2E\x82\x73\x2B\xC7\x66\x9B\x0A\x8C\x40\x9C\x7C\x8A\xF6\xF2\x39\x60\xB2\xDE\xCB\xEC\xB8\xE4\x6F\xEA\x9B\x5D\xB7\x53\x90\x18\x32\x55\xC5\x20\xB7\x94\xA3\x63\x30\x61\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1F\x06\x03\x55\x1D\x23\x04\x18\x30\x16\x80\x14\x93\xE1\x43\x63\x5C\x3C\x9D\xD6\x27\xF3\x52\xEC\x17\xB2\xA9\xAF\x2C\xF7\x76\xF8\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x93\xE1\x43\x63\x5C\x3C\x9D\xD6\x27\xF3\x52\xEC\x17\xB2\xA9\xAF\x2C\xF7\x76\xF8\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0A\x06\x08\x2A\x86\x48\xCE\x3D\x04\x03\x03\x03\x68\x00\x30\x65\x02\x30\x1D\x7C\xA4\x7B\xC3\x89\x75\x33\xE1\x3B\xA9\x45\xBF\x46\xE9\xE9\xA1\xDD\xC9\x22\x16\xB7\x47\x11\x0B\xD8\x9A\xBA\xF1\xC8\x0B\x70\x50\x53\x02\x91\x70\x85\x59\xA9\x1E\xA4\xE6\xEA\x23\x31\xA0\x00\x02\x31\x00\xFD\xE2\xF8\xB3\xAF\x16\xB9\x1E\x73\xC4\x96\xE3\xC1\x30\x19\xD8\x7E\xE6\xC3\x97\xDE\x1C\x4F\xB8\x89\x2F\x33\xEB\x48\x0F\x19\xF7\x87\x46\x5D\x26\x90\xA5\x85\xC5\xB9\x7A\x94\x3E\x87\xA8\xBD\x00", }; diff --git a/scripts/policy/frameworks/management/controller/config.zeek b/scripts/policy/frameworks/management/controller/config.zeek index 0b3d50b4ab..9fe10edaa6 100644 --- a/scripts/policy/frameworks/management/controller/config.zeek +++ b/scripts/policy/frameworks/management/controller/config.zeek @@ -65,13 +65,11 @@ export { ## cluster nodes that need them and don't have them explicitly specified ## in cluster configurations. const auto_assign_broker_ports = T &redef; - const auto_assign_ports = T &redef &deprecated="Remove in v7.1: replaced by auto_assign_broker_ports."; ## The TCP start port to use for auto-assigning cluster node listening ## ports, if :zeek:see:`Management::Controller::auto_assign_broker_ports` is ## enabled (the default) and nodes don't come with those ports assigned. 
const auto_assign_broker_start_port = 2200/tcp &redef; - const auto_assign_start_port = 2200/tcp &redef &deprecated="Remove in v7.1: replaced by auto_assign_broker_start_port."; ## Whether the controller should auto-assign metrics ports for Prometheus ## to nodes that need them and don't have them explicitly specified in diff --git a/scripts/policy/frameworks/management/controller/main.zeek b/scripts/policy/frameworks/management/controller/main.zeek index fd7df8343d..efc603e227 100644 --- a/scripts/policy/frameworks/management/controller/main.zeek +++ b/scripts/policy/frameworks/management/controller/main.zeek @@ -335,12 +335,6 @@ function config_assign_broker_ports(config: Management::Configuration) # instances. local start_port = Management::Controller::auto_assign_broker_start_port; -@pragma push ignore-deprecations - # Keep deprecated config setting working until 7.1: - if ( Management::Controller::auto_assign_start_port != 2200/tcp ) - start_port = Management::Controller::auto_assign_start_port; -@pragma pop ignore-deprecations - local p = port_to_count(start_port); # A set that tracks the ports we've used so far. Helpful for avoiding @@ -613,17 +607,10 @@ function config_validate(config: Management::Configuration, # ports. Verify this both for Broker's ports and the metrics export # ones. -@pragma push ignore-deprecations - # Keep deprecated config setting working until 7.1: - local auto_broker_ports = Management::Controller::auto_assign_broker_ports; - if ( ! Management::Controller::auto_assign_ports ) - auto_broker_ports = F; -@pragma pop ignore-deprecations - local nodes: vector of string; local nodes_str: string; - if ( ! auto_broker_ports ) + if ( ! Management::Controller::auto_assign_broker_ports ) { nodes = config_nodes_lacking_broker_ports(config); @@ -1042,17 +1029,10 @@ event Management::Controller::API::stage_configuration_request(reqid: string, co g_configs[STAGED] = config; config_copy = copy(config); -@pragma push ignore-deprecations - # Keep deprecated config setting working until 7.1: - local auto_broker_ports = Management::Controller::auto_assign_broker_ports; - if ( ! Management::Controller::auto_assign_ports ) - auto_broker_ports = F; - - if ( auto_broker_ports ) + if ( Management::Controller::auto_assign_broker_ports ) config_assign_broker_ports(config_copy); if ( Management::Controller::auto_assign_metrics_ports ) config_assign_metrics_ports(config_copy); -@pragma pop ignore-deprecations g_configs[READY] = config_copy; diff --git a/scripts/policy/frameworks/telemetry/prometheus.zeek b/scripts/policy/frameworks/telemetry/prometheus.zeek deleted file mode 100644 index a7d0226d73..0000000000 --- a/scripts/policy/frameworks/telemetry/prometheus.zeek +++ /dev/null @@ -1,2 +0,0 @@ -@deprecated "Remove in v7.1: Cluster nodes now implicitly listen on metrics port if set in cluster-layout." -@load base/frameworks/telemetry diff --git a/scripts/policy/misc/load-balancing.zeek b/scripts/policy/misc/load-balancing.zeek deleted file mode 100644 index f53e65c494..0000000000 --- a/scripts/policy/misc/load-balancing.zeek +++ /dev/null @@ -1,117 +0,0 @@ -##! This script implements the "Zeek side" of several load balancing -##! approaches for Zeek clusters. - -@deprecated "Remove in v7.1. This script has not seen extensions for the past 10 years and is not at all recommended to use for packet load balancing purposes. On Linux, AF_PACKET is recommended and works out of the box. On FreeBSD, there is Netmap with lb. 
Otherwise, NIC specific packet sources and approaches exist that handle the load balancing." - -@pragma push ignore-deprecations - -@load base/frameworks/cluster -@load base/frameworks/packet-filter - -module LoadBalancing; - -export { - - type Method: enum { - ## Apply BPF filters to each worker in a way that causes them to - ## automatically flow balance traffic between them. - AUTO_BPF, - }; - - ## Defines the method of load balancing to use. - const method = AUTO_BPF &redef; - - redef record Cluster::Node += { - ## A BPF filter for load balancing traffic sniffed on a single - ## interface across a number of processes. In normal uses, this - ## will be assigned dynamically by the manager and installed by - ## the workers. - lb_filter: string &optional; - }; -} - -@if ( Cluster::is_enabled() ) - -event zeek_init() &priority=5 - { - if ( method != AUTO_BPF ) - return; - - local worker_ip_interface: table[addr, string] of count = table(); - local sorted_node_names: vector of string = vector(); - local node: Cluster::Node; - local name: string; - - # Sort nodes list so that every node iterates over it in same order. - for ( name in Cluster::nodes ) - sorted_node_names += name; - - sort(sorted_node_names, strcmp); - - for ( idx in sorted_node_names ) - { - name = sorted_node_names[idx]; - node = Cluster::nodes[name]; - - if ( node$node_type != Cluster::WORKER ) - next; - - if ( ! node?$interface ) - next; - - if ( [node$ip, node$interface] !in worker_ip_interface ) - worker_ip_interface[node$ip, node$interface] = 0; - - ++worker_ip_interface[node$ip, node$interface]; - } - - # Now that we've counted up how many processes are running per - # interface, let's create the filters for each worker. - local lb_proc_track: table[addr, string] of count = table(); - - for ( idx in sorted_node_names ) - { - name = sorted_node_names[idx]; - node = Cluster::nodes[name]; - - if ( node$node_type != Cluster::WORKER ) - next; - - if ( ! node?$interface ) - next; - - if ( [node$ip, node$interface] !in worker_ip_interface ) - next; - - if ( [node$ip, node$interface] !in lb_proc_track ) - lb_proc_track[node$ip, node$interface] = 0; - - local this_lb_proc = lb_proc_track[node$ip, node$interface]; - local total_lb_procs = worker_ip_interface[node$ip, node$interface]; - ++lb_proc_track[node$ip, node$interface]; - - if ( total_lb_procs > 1 ) - node$lb_filter = PacketFilter::sampling_filter(total_lb_procs, - this_lb_proc); - } - - # Finally, install filter for the current node if it needs one. - for ( idx in sorted_node_names ) - { - name = sorted_node_names[idx]; - node = Cluster::nodes[name]; - - if ( name != Cluster::node ) - next; - - if ( ! node?$lb_filter ) - next; - - restrict_filters["lb_filter"] = node$lb_filter; - PacketFilter::install(); - } - } - -@endif - -@pragma pop diff --git a/scripts/policy/tuning/__load__.zeek b/scripts/policy/tuning/__load__.zeek deleted file mode 100644 index db9fe9a572..0000000000 --- a/scripts/policy/tuning/__load__.zeek +++ /dev/null @@ -1,2 +0,0 @@ -##! This loads the default tuning -@load ./defaults \ No newline at end of file diff --git a/scripts/policy/tuning/defaults/README b/scripts/policy/tuning/defaults/README deleted file mode 100644 index d5417588c9..0000000000 --- a/scripts/policy/tuning/defaults/README +++ /dev/null @@ -1,2 +0,0 @@ -Sets various defaults, and prints warning messages to stdout under -certain conditions. 
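The deleted misc/load-balancing.zeek implemented its AUTO_BPF mode by counting how many worker processes sniff each (ip, interface) pair and then giving every such worker a distinct slot of a sampling filter, so traffic on a shared interface is split evenly. A rough C++ sketch of just that bookkeeping, under the assumption that only the slot assignment matters here (the actual filter came from PacketFilter::sampling_filter and was only installed when more than one process shared an interface):

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct Worker {
        std::string name, ip, iface;
    };

    // For every (ip, interface) pair, give each worker a slot
    // (this_proc, total_procs) that a sampling filter can use to split
    // the traffic across all processes sniffing that interface.
    std::map<std::string, std::pair<int, int>> assign_slots(const std::vector<Worker>& workers) {
        std::map<std::pair<std::string, std::string>, int> totals;
        for ( const auto& w : workers )
            ++totals[{w.ip, w.iface}];

        std::map<std::pair<std::string, std::string>, int> next_slot;
        std::map<std::string, std::pair<int, int>> slots;
        for ( const auto& w : workers ) {
            auto key = std::make_pair(w.ip, w.iface);
            slots[w.name] = {next_slot[key]++, totals[key]};
        }
        return slots;
    }

    int main() {
        auto slots = assign_slots({{"worker-1", "10.0.0.1", "eth0"},
                                   {"worker-2", "10.0.0.1", "eth0"},
                                   {"worker-3", "10.0.0.2", "eth0"}});
        for ( const auto& [name, s] : slots )
            std::cout << name << " -> slot " << s.first << " of " << s.second << "\n";
        // Only pairs with a total > 1 would actually get a BPF sampling filter.
    }
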
diff --git a/scripts/policy/tuning/defaults/__load__.zeek b/scripts/policy/tuning/defaults/__load__.zeek deleted file mode 100644 index 09bacaace3..0000000000 --- a/scripts/policy/tuning/defaults/__load__.zeek +++ /dev/null @@ -1 +0,0 @@ -@deprecated "Remove in v7.1 The policy/tuning/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; diff --git a/scripts/policy/tuning/defaults/extracted_file_limits.zeek b/scripts/policy/tuning/defaults/extracted_file_limits.zeek deleted file mode 100644 index 09bacaace3..0000000000 --- a/scripts/policy/tuning/defaults/extracted_file_limits.zeek +++ /dev/null @@ -1 +0,0 @@ -@deprecated "Remove in v7.1 The policy/tuning/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; diff --git a/scripts/policy/tuning/defaults/packet-fragments.zeek b/scripts/policy/tuning/defaults/packet-fragments.zeek deleted file mode 100644 index 09bacaace3..0000000000 --- a/scripts/policy/tuning/defaults/packet-fragments.zeek +++ /dev/null @@ -1 +0,0 @@ -@deprecated "Remove in v7.1 The policy/tuning/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; diff --git a/scripts/policy/tuning/defaults/warnings.zeek b/scripts/policy/tuning/defaults/warnings.zeek deleted file mode 100644 index 09bacaace3..0000000000 --- a/scripts/policy/tuning/defaults/warnings.zeek +++ /dev/null @@ -1 +0,0 @@ -@deprecated "Remove in v7.1 The policy/tuning/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; diff --git a/scripts/spicy/zeek_rt.hlt b/scripts/spicy/zeek_rt.hlt index 3f4dd28adc..801c49a9b1 100644 --- a/scripts/spicy/zeek_rt.hlt +++ b/scripts/spicy/zeek_rt.hlt @@ -18,7 +18,7 @@ type ZeekTypeTag = enum { } &cxxname="::zeek::spicy::rt::ZeekTypeTag"; declare public void register_spicy_module_begin(string name, string description) &cxxname="zeek::spicy::rt::register_spicy_module_begin"; -declare public void register_protocol_analyzer(string name, hilti::Protocol protocol, vector ports, string parser_orig, string parser_resp, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_protocol_analyzer" &have_prototype; +declare public void register_protocol_analyzer(string name, hilti::Protocol protocol, string parser_orig, string parser_resp, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_protocol_analyzer" &have_prototype; declare public void register_file_analyzer(string name, vector mime_types, string parser, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_file_analyzer" &have_prototype; declare public void register_packet_analyzer(string name, string parser, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_packet_analyzer" &have_prototype; declare public void register_type(string ns, string id, BroType t) &cxxname="zeek::spicy::rt::register_type" &have_prototype; diff --git a/scripts/test-all-policy.zeek b/scripts/test-all-policy.zeek index cf75ed5aa0..e0da69a8e1 100644 --- a/scripts/test-all-policy.zeek +++ b/scripts/test-all-policy.zeek @@ -78,7 +78,6 @@ # @load frameworks/spicy/record-spicy-batch.zeek # @load frameworks/spicy/resource-usage.zeek @load frameworks/software/windows-version-detection.zeek -@load frameworks/telemetry/prometheus.zeek @load frameworks/telemetry/log.zeek @load integration/collective-intel/__load__.zeek @load integration/collective-intel/main.zeek @@ -86,7 +85,6 @@ @load 
misc/detect-traceroute/__load__.zeek @load misc/detect-traceroute/main.zeek # @load misc/dump-events.zeek -@load misc/load-balancing.zeek @load misc/loaded-scripts.zeek @load misc/profiling.zeek @load misc/stats.zeek @@ -142,10 +140,5 @@ @load protocols/ssl/validate-ocsp.zeek @load protocols/ssl/validate-sct.zeek @load protocols/ssl/weak-keys.zeek -@load tuning/__load__.zeek -@load tuning/defaults/__load__.zeek -@load tuning/defaults/extracted_file_limits.zeek -@load tuning/defaults/packet-fragments.zeek -@load tuning/defaults/warnings.zeek @load tuning/json-logs.zeek @load tuning/track-all-assets.zeek diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c7ae4f183c..6920e42a01 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -636,6 +636,8 @@ install( # https://gitlab.kitware.com/cmake/cmake/-/issues/17122 Exclude the ones that # this affects explicitly. PATTERN "script_opt/CPP/maint" EXCLUDE + PATTERN "script_opt/ZAM/maint" EXCLUDE + PATTERN "script_opt/ZAM/OPs" EXCLUDE PATTERN "fuzzers/corpora" EXCLUDE) install( diff --git a/src/Conn.cc b/src/Conn.cc index 8510dc192b..2e53c8184f 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -258,15 +258,6 @@ analyzer::Analyzer* Connection::FindAnalyzer(const zeek::Tag& tag) { analyzer::Analyzer* Connection::FindAnalyzer(const char* name) { return adapter->FindChild(name); } -void Connection::AppendAddl(const char* str) { - const auto& cv = GetVal(); - - const char* old = cv->GetFieldAs(6)->CheckString(); - const char* format = *old ? "%s %s" : "%s%s"; - - cv->Assign(6, util::fmt(format, old, str)); -} - void Connection::Match(detail::Rule::PatternType type, const u_char* data, int len, bool is_orig, bool bol, bool eol, bool clear_state) { if ( primary_PIA ) diff --git a/src/Conn.h b/src/Conn.h index f6ec77e59a..67f41dc68d 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -145,12 +145,6 @@ public: */ const RecordValPtr& GetVal() override; - /** - * Append additional entries to the history field in the connection record. - */ - [[deprecated("Remove in v7.1 - Appears unused and named rough. Use CheckHistory() or AddHistory() instead.")]] void - AppendAddl(const char* str); - void Match(detail::Rule::PatternType type, const u_char* data, int len, bool is_orig, bool bol, bool eol, bool clear_state); diff --git a/src/DNS_Mgr.cc b/src/DNS_Mgr.cc index a707d8ead0..33da7dd0ae 100644 --- a/src/DNS_Mgr.cc +++ b/src/DNS_Mgr.cc @@ -45,6 +45,7 @@ using ztd::out_ptr::out_ptr; #include "zeek/Val.h" #include "zeek/ZeekString.h" #include "zeek/iosource/Manager.h" +#include "zeek/telemetry/Manager.h" // Number of seconds we'll wait for a reply. 
constexpr int DNS_TIMEOUT = 5; @@ -545,6 +546,55 @@ void DNS_Mgr::InitSource() { } void DNS_Mgr::InitPostScript() { + num_requests_metric = + telemetry_mgr->CounterInstance("zeek", "dnsmgr_requests", {}, "Total number of requests through DNS_Mgr"); + successful_metric = telemetry_mgr->CounterInstance("zeek", "dnsmgr_successful_requests", {}, + "Total number of successful requests through DNS_Mgr"); + failed_metric = telemetry_mgr->CounterInstance("zeek", "dnsmgr_failed_requests", {}, + "Total number of failed requests through DNS_Mgr"); + asyncs_pending_metric = telemetry_mgr->GaugeInstance("zeek", "dnsmgr_pending_asyncs_requests", {}, + "Number of pending async requests through DNS_Mgr"); + + cached_hosts_metric = + telemetry_mgr->GaugeInstance("zeek", "dnsmgr_cache_entries", {{"type", "host"}}, + "Number of cached hosts in DNS_Mgr", "", []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.gauge.value = 0; + + if ( dns_mgr ) { + dns_mgr->UpdateCachedStats(false); + metric.gauge.value = static_cast(dns_mgr->last_cached_stats.hosts); + } + return metric; + }); + + cached_addresses_metric = + telemetry_mgr->GaugeInstance("zeek", "dnsmgr_cache_entries", {{"type", "address"}}, + "Number of cached addresses in DNS_Mgr", "", []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.gauge.value = 0; + + if ( dns_mgr ) { + dns_mgr->UpdateCachedStats(false); + metric.gauge.value = + static_cast(dns_mgr->last_cached_stats.addresses); + } + return metric; + }); + + cached_texts_metric = + telemetry_mgr->GaugeInstance("zeek", "dnsmgr_cache_entries", {{"type", "text"}}, + "Number of cached texts in DNS_Mgr", "", []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.gauge.value = 0; + + if ( dns_mgr ) { + dns_mgr->UpdateCachedStats(false); + metric.gauge.value = static_cast(dns_mgr->last_cached_stats.texts); + } + return metric; + }); + if ( ! 
doctest::is_running_in_test ) { dm_rec = id::find_type("dns_mapping"); @@ -1158,7 +1208,7 @@ void DNS_Mgr::IssueAsyncRequests() { AsyncRequest* req = asyncs_queued.front(); asyncs_queued.pop_front(); - ++num_requests; + num_requests_metric->Inc(); req->time = util::current_time(); if ( req->type == T_PTR ) @@ -1173,6 +1223,7 @@ void DNS_Mgr::IssueAsyncRequests() { dns_req->MakeRequest(channel, this); ++asyncs_pending; + asyncs_pending_metric->Inc(); } } @@ -1182,11 +1233,11 @@ void DNS_Mgr::CheckAsyncHostRequest(const std::string& host, bool timeout) { if ( i != asyncs.end() ) { if ( timeout ) { - ++failed; + failed_metric->Inc(); i->second->Timeout(); } else if ( auto addrs = LookupNameInCache(host, true, false) ) { - ++successful; + successful_metric->Inc(); i->second->Resolved(addrs); } else @@ -1195,6 +1246,7 @@ void DNS_Mgr::CheckAsyncHostRequest(const std::string& host, bool timeout) { delete i->second; asyncs.erase(i); --asyncs_pending; + asyncs_pending_metric->Dec(); } } @@ -1207,11 +1259,11 @@ void DNS_Mgr::CheckAsyncAddrRequest(const IPAddr& addr, bool timeout) { if ( i != asyncs.end() ) { if ( timeout ) { - ++failed; + failed_metric->Inc(); i->second->Timeout(); } else if ( auto name = LookupAddrInCache(addr, true, false) ) { - ++successful; + successful_metric->Inc(); i->second->Resolved(name->CheckString()); } else @@ -1220,6 +1272,7 @@ void DNS_Mgr::CheckAsyncAddrRequest(const IPAddr& addr, bool timeout) { delete i->second; asyncs.erase(i); --asyncs_pending; + asyncs_pending_metric->Dec(); } } @@ -1229,11 +1282,11 @@ void DNS_Mgr::CheckAsyncOtherRequest(const std::string& host, bool timeout, int auto i = asyncs.find(std::make_pair(request_type, host)); if ( i != asyncs.end() ) { if ( timeout ) { - ++failed; + failed_metric->Inc(); i->second->Timeout(); } else if ( auto name = LookupOtherInCache(host, request_type, true) ) { - ++successful; + successful_metric->Inc(); i->second->Resolved(name->CheckString()); } else @@ -1242,6 +1295,7 @@ void DNS_Mgr::CheckAsyncOtherRequest(const std::string& host, bool timeout, int delete i->second; asyncs.erase(i); --asyncs_pending; + asyncs_pending_metric->Dec(); } } @@ -1293,26 +1347,35 @@ void DNS_Mgr::Process() { ares_process_fd(channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD); } +void DNS_Mgr::UpdateCachedStats(bool force) { + double now = util::current_time(); + if ( force || last_cached_stats_update < now - 0.01 ) { + last_cached_stats.hosts = 0; + last_cached_stats.addresses = 0; + last_cached_stats.texts = 0; + last_cached_stats.total = all_mappings.size(); + + for ( const auto& [key, mapping] : all_mappings ) { + if ( mapping->ReqType() == T_PTR ) + last_cached_stats.addresses++; + else if ( mapping->ReqType() == T_A ) + last_cached_stats.hosts++; + else + last_cached_stats.texts++; + } + + last_cached_stats_update = now; + } +} + void DNS_Mgr::GetStats(Stats* stats) { - // TODO: can this use the telemetry framework? 
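The removed "can this use the telemetry framework?" TODO is answered by the hunks above: the ad-hoc request/success/failure counters become telemetry instruments that are incremented where the events happen and read back via Value() for the stats report. A condensed sketch of that pattern, following the CounterInstance/GaugeInstance calls used in this diff and assuming the zeek::telemetry_mgr global as used elsewhere in the tree; the surrounding component and metric names are hypothetical:

    #include <cstdint>

    #include "zeek/telemetry/Manager.h"

    // Sketch only: counters move monotonically up, gauges track a level
    // that can also go back down (e.g. pending requests).
    class ExampleComponent {
    public:
        void InitPostScript() {
            requests_metric = zeek::telemetry_mgr->CounterInstance("zeek", "example_requests", {},
                                                                   "Total requests handled");
            pending_metric = zeek::telemetry_mgr->GaugeInstance("zeek", "example_pending_requests", {},
                                                                "Requests currently in flight");
        }

        void OnRequestIssued() {
            requests_metric->Inc();
            pending_metric->Inc();
        }

        void OnRequestDone() { pending_metric->Dec(); }

        uint64_t TotalRequests() const {
            // Stats reporting reads the instrument instead of a shadow counter.
            return static_cast<uint64_t>(requests_metric->Value());
        }

    private:
        zeek::telemetry::CounterPtr requests_metric;
        zeek::telemetry::GaugePtr pending_metric;
    };
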
- stats->requests = num_requests; - stats->successful = successful; - stats->failed = failed; + stats->requests = static_cast(num_requests_metric->Value()); + stats->successful = static_cast(successful_metric->Value()); + stats->failed = static_cast(failed_metric->Value()); stats->pending = asyncs_pending; - stats->cached_hosts = 0; - stats->cached_addresses = 0; - stats->cached_texts = 0; - stats->cached_total = all_mappings.size(); - - for ( const auto& [key, mapping] : all_mappings ) { - if ( mapping->ReqType() == T_PTR ) - stats->cached_addresses++; - else if ( mapping->ReqType() == T_A ) - stats->cached_hosts++; - else - stats->cached_texts++; - } + UpdateCachedStats(true); + stats->cached = last_cached_stats; } void DNS_Mgr::AsyncRequest::Resolved(const std::string& name) { diff --git a/src/DNS_Mgr.h b/src/DNS_Mgr.h index 5d0f9a84b7..7e063b28a3 100644 --- a/src/DNS_Mgr.h +++ b/src/DNS_Mgr.h @@ -42,6 +42,13 @@ using TableValPtr = IntrusivePtr; using StringValPtr = IntrusivePtr; using RecordValPtr = IntrusivePtr; +namespace telemetry { +class Gauge; +class Counter; +using GaugePtr = std::shared_ptr; +using CounterPtr = std::shared_ptr; +} // namespace telemetry + } // namespace zeek namespace zeek::detail { @@ -198,15 +205,19 @@ public: */ bool Save(); + struct CachedStats { + unsigned long hosts; + unsigned long addresses; + unsigned long texts; + unsigned long total; + }; + struct Stats { unsigned long requests; // These count only async requests. unsigned long successful; unsigned long failed; unsigned long pending; - unsigned long cached_hosts; - unsigned long cached_addresses; - unsigned long cached_texts; - unsigned long cached_total; + CachedStats cached; }; /** @@ -285,6 +296,8 @@ protected: const char* Tag() override { return "DNS_Mgr"; } double GetNextTimeout() override; + void UpdateCachedStats(bool force); + DNS_MgrMode mode; MappingMap all_mappings; @@ -293,7 +306,6 @@ protected: std::string dir; // directory in which cache_name resides bool did_init = false; - int asyncs_pending = 0; RecordTypePtr dm_rec; @@ -327,9 +339,19 @@ protected: using QueuedList = std::list; QueuedList asyncs_queued; - unsigned long num_requests = 0; - unsigned long successful = 0; - unsigned long failed = 0; + telemetry::CounterPtr num_requests_metric; + telemetry::CounterPtr successful_metric; + telemetry::CounterPtr failed_metric; + telemetry::GaugePtr asyncs_pending_metric; + + telemetry::GaugePtr cached_hosts_metric; + telemetry::GaugePtr cached_addresses_metric; + telemetry::GaugePtr cached_texts_metric; + + double last_cached_stats_update = 0; + CachedStats last_cached_stats; + + int asyncs_pending = 0; std::set socket_fds; std::set write_socket_fds; diff --git a/src/EventHandler.h b/src/EventHandler.h index ef647a7c6e..0fbd5c1282 100644 --- a/src/EventHandler.h +++ b/src/EventHandler.h @@ -44,13 +44,6 @@ public: // Returns true if there is at least one local or remote handler. explicit operator bool() const; - [[deprecated("Remove in v7.1 - Unused event handlers are now found via UsageAnalyzer.")]] void SetUsed() { - used = true; - } - [[deprecated("Remove in v7.1 - Unused event handlers are now found via UsageAnalyzer.")]] bool Used() const { - return used; - } - // Handlers marked as error handlers will not be called recursively to // avoid infinite loops if they trigger a similar error themselves. 
void SetErrorHandler() { error_handler = true; } diff --git a/src/EventRegistry.cc b/src/EventRegistry.cc index 93a5844331..b166546276 100644 --- a/src/EventRegistry.cc +++ b/src/EventRegistry.cc @@ -21,23 +21,12 @@ EventHandlerPtr EventRegistry::Register(std::string_view name, bool is_from_scri if ( ! is_from_script ) not_only_from_script.insert(std::string(name)); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - // Remove in v7.1 - h->SetUsed(); -#pragma GCC diagnostic pop return h; } h = new EventHandler(std::string(name)); event_registry->Register(h, is_from_script); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - // Remove in v7.1 - h->SetUsed(); -#pragma GCC diagnostic pop - return h; } @@ -74,36 +63,6 @@ EventRegistry::string_list EventRegistry::Match(RE_Matcher* pattern) { return names; } -EventRegistry::string_list EventRegistry::UnusedHandlers() { - string_list names; - - for ( const auto& entry : handlers ) { - EventHandler* v = entry.second.get(); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - if ( v->GetFunc() && ! v->Used() ) - names.push_back(entry.first); -#pragma GCC diagnostic pop - } - - return names; -} - -EventRegistry::string_list EventRegistry::UsedHandlers() { - string_list names; - - for ( const auto& entry : handlers ) { - EventHandler* v = entry.second.get(); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - if ( v->GetFunc() && v->Used() ) - names.push_back(entry.first); -#pragma GCC diagnostic pop - } - - return names; -} - EventRegistry::string_list EventRegistry::AllHandlers() { string_list names; diff --git a/src/EventRegistry.h b/src/EventRegistry.h index eb9b9b0026..cbe9c622bf 100644 --- a/src/EventRegistry.h +++ b/src/EventRegistry.h @@ -69,8 +69,6 @@ public: // themselves. void SetErrorHandler(std::string_view name); - [[deprecated("Remove in v7.1 - Unused handlers are now found via UsageAnalyzer.")]] string_list UnusedHandlers(); - [[deprecated("Remove in v7.1 - UsedHandlers() is unreliable - use AllHandlers().")]] string_list UsedHandlers(); string_list AllHandlers(); void PrintDebug(); diff --git a/src/Expr.cc b/src/Expr.cc index abbb07a3d4..3147906b92 100644 --- a/src/Expr.cc +++ b/src/Expr.cc @@ -414,13 +414,6 @@ NameExpr::NameExpr(IDPtr arg_id, bool const_init) : Expr(EXPR_NAME), id(std::mov SetType(make_intrusive(id->GetType())); else SetType(id->GetType()); - - EventHandler* h = event_registry->Lookup(id->Name()); - if ( h ) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - h->SetUsed(); -#pragma GCC diagnostic pop } bool NameExpr::CanDel() const { @@ -4417,11 +4410,6 @@ EventExpr::EventExpr(const char* arg_name, ListExprPtr arg_args) event_registry->Register(h, true); } -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - h->SetUsed(); -#pragma GCC diagnostic pop - handler = h; if ( args->IsError() ) { diff --git a/src/Func.cc b/src/Func.cc index 87783a30a9..85d184295d 100644 --- a/src/Func.cc +++ b/src/Func.cc @@ -434,7 +434,7 @@ ValPtr ScriptFunc::Invoke(zeek::Args* args, Frame* parent) const { // Warn if the function returns something, but we returned from // the function without an explicit return, or without a value. - else if ( GetType()->Yield() && GetType()->Yield()->Tag() != TYPE_VOID && + else if ( GetType()->Yield() && GetType()->Yield()->Tag() != TYPE_VOID && ! 
GetType()->ExpressionlessReturnOkay() && (flow != FLOW_RETURN /* we fell off the end */ || ! result /* explicit return with no result */) && ! f->HasDelayed() ) reporter->Warning("non-void function returning without a value: %s", Name()); diff --git a/src/NetVar.cc b/src/NetVar.cc index 67e078e4d6..1b97300858 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -182,8 +182,6 @@ int dpd_match_only_beginning; int dpd_late_match_stop; int dpd_ignore_ports; -int check_for_unused_event_handlers; - int record_all_packets; zeek_uint_t bits_per_uid; @@ -229,7 +227,6 @@ void init_general_global_var() { table_incremental_step = id::find_val("table_incremental_step")->AsCount(); packet_filter_default = id::find_val("packet_filter_default")->AsBool(); sig_max_group_size = id::find_val("sig_max_group_size")->AsCount(); - check_for_unused_event_handlers = id::find_val("check_for_unused_event_handlers")->AsBool(); record_all_packets = id::find_val("record_all_packets")->AsBool(); bits_per_uid = id::find_val("bits_per_uid")->AsCount(); } diff --git a/src/NetVar.h b/src/NetVar.h index 8d628c1ff9..8f7319f30f 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -85,8 +85,6 @@ extern int dpd_match_only_beginning; extern int dpd_late_match_stop; extern int dpd_ignore_ports; -extern int check_for_unused_event_handlers; - extern int record_all_packets; extern zeek_uint_t bits_per_uid; diff --git a/src/OpaqueVal.cc b/src/OpaqueVal.cc index 4c36084091..b4dcc25a32 100644 --- a/src/OpaqueVal.cc +++ b/src/OpaqueVal.cc @@ -70,12 +70,6 @@ OpaqueValPtr OpaqueMgr::Instantiate(const std::string& id) const { return x != _types.end() ? (*x->second)() : nullptr; } -broker::expected OpaqueVal::Serialize() const { - if ( auto res = SerializeData() ) - return zeek::detail::BrokerDataAccess::Unbox(*res); - return {broker::make_error(broker::ec::serialization_failed)}; -} - std::optional OpaqueVal::SerializeData() const { auto type = OpaqueMgr::mgr()->TypeID(this); @@ -89,8 +83,6 @@ std::optional OpaqueVal::SerializeData() const { return std::move(builder).Build(); } -OpaqueValPtr OpaqueVal::Unserialize(const broker::data& data) { return UnserializeData(BrokerDataView(&data)); } - OpaqueValPtr OpaqueVal::UnserializeData(BrokerDataView data) { if ( ! data.IsList() ) return nullptr; @@ -114,22 +106,9 @@ OpaqueValPtr OpaqueVal::UnserializeData(BrokerListView v) { return val; } -broker::expected OpaqueVal::DoSerialize() const { - return {broker::make_error(broker::ec::serialization_failed)}; -} +std::optional OpaqueVal::DoSerializeData() const { return std::nullopt; } -std::optional OpaqueVal::DoSerializeData() const { - if ( auto res = DoSerialize() ) { - return BrokerData{std::move(*res)}; - } - return std::nullopt; -} - -bool OpaqueVal::DoUnserialize(const broker::data&) { return false; } - -bool OpaqueVal::DoUnserializeData(BrokerDataView data) { - return DoUnserialize(zeek::detail::BrokerDataAccess::Unbox(data)); -} +bool OpaqueVal::DoUnserializeData(BrokerDataView data) { return false; } std::optional OpaqueVal::SerializeType(const TypePtr& t) { if ( t->InternalType() == TYPE_INTERNAL_ERROR ) diff --git a/src/OpaqueVal.h b/src/OpaqueVal.h index 371e5fad57..ba0a048979 100644 --- a/src/OpaqueVal.h +++ b/src/OpaqueVal.h @@ -91,19 +91,6 @@ private: std::unordered_map _types; }; -/** - * Legacy macro to insert into an OpaqueVal-derived class's declaration. Overrides the "old" serialization methods - * DoSerialize and DoUnserialize. - * @deprecated Use DECLARE_OPAQUE_VALUE_DATA instead. Remove in v7.1. 
- */ -#define DECLARE_OPAQUE_VALUE(T) \ - friend class zeek::OpaqueMgr::Register; \ - friend zeek::IntrusivePtr zeek::make_intrusive(); \ - broker::expected DoSerialize() const override; \ - bool DoUnserialize(const broker::data& data) override; \ - const char* OpaqueName() const override { return #T; } \ - static zeek::OpaqueValPtr OpaqueInstantiate() { return zeek::make_intrusive(); } - /** * Macro to insert into an OpaqueVal-derived class's declaration. Overrides the "new" serialization methods * DoSerializeData and DoUnserializeData. @@ -132,28 +119,11 @@ public: explicit OpaqueVal(OpaqueTypePtr t); ~OpaqueVal() override = default; - /** - * Serializes the value into a Broker representation. - * - * @return the broker representation, or an error if serialization - * isn't supported or failed. - */ - [[deprecated("Remove in v7.1: use SerializeData instead")]] broker::expected Serialize() const; - /** * @copydoc Serialize */ std::optional SerializeData() const; - /** - * Reinstantiates a value from its serialized Broker representation. - * - * @param data Broker representation as returned by *Serialize()*. - * @return unserialized instances with reference count at +1 - */ - [[deprecated("Remove in v7.1: use UnserializeData instead")]] static OpaqueValPtr Unserialize( - const broker::data& data); - /** * @copydoc Unserialize */ @@ -168,11 +138,6 @@ protected: friend class Val; friend class OpaqueMgr; - /** - * @deprecated Override DoSerializeData instead. Remove in v7.1. - */ - virtual broker::expected DoSerialize() const; - /** * Must be overridden to provide a serialized version of the derived * class' state. @@ -182,11 +147,6 @@ protected: */ virtual std::optional DoSerializeData() const; - /** - * @deprecated Override DoUnserializeData instead. Remove in v7.1. - */ - virtual bool DoUnserialize(const broker::data& data); - /** * Must be overridden to recreate the derived class' state from a * serialization. diff --git a/src/Scope.cc b/src/Scope.cc index 91b806c921..4bebbcdbbf 100644 --- a/src/Scope.cc +++ b/src/Scope.cc @@ -93,11 +93,9 @@ const IDPtr& lookup_ID(const char* name, const char* curr_module, bool no_global bool check_export) { bool explicit_global = zeek::util::starts_with(name, "::"); - // Ad-hoc deprecation if a name starts with "GLOBAL::". In v7.1 we could - // tweak {ID} to reject GLOBAL::, or switch this warning to error instead. - static std::string deprecated_prefix = util::fmt("%s::", GLOBAL_MODULE_NAME); - if ( zeek::util::starts_with(name, deprecated_prefix) ) - reporter->Deprecation(util::fmt("Remove in v7.1: Use :: instead of %s (%s)", deprecated_prefix.c_str(), name)); + static std::string global_prefix = util::fmt("%s::", GLOBAL_MODULE_NAME); + if ( zeek::util::starts_with(name, global_prefix) ) + reporter->Error("Using GLOBAL:: as a prefix is invalid. Use :: instead (%s)", name); std::string fullname = make_full_var_name(curr_module, name); std::string ID_module = extract_module_name(fullname.c_str()); diff --git a/src/Stats.cc b/src/Stats.cc index 7f49585121..7b776b0138 100644 --- a/src/Stats.cc +++ b/src/Stats.cc @@ -119,7 +119,7 @@ void ProfileLogger::Log() { // TODO: This previously output the number of connections, but now that we're storing // sessions as well as connections, this might need to be renamed. 
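With the broker-based hooks removed in the OpaqueVal hunks above, subclasses implement only the *Data variants: DoSerializeData() returns an optional BrokerData and DoUnserializeData() restores state from a BrokerDataView. A bare-bones sketch of a subclass after this change; the bodies are placeholders since the BrokerData construction API is not part of these hunks, and a real type would typically use DECLARE_OPAQUE_VALUE_DATA for the registration boilerplate instead of spelling it out:

    #include <cstdint>
    #include <optional>

    #include "zeek/OpaqueVal.h"

    // Sketch only: a custom opaque type after the v7.1 cleanup. The old
    // DoSerialize()/DoUnserialize() overrides and DECLARE_OPAQUE_VALUE are
    // gone; only the Data-based hooks remain.
    class ExampleCounterVal : public zeek::OpaqueVal {
    public:
        explicit ExampleCounterVal(zeek::OpaqueTypePtr t) : zeek::OpaqueVal(std::move(t)) {}

        const char* OpaqueName() const override { return "ExampleCounterVal"; }

    protected:
        std::optional<zeek::BrokerData> DoSerializeData() const override {
            // Build and return a BrokerData representation of `counter`,
            // or std::nullopt if serialization is not supported.
            return std::nullopt; // placeholder
        }

        bool DoUnserializeData(zeek::BrokerDataView data) override {
            // Restore `counter` from the representation produced above.
            return false; // placeholder
        }

    private:
        uint64_t counter = 0;
    };
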
- file->Write(util::fmt("%.06f Conns: total=%" PRIu64 " current=%" PRIu64 "/%u\n", run_state::network_time, + file->Write(util::fmt("%.06f Conns: total=%" PRIu64 " current=%" PRIu64 "/%zu\n", run_state::network_time, Connection::TotalConnections(), Connection::CurrentConnections(), session_mgr->CurrentSessions())); @@ -173,7 +173,7 @@ void ProfileLogger::Log() { util::fmt("%.06f DNS_Mgr: requests=%lu successful=%lu failed=%lu pending=%lu " "cached_hosts=%lu cached_addrs=%lu\n", run_state::network_time, dstats.requests, dstats.successful, dstats.failed, dstats.pending, - dstats.cached_hosts, dstats.cached_addresses)); + dstats.cached.hosts, dstats.cached.addresses)); trigger::Manager::Stats tstats; trigger_mgr->GetStats(&tstats); @@ -188,7 +188,7 @@ void ProfileLogger::Log() { timer_type_to_string(static_cast(i)), current_timers[i])); } - file->Write(util::fmt("%0.6f Threads: current=%d\n", run_state::network_time, thread_mgr->NumThreads())); + file->Write(util::fmt("%0.6f Threads: current=%zu\n", run_state::network_time, thread_mgr->NumThreads())); const threading::Manager::msg_stats_list& thread_stats = thread_mgr->GetMsgThreadStats(); for ( threading::Manager::msg_stats_list::const_iterator i = thread_stats.begin(); i != thread_stats.end(); ++i ) { @@ -213,14 +213,12 @@ void ProfileLogger::Log() { cs.num_events_outgoing, cs.num_logs_incoming, cs.num_logs_outgoing, cs.num_ids_incoming, cs.num_ids_outgoing)); - // Script-level state. - const auto& globals = global_scope()->Vars(); - if ( expensive ) { + // Script-level state. int total_table_entries = 0; int total_table_rentries = 0; - for ( const auto& global : globals ) { + for ( const auto& global : global_scope()->Vars() ) { auto& id = global.second; // We don't show/count internal globals as they are always diff --git a/src/Stmt.cc b/src/Stmt.cc index a1edb51ee0..cd84febe8e 100644 --- a/src/Stmt.cc +++ b/src/Stmt.cc @@ -54,12 +54,6 @@ const char* stmt_name(StmtTag t) { "std-function", }; -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - if ( int(t) == STMT_ANY ) - return "any"; -#pragma GCC diagnostic pop - return stmt_names[int(t)]; } diff --git a/src/StmtEnums.h b/src/StmtEnums.h index 04bbfcb189..b5a3030088 100644 --- a/src/StmtEnums.h +++ b/src/StmtEnums.h @@ -6,7 +6,6 @@ namespace zeek::detail { // These are in a separate file to break circular dependences enum StmtTag { - STMT_ANY [[deprecated("Remove in v7.1 - Unused and plugins should use STMT_EXTERN.")]] = -1, STMT_ALARM, // Does no longer exist but kept to create enums consistent. 
STMT_PRINT, STMT_EVENT, diff --git a/src/Timer.cc b/src/Timer.cc index ef2763071a..f348731c73 100644 --- a/src/Timer.cc +++ b/src/Timer.cc @@ -9,7 +9,7 @@ #include "zeek/RunState.h" #include "zeek/broker/Manager.h" #include "zeek/iosource/Manager.h" -#include "zeek/iosource/PktSrc.h" +#include "zeek/telemetry/Manager.h" #include "zeek/util.h" namespace zeek::detail { @@ -97,6 +97,34 @@ void TimerMgr::InitPostScript() { iosource_mgr->Register(this, true); dispatch_all_expired = zeek::detail::max_timer_expires == 0; + + cumulative_num_metric = telemetry_mgr->CounterInstance("zeek", "timers", {}, "Cumulative number of timers", "", + []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.counter.value = + static_cast(timer_mgr->CumulativeNum()); + return metric; + }); + + lag_time_metric = telemetry_mgr->GaugeInstance("zeek", "timers_lag_time", {}, + "Lag between current network time and last expired timer", "seconds", + []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.gauge.value = + run_state::network_time - timer_mgr->last_timestamp; + return metric; + }); + + std::shared_ptr family = + telemetry_mgr->GaugeFamily("zeek", "timers_pending", {"type"}, "Number of timers for a certain type"); + for ( int i = 0; i < NUM_TIMER_TYPES; i++ ) { + current_timer_metrics[i] = family->GetOrAdd({{"type", timer_type_to_string(static_cast(i))}}, + [i]() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.gauge.value = TimerMgr::CurrentTimers()[i]; + return metric; + }); + } } void TimerMgr::Add(Timer* timer) { diff --git a/src/Timer.h b/src/Timer.h index ff535f64e1..45ba50314e 100644 --- a/src/Timer.h +++ b/src/Timer.h @@ -10,7 +10,14 @@ namespace zeek { class ODesc; -} + +namespace telemetry { +class Gauge; +class Counter; +using GaugePtr = std::shared_ptr; +using CounterPtr = std::shared_ptr; +} // namespace telemetry +} // namespace zeek namespace zeek::detail { @@ -153,10 +160,12 @@ private: // for the max_timer_expires=0 case. 
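The timer statistics above switch to callback-backed instruments: rather than keeping shadow counters, the value is computed when the metric is collected, and the per-type series hang off a single GaugeFamily keyed by a "type" label. A condensed sketch of that pattern, following the GaugeFamily/GetOrAdd and callback signatures used in this diff; the exported state, metric names, and includes are simplified assumptions:

    #include <array>

    #include "zeek/telemetry/Manager.h"

    // Hypothetical live state that the metrics should expose on scrape.
    static std::array<int, 3> pending_by_type = {0, 0, 0};
    static const char* type_names[3] = {"connection", "file", "other"};

    static std::array<zeek::telemetry::GaugePtr, 3> pending_metrics;

    void register_pending_metrics() {
        auto family = zeek::telemetry_mgr->GaugeFamily("zeek", "example_pending", {"type"},
                                                       "Pending items per type");

        for ( int i = 0; i < 3; i++ )
            // The callback runs at collection time, so each series always
            // reflects the current value without explicit Inc()/Dec() calls.
            pending_metrics[i] = family->GetOrAdd({{"type", type_names[i]}},
                                                  [i]() -> prometheus::ClientMetric {
                                                      prometheus::ClientMetric metric;
                                                      metric.gauge.value =
                                                          static_cast<double>(pending_by_type[i]);
                                                      return metric;
                                                  });
    }
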
bool dispatch_all_expired = false; - size_t peak_size = 0; - size_t cumulative_num = 0; - static unsigned int current_timers[NUM_TIMER_TYPES]; + + telemetry::CounterPtr cumulative_num_metric; + telemetry::GaugePtr lag_time_metric; + telemetry::GaugePtr current_timer_metrics[NUM_TIMER_TYPES]; + std::unique_ptr q; }; diff --git a/src/Trigger.cc b/src/Trigger.cc index dae99bab37..d165da7f5a 100644 --- a/src/Trigger.cc +++ b/src/Trigger.cc @@ -13,6 +13,7 @@ #include "zeek/Traverse.h" #include "zeek/Val.h" #include "zeek/iosource/Manager.h" +#include "zeek/telemetry/Manager.h" using namespace zeek::detail; using namespace zeek::detail::trigger; @@ -88,12 +89,6 @@ protected: double time; }; -Trigger::Trigger(std::shared_ptr wi, double timeout, const IDSet& _globals, std::vector _local_aggrs, - Frame* f, const Location* loc) - : Trigger(std::move(wi), _globals, std::move(_local_aggrs), timeout, f, loc) { - Unref(this); -} - Trigger::Trigger(std::shared_ptr wi, const IDSet& _globals, std::vector _local_aggrs, double timeout, Frame* f, const Location* loc) { timeout_value = timeout; @@ -437,7 +432,19 @@ Manager::Manager() : iosource::IOSource() { pending = new TriggerList(); } Manager::~Manager() { delete pending; } -void Manager::InitPostScript() { iosource_mgr->Register(this, true); } +void Manager::InitPostScript() { + trigger_count = telemetry_mgr->CounterInstance("zeek", "triggers", {}, "Total number of triggers scheduled"); + trigger_pending = + telemetry_mgr->GaugeInstance("zeek", "pending_triggers", {}, "Pending number of triggers", "", + []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.gauge.value = + trigger_mgr ? static_cast(trigger_mgr->pending->size()) : 0.0; + return metric; + }); + + iosource_mgr->Register(this, true); +} double Manager::GetNextTimeout() { return pending->empty() ? -1 : run_state::network_time + 0.100; } @@ -468,13 +475,13 @@ void Manager::Queue(Trigger* trigger) { if ( std::find(pending->begin(), pending->end(), trigger) == pending->end() ) { Ref(trigger); pending->push_back(trigger); - total_triggers++; + trigger_count->Inc(); iosource_mgr->Wakeup(Tag()); } } void Manager::GetStats(Stats* stats) { - stats->total = total_triggers; + stats->total = static_cast(trigger_count->Value()); stats->pending = pending->size(); } diff --git a/src/Trigger.h b/src/Trigger.h index 6c4fade3be..0cba28fd20 100644 --- a/src/Trigger.h +++ b/src/Trigger.h @@ -18,6 +18,13 @@ class Val; using ValPtr = IntrusivePtr; +namespace telemetry { +class Gauge; +class Counter; +using GaugePtr = std::shared_ptr; +using CounterPtr = std::shared_ptr; +} // namespace telemetry + namespace detail { class Frame; @@ -39,13 +46,6 @@ class TriggerTraversalCallback; class Trigger final : public Obj, public notifier::detail::Receiver { public: - // This first constructor can return an invalid pointer, so - // its value must not be used further. - [[deprecated( - "Remove in v7.1. Use second Trigger constructor via " - "make_intrusive<...>.")]] Trigger(std::shared_ptr wi, double timeout, const IDSet& globals, - std::vector local_aggrs, Frame* f, const Location* loc); - // Use this constructor via make_intrusive<...>. The usual pattern is // to then discard what's returned, i.e. 
"(void)make_intrusive<...>" - // however, a valid pointer will be returned that can be used for @@ -187,7 +187,8 @@ public: private: using TriggerList = std::list; TriggerList* pending; - unsigned long total_triggers = 0; + telemetry::CounterPtr trigger_count; + telemetry::GaugePtr trigger_pending; }; } // namespace trigger diff --git a/src/Type.cc b/src/Type.cc index 5341476a3f..402aee2505 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -599,12 +599,9 @@ TypePtr SetType::ShallowClone() { return make_intrusive(indices, elemen SetType::~SetType() = default; -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" FuncType::Capture::Capture(detail::IDPtr _id, bool _deep_copy) : id(std::move(_id)), deep_copy(_deep_copy) { is_managed = id ? ZVal::IsManagedType(id->GetType()) : false; } -#pragma GCC diagnostic pop FuncType::FuncType(RecordTypePtr arg_args, TypePtr arg_yield, FunctionFlavor arg_flavor) : Type(TYPE_FUNC), args(std::move(arg_args)), arg_types(make_intrusive()), yield(std::move(arg_yield)) { diff --git a/src/Type.h b/src/Type.h index 7261a445f4..c5c52bc095 100644 --- a/src/Type.h +++ b/src/Type.h @@ -513,8 +513,6 @@ public: public: Capture(detail::IDPtr _id, bool _deep_copy); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" Capture(const Capture&) = default; Capture(Capture&&) = default; Capture& operator=(const Capture&) = default; @@ -527,11 +525,11 @@ public: // For script optimization: void SetID(detail::IDPtr new_id) { id = std::move(new_id); } -#pragma GCC diagnostic pop - [[deprecated("Remove in v7.1. Use non-default constructor and associated accessors.")]] detail::IDPtr id; - [[deprecated("Remove in v7.1. Use non-default constructor and associated accessors.")]] bool deep_copy; - [[deprecated("Remove in v7.1. Use non-default constructor and associated accessors.")]] bool is_managed; + private: + detail::IDPtr id; + bool deep_copy; + bool is_managed; }; using CaptureList = std::vector; diff --git a/src/analyzer/protocol/ldap/CMakeLists.txt b/src/analyzer/protocol/ldap/CMakeLists.txt index a687e880ff..3f69e6543d 100644 --- a/src/analyzer/protocol/ldap/CMakeLists.txt +++ b/src/analyzer/protocol/ldap/CMakeLists.txt @@ -1,5 +1,5 @@ spicy_add_analyzer( NAME LDAP PACKAGE_NAME spicy-ldap - SOURCES ldap.spicy ldap.evt asn1.spicy - MODULES LDAP ASN1) + SOURCES ldap.spicy ldap.evt asn1.spicy ldap_zeek.spicy + MODULES LDAP ASN1 LDAP_Zeek) diff --git a/src/analyzer/protocol/ldap/ldap.evt b/src/analyzer/protocol/ldap/ldap.evt index 96baef6f98..77f34fb62e 100644 --- a/src/analyzer/protocol/ldap/ldap.evt +++ b/src/analyzer/protocol/ldap/ldap.evt @@ -41,3 +41,18 @@ on LDAP::SearchRequest -> event LDAP::search_request($conn, on LDAP::SearchResultEntry -> event LDAP::search_result_entry($conn, message.messageID, self.objectName); + +on LDAP::ExtendedRequest -> event LDAP::extended_request($conn, + message.messageID, + self.requestName, + self.requestValue); + +on LDAP::ExtendedResponse -> event LDAP::extended_response($conn, + message.messageID, + message.result_.code, + self.responseName, + self.responseValue); + +# Once switched into MessageMode::TLS, we won't parse messages anymore, +# so this is raised just once. 
+on LDAP::Message if (ctx.messageMode == LDAP::MessageMode::TLS) -> event LDAP::starttls($conn); diff --git a/src/analyzer/protocol/ldap/ldap.spicy b/src/analyzer/protocol/ldap/ldap.spicy index 2d4f821d78..0816e6afe9 100644 --- a/src/analyzer/protocol/ldap/ldap.spicy +++ b/src/analyzer/protocol/ldap/ldap.spicy @@ -130,29 +130,104 @@ public type Result = unit { const GSSAPI_MECH_MS_KRB5 = "1.2.840.48018.1.2.2"; # Supported SASL stripping modes. -type SaslStripping = enum { - MS_KRB5 = 1, # Payload starts with a 4 byte length followed by a wrap token that may or may not be sealed. +type MessageMode = enum { + MS_KRB5 = 1, # Payload starts with a 4 byte length followed by a wrap token that may or may not be sealed. + TLS = 2, # Client/server used StartTLS, forward to SSL analyzer. + MAYBE_ENCRYPTED = 3, # Use a heuristic to determine encrypted traffic. + CLEARTEXT = 4, # Assume cleartext. + ENCRYPTED = 5, # Assume encrypted. }; type Ctx = struct { - saslStripping: SaslStripping; # Which mode of SASL stripping to use. + messageMode: MessageMode; # Message dispatching mode. + saslMechanism: string; # The SASL mechanism selected by the client. + startTlsRequested: bool; # Did the client use the StartTLS extended request? }; #----------------------------------------------------------------------------- public type Messages = unit { %context = Ctx; - : SASLStrip(self.context())[]; + : MessageDispatch(self.context())[]; }; #----------------------------------------------------------------------------- -public type SASLStrip = unit(ctx: Ctx&) { - switch( ctx.saslStripping ) { - SaslStripping::Undef -> : Message(ctx); - SaslStripping::MS_KRB5 -> : SaslMsKrb5Stripper(ctx); +public type MessageDispatch = unit(ctx: Ctx&) { + switch( ctx.messageMode ) { + MessageMode::Undef -> : Message(ctx); + MessageMode::MS_KRB5 -> : SaslMsKrb5Stripper(ctx); + MessageMode::TLS -> : TlsForward; # never returns + MessageMode::MAYBE_ENCRYPTED -> : MaybeEncrypted(ctx); + MessageMode::CLEARTEXT -> : Message(ctx); + MessageMode::ENCRYPTED -> : EncryptedMessage; }; }; +#----------------------------------------------------------------------------- +type MaybeEncrypted = unit(ctx: Ctx&) { + # A plaintext LDAP message always starts with at least 3 bytes and the first + # byte is 0x30 for the sequence. A SASL encrypted message starts with a 4 byte + # length field. The heuristic here is that if the first byte is a 0x30, + # assume it's unencrypted LDAP. This should be pretty good: if it were an + # encrypted/SASL-wrapped message, it would have a size between 0x30000000 and + # 0x30FFFFFF, meaning at least a size of ~768MB, which seems unlikely. + var start: iterator; + var saslLen: uint64; + var mech: bytes; + + on %init { + self.start = self.input(); + # Don't have starts_with() on string, work around that. + # https://github.com/zeek/spicy/issues/1807 + self.mech = ctx.saslMechanism.encode(spicy::Charset::UTF8); + } + + first: uint8 { + if ( $$ == 0x30 ) { + ctx.messageMode = MessageMode::CLEARTEXT; + } else { + ctx.messageMode = MessageMode::ENCRYPTED; + } + } + + # As a further heuristic, if encrypted mode was decided and the client + # requested GSSAPI or GSS-SPNEGO (or we just didn't see it), peek a bit + # into the SASL payload and check if it starts with a 0504 (WRAP_TOKEN). + # If so, switch into KRB mode assuming that's what is being used and + # have a chance of seeing some more plaintext LDAP in non-sealed tokens. 
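
The rem and uint16 fields that follow implement the second half of this heuristic. Restated as a standalone C++ sketch (classify is an illustrative name, the GSS mechanism check is omitted, and at least six bytes of input are assumed to be available):

#include <cassert>
#include <cstddef>
#include <cstdint>

enum class MessageMode { CLEARTEXT, ENCRYPTED, MS_KRB5 };

// First byte 0x30 -> treat the message as a plaintext LDAP SEQUENCE. Otherwise
// read the first four bytes as a big-endian SASL length and, if the payload
// then starts with a 0x0504 WRAP token, assume krb5 wrapping so that non-sealed
// tokens can still yield plaintext LDAP later on.
MessageMode classify(const uint8_t* buf, size_t len) {
    if ( len < 1 )
        return MessageMode::ENCRYPTED;

    if ( buf[0] == 0x30 )
        return MessageMode::CLEARTEXT;

    if ( len >= 6 ) {
        uint64_t sasl_len = (uint64_t(buf[0]) << 24) | (uint64_t(buf[1]) << 16) |
                            (uint64_t(buf[2]) << 8) | uint64_t(buf[3]);
        uint16_t token = uint16_t((uint16_t(buf[4]) << 8) | buf[5]);
        if ( sasl_len >= 2 && token == 0x0504 )
            return MessageMode::MS_KRB5;
    }

    return MessageMode::ENCRYPTED;
}

int main() {
    const uint8_t plain[] = {0x30, 0x0c, 0x02};
    const uint8_t krb[] = {0x00, 0x00, 0x00, 0x40, 0x05, 0x04};
    assert(classify(plain, sizeof plain) == MessageMode::CLEARTEXT);
    assert(classify(krb, sizeof krb) == MessageMode::MS_KRB5);
}
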
+ rem: uint8[3] if ( ctx.messageMode == MessageMode::ENCRYPTED && (|self.mech| == 0 || self.mech.starts_with(b"GSS")) ) { + self.saslLen = (uint64(self.first) << 24) + (uint64($$[0]) << 16) + (uint64($$[1]) << 8) + uint64($$[2]); + } + + : uint16 if ( self.saslLen >= 2 ) { + if ( $$ == 0x0504 ) { + ctx.messageMode = MessageMode::MS_KRB5; + } + } + + # Rewind the input. + : void { + # Prevent MessageDispatch from recursing endlessly. + assert ctx.messageMode != MessageMode::MAYBE_ENCRYPTED; + self.set_input(self.start); + } + + # One recursion to parse with the new ctx.messageMode setting. + : MessageDispatch(ctx); +}; + +#----------------------------------------------------------------------------- +type EncryptedMessage = unit { + len: uint32; + : skip bytes &size=self.len; +}; + +#----------------------------------------------------------------------------- +type TlsForward = unit { + # Just consume everything. This is hooked in ldap_zeek.spicy + chunk: bytes &chunked &eod; +}; + type KrbWrapToken = unit { # https://datatracker.ietf.org/doc/html/rfc4121#section-4.2.6.2 @@ -174,7 +249,10 @@ type KrbWrapToken = unit { } else if ( self.rrc == 0 ) { self.trailer_ec = self.ec; } else { - throw "Unhandled rc %s and ec %s" % (self.ec, self.rrc); + if ( ! self.ctx_flags.sealed ) + # If it's sealed, we'll consume until &eod anyhow + # and ec/rrc shouldn't apply, otherwise, bail. + throw "Unhandled rc %s and ec %s" % (self.ec, self.rrc); } } @@ -223,6 +301,7 @@ public type Message = unit(ctx: Ctx&) { var arg: string = ""; var seqHeaderLen: uint64; var msgLen: uint64; + var opLen: uint64; seqHeader: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Universal && $$.tag.type_ == ASN1::ASN1Type::Sequence) { self.msgLen = $$.len.len; @@ -241,10 +320,11 @@ public type Message = unit(ctx: Ctx&) { protocolOp: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Application) { self.opcode = cast(cast($$.tag.type_)); + self.opLen = $$.len.len; } switch ( self.opcode ) { - ProtocolOpcode::BIND_REQUEST -> BIND_REQUEST: BindRequest(self); + ProtocolOpcode::BIND_REQUEST -> BIND_REQUEST: BindRequest(self, ctx); ProtocolOpcode::BIND_RESPONSE -> BIND_RESPONSE: BindResponse(self, ctx); ProtocolOpcode::UNBIND_REQUEST -> UNBIND_REQUEST: UnbindRequest(self); ProtocolOpcode::SEARCH_REQUEST -> SEARCH_REQUEST: SearchRequest(self); @@ -263,12 +343,12 @@ public type Message = unit(ctx: Ctx&) { # just commenting this out, it will stop processing LDAP Messages in this connection ProtocolOpcode::ADD_REQUEST -> ADD_REQUEST: NotImplemented(self); ProtocolOpcode::COMPARE_REQUEST -> COMPARE_REQUEST: NotImplemented(self); - ProtocolOpcode::EXTENDED_REQUEST -> EXTENDED_REQUEST: NotImplemented(self); - ProtocolOpcode::EXTENDED_RESPONSE -> EXTENDED_RESPONSE: NotImplemented(self); + ProtocolOpcode::EXTENDED_REQUEST -> EXTENDED_REQUEST: ExtendedRequest(self, ctx); + ProtocolOpcode::EXTENDED_RESPONSE -> EXTENDED_RESPONSE: ExtendedResponse(self, ctx); ProtocolOpcode::INTERMEDIATE_RESPONSE -> INTERMEDIATE_RESPONSE: NotImplemented(self); ProtocolOpcode::MOD_DN_REQUEST -> MOD_DN_REQUEST: NotImplemented(self); ProtocolOpcode::SEARCH_RESULT_REFERENCE -> SEARCH_RESULT_REFERENCE: NotImplemented(self); - } &size=self.protocolOp.len.len; + } &size=self.opLen; # Ensure some invariants hold after parsing the command. : void &requires=(self.offset() >= self.seqHeaderLen); @@ -295,26 +375,29 @@ type GSS_SPNEGO_negTokenInit = unit { : skip bytes &eod; }; -# Peak into GSS-SPNEGO payload and ensure it is indeed GSS-SPNEGO. 
-type GSS_SPNEGO = unit { +# Peek into GSS-SPNEGO payload and ensure it is indeed GSS-SPNEGO, +# or GSS-SPNEGO with an NTLMSSP payload that starts with NTLMSSP. +type GSS_SPNEGO_Init = unit { # This is the optional octet string in SaslCredentials. credentialsHeader: ASN1::ASN1Header &requires=($$.tag.type_ == ASN1::ASN1Type::OctetString); # Now we either have the initial message as specified in RFC2743 or - # a continuation from RFC4178 + # a continuation from RFC4178, or an "NTLMSSP" signature. # - # 60 -> APPLICATION [0] https://datatracker.ietf.org/doc/html/rfc2743#page-81) + # 60 -> APPLICATION [0] https://datatracker.ietf.org/doc/html/rfc2743#page-81 # a1 -> CHOICE [1] https://www.rfc-editor.org/rfc/rfc4178#section-4.2 + # "NTLMSSP" https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-nlmp/907f519d-6217-45b1-b421-dca10fc8af0d # - gssapiHeader: ASN1::ASN1Header &requires=( - $$.tag.class == ASN1::ASN1Class::Application && $$.tag.type_ == ASN1::ASN1Type(0) - || $$.tag.class == ASN1::ASN1Class::ContextSpecific && $$.tag.type_ == ASN1::ASN1Type(1) - ); + switch { + -> spnegoInitByte: uint8(0x60); + -> spnegoChoiceByte: uint8(0xa1); + -> ntlmSignature: skip b"NTLMSSP"; # Unsupported, should forward to child analyzer! + }; - switch ( self.gssapiHeader.tag.type_ ) { - ASN1::ASN1Type(0) -> initial: GSS_SPNEGO_negTokenInit; - * -> : skip bytes &eod; - } &size=self.gssapiHeader.len.len; + spnegoLen: skip ASN1::LengthType if (self?.spnegoInitByte || self?.spnegoChoiceByte); + + # Peek into the SPNEGO_negTokenInit + spnegoInitial: skip GSS_SPNEGO_negTokenInit if (self?.spnegoInitByte); }; type SaslCredentials = unit() { @@ -322,12 +405,22 @@ type SaslCredentials = unit() { # Peak into GSS-SPNEGO payload if we have any. switch ( self.mechanism ) { - "GSS-SPNEGO" -> gss_spnego: GSS_SPNEGO; + "GSS-SPNEGO" -> gss_spnego: GSS_SPNEGO_Init; * -> : skip bytes &eod; }; }; -type NegTokenResp = unit { +type GSS_SPNEGO_Subsequent = unit { + switch { + -> spnegoChoiceByte: uint8(0xa1); + -> ntmlSignature: skip b"NTLMSSP"; # Unsupported, should forward to NTLM! + }; + + spnegoChoiceLen: skip ASN1::LengthType if (self?.spnegoChoiceByte); + negTokenResp: GSS_SPNEGO_negTokenResp if (self?.spnegoChoiceByte); +}; + +type GSS_SPNEGO_negTokenResp = unit { var accepted: bool; var supportedMech: ASN1::ASN1Message; @@ -355,34 +448,13 @@ type NegTokenResp = unit { } &parse-from=self.supportedMech.application_data; }; -type ServerSaslCreds = unit { - serverSaslCreds: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific && $$.tag.type_ == ASN1::ASN1Type(7)); - - # The PCAP missing_ldap_logs.pcapng has a1 81 b6 here for the GSS-SPNEGO response. - # - # This is context-specific ID 1, constructed, and a length of 182 as - # specified by in 4.2 of RFC4178. - # - # https://www.rfc-editor.org/rfc/rfc4178#section-4.2 - # - # TODO: This is only valid for a GSS-SPNEGO negTokenResp. - # If you want to support something else, remove the requires - # and add more to the switch below. - choice: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific); - - switch ( self.choice.tag.type_ ) { - ASN1::ASN1Type(1) -> negTokenResp: NegTokenResp; - # ... - } &size=self.choice.len.len; -}; - # TODO(fox-ds): A helper unit for requests for which no handling has been implemented. # Eventually all uses of this unit should be replaced with actual parsers so this unit can be removed. 
type NotImplemented = unit(inout message: Message) { : skip bytes &eod; }; -type BindRequest = unit(inout message: Message) { +type BindRequest = unit(inout message: Message, ctx: Ctx&) { version: ASN1::ASN1Message(True) &convert=$$.body.num_value; name: ASN1::ASN1Message(True) &convert=$$.body.str_value { message.obj = self.name; @@ -406,12 +478,32 @@ type BindRequest = unit(inout message: Message) { saslCreds: SaslCredentials() &parse-from=self.authData if ((self.authType == BindAuthType::BIND_AUTH_SASL) && (|self.authData| > 0)) { message.arg = self.saslCreds.mechanism; + ctx.saslMechanism = self.saslCreds.mechanism; } } &requires=(self?.authType && (self.authType != BindAuthType::Undef)); +type ServerSaslCreds = unit { + serverSaslCreds: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific && $$.tag.type_ == ASN1::ASN1Type(7)); + payload: bytes &size=self.serverSaslCreds.len.len; +}; + type BindResponse = unit(inout message: Message, ctx: Ctx&) { : Result { message.result_ = $$; + + # The SASL authentication was successful. We do not actually + # know if the following messages are encrypted or not. This may be + # mechanism and parameter specific. For example SCRAM-SHA512 or NTLM + # will continue to be cleartext, while SRP or GSS-API would be encrypted. + # + # Switch messageMode into trial mode which is explored via MessageDispatch + # and the MaybeEncrypted unit. + # + # Note, messageMode may be changed to something more specific like + # MS_KRB5 below. + if ( |ctx.saslMechanism| > 0 && $$.code == ResultCode::SUCCESS ) { + ctx.messageMode = MessageMode::MAYBE_ENCRYPTED; + } } # Try to parse serverSaslCreds if there's any input remaining. This @@ -421,14 +513,18 @@ type BindResponse = unit(inout message: Message, ctx: Ctx&) { # if the serverSaslCreds field exists or not. But, not sure we can # check if there's any bytes left at this point outside of passing # in the length and playing with offset(). - serverSaslCreds: ServerSaslCreds[] &eod { - if ( |self.serverSaslCreds| > 0 ) { - if ( self.serverSaslCreds[0]?.negTokenResp ) { - local token = self.serverSaslCreds[0].negTokenResp; - if ( token.accepted && token?.supportedMechOid ) { - if ( token.supportedMechOid == GSSAPI_MECH_MS_KRB5 ) { - ctx.saslStripping = SaslStripping::MS_KRB5; - } + serverSaslCreds: ServerSaslCreds[] &eod; + + # If the client requested GSS-SPNEGO, try to parse the server's response + # to switch message mode. + gss_spnego: GSS_SPNEGO_Subsequent &parse-from=self.serverSaslCreds[0].payload + if (ctx.saslMechanism == "GSS-SPNEGO" && |self.serverSaslCreds| > 0) { + + if ( $$?.negTokenResp ) { + local token = $$.negTokenResp; + if ( token.accepted && token?.supportedMechOid ) { + if ( token.supportedMechOid == GSSAPI_MECH_MS_KRB5 ) { + ctx.messageMode = MessageMode::MS_KRB5; } } } @@ -980,16 +1076,61 @@ type AbandonRequest = unit(inout message: Message) { #----------------------------------------------------------------------------- # Extended Operation # https://tools.ietf.org/html/rfc4511#section-4.12 +type ExtendedRequest = unit(inout message: Message, ctx: Ctx&) { + var requestValue: bytes; + header: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific); + requestName: bytes &size=self.header.len.len &convert=$$.decode(spicy::Charset::ASCII) { + message.obj = $$; + } -# TODO: implement ExtendedRequest -# type ExtendedRequest = unit(inout message: Message) { -# -# }; + # If there's more byte to parse, it's the requestValue. 
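
Before the extended operations below, it is worth summarizing how the bind handling above settles the dispatch mode. This is a simplified standalone C++ sketch; after_bind is an illustrative name that collapses BindResponse's two-step assignment into a single decision:

#include <cassert>
#include <optional>
#include <string>

enum class MessageMode { UNDEF, MS_KRB5, TLS, MAYBE_ENCRYPTED, CLEARTEXT, ENCRYPTED };

const std::string GSSAPI_MECH_MS_KRB5 = "1.2.840.48018.1.2.2";

// A successful SASL bind enters the MAYBE_ENCRYPTED trial mode, unless an
// accepted GSS-SPNEGO negTokenResp already selected the MS krb5 mechanism, in
// which case wrap-token stripping can start right away.
MessageMode after_bind(bool success, const std::string& sasl_mechanism, bool token_accepted,
                       const std::optional<std::string>& supported_mech_oid) {
    if ( ! success || sasl_mechanism.empty() )
        return MessageMode::UNDEF; // keep dispatching plain LDAP messages

    if ( sasl_mechanism == "GSS-SPNEGO" && token_accepted && supported_mech_oid &&
         *supported_mech_oid == GSSAPI_MECH_MS_KRB5 )
        return MessageMode::MS_KRB5;

    return MessageMode::MAYBE_ENCRYPTED;
}

int main() {
    assert(after_bind(true, "GSS-SPNEGO", true, GSSAPI_MECH_MS_KRB5) == MessageMode::MS_KRB5);
    assert(after_bind(true, "SCRAM-SHA512", false, std::nullopt) == MessageMode::MAYBE_ENCRYPTED);
    assert(after_bind(true, "", false, std::nullopt) == MessageMode::UNDEF);
}
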
+ : ASN1::ASN1Message(False) + &requires=($$.head.tag.class == ASN1::ASN1Class::ContextSpecific) + if ( message.opLen > self.offset() ) { -# TODO: implement ExtendedResponse -# type ExtendedResponse = unit(inout message: Message) { -# -# }; + self.requestValue = $$.application_data; + } + + on %done { + # Did the client request StartTLS? + # + # https://datatracker.ietf.org/doc/html/rfc4511#section-4.14.1 + if ( self.requestName == "1.3.6.1.4.1.1466.20037" ) + ctx.startTlsRequested = True; + } +}; + +#----------------------------------------------------------------------------- +type ExtendedResponseEntry = unit(inout r: ExtendedResponse) { + : ASN1::ASN1Message(False) &requires=($$.head.tag.class == ASN1::ASN1Class::ContextSpecific) { + if ( $$.head.tag.type_ == ASN1::ASN1Type(10) ) + r.responseName = $$.application_data; + else if ( $$.head.tag.type_ == ASN1::ASN1Type(11) ) + r.responseValue = $$.application_data; + else + throw "Unhandled extended response tag %s" % $$.head.tag; + } +}; + +#----------------------------------------------------------------------------- +type ExtendedResponse = unit(inout message: Message, ctx: Ctx&) { + var responseName: bytes; + var responseValue: bytes; + : Result { + message.result_ = $$; + } + + # Try to parse two ASN1 entries if there are bytes left in the unit. + # Both are optional and identified by context specific tagging. + : ExtendedResponseEntry(self) if ( message.opLen > self.offset() ); + : ExtendedResponseEntry(self) if ( message.opLen > self.offset() ); + + on %done { + # Client had requested StartTLS and it was successful? Switch to SSL. + if ( ctx.startTlsRequested && message.result_.code == ResultCode::SUCCESS ) + ctx.messageMode = MessageMode::TLS; + } +}; #----------------------------------------------------------------------------- # IntermediateResponse Message diff --git a/src/analyzer/protocol/ldap/ldap_zeek.spicy b/src/analyzer/protocol/ldap/ldap_zeek.spicy new file mode 100644 index 0000000000..3a6784589f --- /dev/null +++ b/src/analyzer/protocol/ldap/ldap_zeek.spicy @@ -0,0 +1,12 @@ +module LDAP_Zeek; + +import LDAP; +import zeek; + +on LDAP::TlsForward::%init { + zeek::protocol_begin("SSL"); +} + +on LDAP::TlsForward::chunk { + zeek::protocol_data_in(zeek::is_orig(), self.chunk); +} diff --git a/src/analyzer/protocol/modbus/events.bif b/src/analyzer/protocol/modbus/events.bif index ecd47379c5..6ec72fe4e5 100644 --- a/src/analyzer/protocol/modbus/events.bif +++ b/src/analyzer/protocol/modbus/events.bif @@ -195,7 +195,6 @@ event modbus_write_multiple_registers_response%(c: connection, headers: ModbusHe ## ## refs: A vector of reference records. event modbus_read_file_record_request%(c: connection, headers: ModbusHeaders, byte_count: count, refs: ModbusFileRecordRequests%); -event modbus_read_file_record_request%(c: connection, headers: ModbusHeaders%) &deprecated="Remove in v7.1. Use the version that takes a byte_count and vector of references"; ## Generated for a Modbus read file record response. ## @@ -207,7 +206,6 @@ event modbus_read_file_record_request%(c: connection, headers: ModbusHeaders%) & ## ## refs: A vector of reference records. event modbus_read_file_record_response%(c: connection, headers: ModbusHeaders, byte_count: count, refs: ModbusFileRecordResponses%); -event modbus_read_file_record_response%(c: connection, headers: ModbusHeaders%) &deprecated="Remove in v7.1. Use the version that takes a byte_count and vector of references"; ## Generated for a Modbus write file record request. 
## @@ -219,7 +217,6 @@ event modbus_read_file_record_response%(c: connection, headers: ModbusHeaders%) ## ## refs: A vector of reference records. event modbus_write_file_record_request%(c: connection, headers: ModbusHeaders, byte_count: count, refs: ModbusFileReferences%); -event modbus_write_file_record_request%(c: connection, headers: ModbusHeaders%) &deprecated="Remove in v7.1. Use the version that takes a byte_count and vector of references"; ## Generated for a Modbus write file record response. ## @@ -231,7 +228,6 @@ event modbus_write_file_record_request%(c: connection, headers: ModbusHeaders%) ## ## refs: A vector of reference records. event modbus_write_file_record_response%(c: connection, headers: ModbusHeaders, byte_count: count, refs: ModbusFileReferences%); -event modbus_write_file_record_response%(c: connection, headers: ModbusHeaders%) &deprecated="Remove in v7.1. Use the version that takes a byte_count and vector of references"; ## Generated for a Modbus mask write register request. ## diff --git a/src/analyzer/protocol/mysql/events.bif b/src/analyzer/protocol/mysql/events.bif index ec5fa61ae6..a102842e05 100644 --- a/src/analyzer/protocol/mysql/events.bif +++ b/src/analyzer/protocol/mysql/events.bif @@ -84,6 +84,57 @@ event mysql_server_version%(c: connection, ver: string%); ## ## username: The username supplied by the client ## -## .. zeek:see:: mysql_command_request mysql_error mysql_ok mysql_server_version +## .. zeek:see:: mysql_command_request mysql_error mysql_ok mysql_server_version mysql_ssl_request event mysql_handshake%(c: connection, username: string%); +## Generated for a short client handshake response packet with the CLIENT_SSL +## flag set. Usually the client will initiate a TLS handshake afterwards. +# +## See the MySQL `documentation `__ +## for more information about the MySQL protocol. +## +## c: The connection. +## +## .. zeek:see:: mysql_handshake +event mysql_ssl_request%(c: connection%); + +## Generated for information about plugin authentication within handshake packets. +## +## c: The connection. +## +## is_orig: True if this is from the client, false if from the server. +## +## name: Name of the authentication plugin. +## +## data: The initial auth data. From the server, it is the concatenation of +## auth_plugin_data_part_1 and auth_plugin_data_part_2 in the handshake. +## For the client it is the auth_response in the handshake response. +## +## .. zeek:see:: mysql_handshake mysql_auth_switch_request mysql_auth_more_data +event mysql_auth_plugin%(c: connection, is_orig: bool, name: string, data: string%); + +## Generated for a server packet with an auth switch request. +## +## c: The connection. +## +## name: The plugin name. +## +## data: Initial authentication data for the plugin. +## +## .. zeek:see:: mysql_handshake mysql_auth_more_data +event mysql_auth_switch_request%(c: connection, name: string, data: string%); + +## Generated for opaque authentication data exchanged between client and server +## after the client's handshake packet, but before the server replied with +## an OK_Packet +## +## Data is specific to the plugin auth mechanism used by client and server. +## +## c: The connection. +## +## is_orig: True if this is from the client, false if from the server. +## +## data: More authentication data. +## +## .. 
zeek:see:: mysql_handshake mysql_auth_switch_request +event mysql_auth_more_data%(c: connection, is_orig: bool, data: string%); diff --git a/src/analyzer/protocol/mysql/mysql-analyzer.pac b/src/analyzer/protocol/mysql/mysql-analyzer.pac index 31addd2518..afb75a44f3 100644 --- a/src/analyzer/protocol/mysql/mysql-analyzer.pac +++ b/src/analyzer/protocol/mysql/mysql-analyzer.pac @@ -14,6 +14,28 @@ refine flow MySQL_Flow += { connection()->zeek_analyzer()->Conn(), zeek::make_intrusive(c_str(${msg.handshake9.server_version}))); } + + if ( mysql_auth_plugin ) + { + if ( ${msg.version} == 10 && (${msg.handshake10.capability_flags_2} << 16) & CLIENT_PLUGIN_AUTH ) + { + auto auth_plugin = zeek::make_intrusive(c_str(${msg.handshake10.auth_plugin})); + auto data_part_1 = ${msg.handshake10.auth_plugin_data_part_1}; + auto data_part_2 = ${msg.handshake10.auth_plugin_data_part_2}; + std::vector data_parts = { + zeek::data_chunk_t{data_part_1.length(), reinterpret_cast(data_part_1.begin())}, + zeek::data_chunk_t{data_part_2.length(), reinterpret_cast(data_part_2.begin())}, + }; + auto data = zeek::make_intrusive(zeek::concatenate(data_parts)); + + zeek::BifEvent::enqueue_mysql_auth_plugin(connection()->zeek_analyzer(), + connection()->zeek_analyzer()->Conn(), + false /*is_orig*/, + std::move(auth_plugin), + std::move(data)); + } + } + return true; %} @@ -23,23 +45,42 @@ refine flow MySQL_Flow += { connection()->zeek_analyzer()->AnalyzerConfirmation(); // If the client requested SSL and didn't provide credentials, switch to SSL - if ( ${msg.version} == 10 && ( ${msg.v10_response.cap_flags} & CLIENT_SSL ) && ${msg.v10_response.credentials}->empty() ) + if ( ${msg.version} == 10 && ( ${msg.v10_response.cap_flags} & CLIENT_SSL )) { connection()->zeek_analyzer()->StartTLS(); + + if ( mysql_ssl_request ) + zeek::BifEvent::enqueue_mysql_ssl_request(connection()->zeek_analyzer(), + connection()->zeek_analyzer()->Conn()); return true; } if ( mysql_handshake ) { - if ( ${msg.version} == 10 && ${msg.v10_response.credentials}->size() > 0 ) + if ( ${msg.version} == 10 ) zeek::BifEvent::enqueue_mysql_handshake(connection()->zeek_analyzer(), connection()->zeek_analyzer()->Conn(), - zeek::make_intrusive(c_str(${msg.v10_response.credentials[0].username}))); + zeek::make_intrusive(c_str(${msg.v10_response.plain.credentials.username}))); if ( ${msg.version} == 9 ) zeek::BifEvent::enqueue_mysql_handshake(connection()->zeek_analyzer(), connection()->zeek_analyzer()->Conn(), zeek::make_intrusive(c_str(${msg.v9_response.username}))); } + + if ( mysql_auth_plugin ) + { + if ( ${msg.version} == 10 && ${msg.v10_response.plain.cap_flags} & CLIENT_PLUGIN_AUTH ) + { + auto auth_plugin = zeek::make_intrusive(c_str(${msg.v10_response.plain.auth_plugin})); + auto data = to_stringval(${msg.v10_response.plain.credentials.password.val}); + zeek::BifEvent::enqueue_mysql_auth_plugin(connection()->zeek_analyzer(), + connection()->zeek_analyzer()->Conn(), + true /*is_orig*/, + std::move(auth_plugin), + std::move(data)); + } + } + return true; %} @@ -83,8 +124,8 @@ refine flow MySQL_Flow += { function proc_resultset(msg: Resultset): bool %{ - if ( ${msg.is_eof} ) - return true; // Raised through proc_eof_packet() + if ( ${msg.is_eof_or_ok} ) + return true; // Raised through proc_eof_packet() or proc_ok_packet() if ( ! 
mysql_result_row ) return true; @@ -112,6 +153,24 @@ refine flow MySQL_Flow += { return true; %} + function proc_auth_switch_request(msg: AuthSwitchRequest): bool + %{ + zeek::BifEvent::enqueue_mysql_auth_switch_request(connection()->zeek_analyzer(), + connection()->zeek_analyzer()->Conn(), + zeek::make_intrusive(c_str(${msg.name})), + to_stringval(${msg.data})); + return true; + %} + + function proc_auth_more_data(msg: AuthMoreData): bool + %{ + zeek::BifEvent::enqueue_mysql_auth_more_data(connection()->zeek_analyzer(), + connection()->zeek_analyzer()->Conn(), + ${is_orig}, + to_stringval(${msg.data})); + return true; + %} + }; refine typeattr Initial_Handshake_Packet += &let { @@ -141,3 +200,11 @@ refine typeattr EOF_Packet += &let { refine typeattr Resultset += &let { proc = $context.flow.proc_resultset(this); }; + +refine typeattr AuthSwitchRequest += &let { + proc = $context.flow.proc_auth_switch_request(this); +}; + +refine typeattr AuthMoreData += &let { + proc = $context.flow.proc_auth_more_data(this); +}; diff --git a/src/analyzer/protocol/mysql/mysql-protocol.pac b/src/analyzer/protocol/mysql/mysql-protocol.pac index e8415e3de0..aeee595887 100644 --- a/src/analyzer/protocol/mysql/mysql-protocol.pac +++ b/src/analyzer/protocol/mysql/mysql-protocol.pac @@ -140,6 +140,11 @@ enum state { COMMAND_PHASE = 1, }; +enum ConnectionExpected { + EXPECT_HANDSHAKE, + EXPECT_AUTH_DATA, +}; + enum Expected { NO_EXPECTATION, EXPECT_STATUS, @@ -158,12 +163,133 @@ enum EOFType { }; enum Client_Capabilities { + CLIENT_CONNECT_WITH_DB = 0x00000008, CLIENT_SSL = 0x00000800, + CLIENT_PLUGIN_AUTH = 0x00080000, + CLIENT_CONNECT_ATTRS = 0x00100000, # Expects an OK (instead of EOF) after the resultset rows of a Text Resultset. CLIENT_DEPRECATE_EOF = 0x01000000, + CLIENT_ZSTD_COMPRESSION_ALGORITHM = 0x04000000, + CLIENT_QUERY_ATTRIBUTES = 0x08000000, +}; + +# Binary Protocol Resultset encoding. 
+# +# https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_binary_resultset.html +# +# Values taken from here: https://dev.mysql.com/doc/dev/mysql-server/latest/namespaceclassic__protocol_1_1field__type.html +enum field_types { + TYPE_DECIMAL = 0x00, + TYPE_TINY = 0x01, + TYPE_SHORT = 0x02, + TYPE_LONG = 0x03, + TYPE_FLOAT = 0x04, + TYPE_DOUBLE = 0x05, + TYPE_NULL = 0x06, + TYPE_TIMESTAMP = 0x07, + TYPE_LONGLONG = 0x08, + TYPE_INT24 = 0x09, + TYPE_DATE = 0x0a, + TYPE_TIME = 0x0b, + TYPE_DATETIME = 0x0c, + TYPE_YEAR = 0x0d, + TYPE_VARCHAR = 0x0f, + TYPE_BIT = 0x10, + TYPE_TIMESTAMP2 = 0x11, + TYPE_JSON = 0xf5, + TYPE_NEWDECIMAL = 0xf6, + TYPE_ENUM = 0xf7, + TYPE_SET = 0xf8, + TYPE_TINYBLOB = 0xf9, + TYPE_MEDIUMBLOB = 0xfa, + TYPE_LONGBLOB = 0xfb, + TYPE_BLOB = 0xfc, + TYPE_VARSTRING = 0xfd, + TYPE_STRING = 0xfe, + TYPE_GEOMETRY = 0xff, +}; + +type Date = record { + year : int16; + month: int8; + day : int8; +}; + +type Time = record { + hour : int8; + minute: int8; + second: int8; +}; + +type BinaryDate = record { + len: uint8 &enforce(len == 0 || len == 4 || len == 7 || len == 11); + have_date: case ( len > 0 ) of { + true -> date : Date; + false -> none_1: empty; + }; + have_time: case ( len > 4 ) of { + true -> time : Time; + false -> none_2: empty; + }; + have_micros: case ( len > 7 ) of { + true -> micros: int32; + false -> none_3: empty; + }; +}; + +type DurationTime = record { + is_negative: int8 &enforce(is_negative == 0 || is_negative == 1); + days : int32; + time : Time; +}; + +type BinaryTime = record { + len: uint8 &enforce(len == 0 || len == 8 || len == 12); + have_time: case ( len > 0 ) of { + true -> time : DurationTime; + false -> none_1: empty; + }; + have_micros: case ( len > 8 ) of { + true -> micros: int32; + false -> none_2: empty; + }; +}; + +type BinaryValue(type: uint16) = record { + value: case ( type ) of { + TYPE_DECIMAL -> decimal_val: LengthEncodedInteger; + TYPE_TINY -> tiny_val: int8; + TYPE_SHORT -> short_val: int16; + TYPE_LONG -> long_val: int32; + TYPE_FLOAT -> float_val: bytestring &length=4; + TYPE_DOUBLE -> double_val: bytestring &length=8; + TYPE_NULL -> null_val: empty; # in null_bitmap + TYPE_TIMESTAMP -> timestamp_val: BinaryDate; + TYPE_LONGLONG -> longlong_val: int64; + TYPE_INT24 -> int24_val: int32; + TYPE_DATE -> date_val: BinaryDate; + TYPE_TIME -> time_val: BinaryTime; + TYPE_DATETIME -> datetime_val: BinaryDate; + TYPE_YEAR -> year_val: int16; + TYPE_VARCHAR -> varchar_val: LengthEncodedString; + TYPE_BIT -> bit_val: LengthEncodedString; + TYPE_TIMESTAMP2 -> timestamp2_val: BinaryDate; + TYPE_JSON -> json_val: LengthEncodedString; + TYPE_NEWDECIMAL -> newdecimal_val: LengthEncodedString; + TYPE_ENUM -> enum_val: LengthEncodedString; + TYPE_SET -> set_val: LengthEncodedString; + TYPE_TINYBLOB -> tinyblob_val: LengthEncodedString; + TYPE_MEDIUMBLOB -> mediumblob_val: LengthEncodedString; + TYPE_LONGBLOB -> longblob_val: LengthEncodedString; + TYPE_BLOB -> blob_val: LengthEncodedString; + TYPE_VARSTRING -> varstring_val: LengthEncodedString; + TYPE_STRING -> string_val: LengthEncodedString; + TYPE_GEOMETRY -> geometry_val: LengthEncodedString; + }; }; type NUL_String = RE/[^\0]*\0/; +type EmptyOrNUL_String = RE/([^\0]*\0)?/; # MySQL PDU @@ -193,7 +319,7 @@ type Server_Message(seq_id: uint8, pkt_len: uint32) = case is_initial of { }; type Client_Message(state: int) = case state of { - CONNECTION_PHASE -> connection_phase: Handshake_Response_Packet; + CONNECTION_PHASE -> connection_phase: Connection_Phase_Packets; COMMAND_PHASE -> 
command_phase : Command_Request_Packet; }; @@ -219,8 +345,24 @@ type Handshake_v10 = record { character_set : uint8; status_flags : uint16; capability_flags_2 : uint16; - auth_plugin_data_len : uint8; - auth_plugin_name : NUL_String; + auth_plugin_data_len : uint8 &enforce( auth_plugin_data_len==0 || auth_plugin_data_len >= 21); + reserved : padding[10]; + auth_plugin_data_part_2: bytestring &length=auth_plugin_data_part_2_len; + have_plugin : case ( ( capability_flags_2 << 16 ) & CLIENT_PLUGIN_AUTH ) of { + CLIENT_PLUGIN_AUTH -> auth_plugin: NUL_String; + 0x0 -> none : empty; + }; +} &let { + # The length of auth_plugin_data_part_2 is at least 13 bytes, + # or auth_plugin_data_len - 8 if that is larger, check for + # auth_plugin_data_len > 21 (8 + 13) to prevent underflow for + # when subtracting 8. + # + # https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_connection_phase_packets_protocol_handshake_v10.html + auth_plugin_data_part_2_len = auth_plugin_data_len > 21 ? auth_plugin_data_len - 8 : 13; + update_auth_plugin: bool = $context.connection.set_auth_plugin(auth_plugin) + &if( ( capability_flags_2 << 16 ) & CLIENT_PLUGIN_AUTH ); + server_query_attrs: bool = $context.connection.set_server_query_attrs(( capability_flags_2 << 16 ) & CLIENT_QUERY_ATTRIBUTES); }; type Handshake_v9 = record { @@ -240,7 +382,45 @@ type Handshake_Response_Packet = case $context.connection.get_version() of { type Handshake_Credentials_v10 = record { username : NUL_String; - password : bytestring &restofdata; + password : LengthEncodedString; +}; + +type Connection_Attribute = record { + name : LengthEncodedString; + value : LengthEncodedString; +}; + +type Handshake_Connection_Attributes = record { + length : uint8; + attrs : Connection_Attribute[] &until($input.length() == 0); +} &length = length+1; + +type Handshake_Plain_v10(cap_flags: uint32) = record { + credentials: Handshake_Credentials_v10; + have_db : case ( cap_flags & CLIENT_CONNECT_WITH_DB ) of { + CLIENT_CONNECT_WITH_DB -> database: NUL_String; + 0x0 -> none_1 : empty; + }; + have_plugin : case ( cap_flags & CLIENT_PLUGIN_AUTH ) of { + CLIENT_PLUGIN_AUTH -> auth_plugin: EmptyOrNUL_String; + 0x0 -> none_2 : empty; + }; + have_attrs : case ( cap_flags & CLIENT_CONNECT_ATTRS ) of { + CLIENT_CONNECT_ATTRS -> conn_attrs: Handshake_Connection_Attributes; + 0x0 -> none_3 : empty; + }; + have_zstd : case ( cap_flags & CLIENT_ZSTD_COMPRESSION_ALGORITHM ) of { + CLIENT_ZSTD_COMPRESSION_ALGORITHM -> zstd_compression_level: uint8; + 0x0 -> none_4 : empty; + }; +} &let { + update_auth_plugin: bool = $context.connection.set_auth_plugin(auth_plugin) + &if( cap_flags & CLIENT_PLUGIN_AUTH ); + + # Switch client state into expecting more auth data. If the server responds + # with an OK_Packet before, will switch into COMMAND_PHASE. 
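
Two pieces of arithmetic in the handshake records above are easy to misread, so here they are restated as a standalone C++ sketch (has_plugin_auth and part2_len are illustrative names): the upper 16-bit capability half has to be shifted back into place before masking, and auth-plugin-data-part-2 is never shorter than 13 bytes.

#include <cassert>
#include <cstdint>

constexpr uint32_t CLIENT_PLUGIN_AUTH = 0x00080000;

// Handshake_v10 carries the 32-bit capability vector as two 16-bit halves;
// capability_flags_2 is the upper half, so shift it up before testing a flag
// that lives above bit 15.
bool has_plugin_auth(uint16_t capability_flags_1, uint16_t capability_flags_2) {
    uint32_t caps = (uint32_t(capability_flags_2) << 16) | capability_flags_1;
    return (caps & CLIENT_PLUGIN_AUTH) != 0;
}

// auth_plugin_data_part_2 is 13 bytes, or auth_plugin_data_len - 8 if that is
// larger; checking len > 21 (= 8 + 13) keeps the subtraction from going below 13.
uint8_t part2_len(uint8_t auth_plugin_data_len) {
    return auth_plugin_data_len > 21 ? auth_plugin_data_len - 8 : 13;
}

int main() {
    assert(has_plugin_auth(0x0000, 0x0008)); // 0x0008 << 16 == CLIENT_PLUGIN_AUTH
    assert(part2_len(21) == 13);
    assert(part2_len(22) == 14);
}
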
+ update_conn_expectation: bool = $context.connection.set_next_conn_expected(EXPECT_AUTH_DATA) + &if( cap_flags & CLIENT_PLUGIN_AUTH ); }; type Handshake_Response_Packet_v10 = record { @@ -248,9 +428,13 @@ type Handshake_Response_Packet_v10 = record { max_pkt_size: uint32; char_set : uint8; pad : padding[23]; - credentials : Handshake_Credentials_v10[] &until($input.length() == 0); + use_ssl : case ( cap_flags & CLIENT_SSL ) of { + CLIENT_SSL -> none : empty; + default -> plain: Handshake_Plain_v10(cap_flags); + }; } &let { deprecate_eof: bool = $context.connection.set_deprecate_eof(cap_flags & CLIENT_DEPRECATE_EOF); + client_query_attrs: bool = $context.connection.set_client_query_attrs(cap_flags & CLIENT_QUERY_ATTRIBUTES); }; type Handshake_Response_Packet_v9 = record { @@ -258,17 +442,71 @@ type Handshake_Response_Packet_v9 = record { max_pkt_size : uint24le; username : NUL_String; auth_response: NUL_String; - have_db : case ( cap_flags & 0x8 ) of { - 0x8 -> database: NUL_String; + have_db : case ( cap_flags & CLIENT_CONNECT_WITH_DB ) of { + CLIENT_CONNECT_WITH_DB -> database: NUL_String; 0x0 -> none : empty; }; password : bytestring &restofdata; }; +# Connection Phase + +type Connection_Phase_Packets = case $context.connection.get_conn_expectation() of { + EXPECT_HANDSHAKE -> handshake_resp: Handshake_Response_Packet; + EXPECT_AUTH_DATA -> auth_data: AuthMoreData(true); +}; + +# Query attribute handling for COM_QUERY +# +type AttributeTypeAndName = record { + type: uint8; + unsigned_flag: uint8; + name: LengthEncodedString; +}; + +type AttributeValue(is_null: bool, type: uint8) = record { + null: case is_null of { + false -> val: BinaryValue(type); + true -> null_val: empty; + }; +} &let { + # Move parsing the next query attribute. + done = $context.connection.next_query_attr(); +}; + +type Attributes(count: int) = record { + null_bitmap : bytestring &length=(count + 7) / 8; + send_types_to_server: uint8 &enforce(send_types_to_server == 1); + names : AttributeTypeAndName[count]; + values : AttributeValue( + # Check if null_bitmap contains this attribute index. This + # will pass true if the attribute value is NULL and parsing + # skipped in AttributeValue above. + (null_bitmap[$context.connection.query_attr_idx() / 8] >> ($context.connection.query_attr_idx() % 8)) & 0x01, + names[$context.connection.query_attr_idx()].type + )[] &until($context.connection.query_attr_idx() >= count); +}; + +type Query_Attributes = record { + count : LengthEncodedInteger; + set_count: LengthEncodedInteger; + have_attr: case ( attr_count > 0 ) of { + true -> attrs: Attributes(attr_count); + false -> none: empty; + } &requires(new_query_attrs); +} &let { + attr_count: int = to_int()(count); + new_query_attrs = $context.connection.new_query_attrs(); +}; + # Command Request type Command_Request_Packet = record { command: uint8; + attrs : case ( command == COM_QUERY && $context.connection.get_client_query_attrs() && $context.connection.get_server_query_attrs() ) of { + true -> query_attrs: Query_Attributes; + false -> none: empty; + }; arg : bytestring &restofdata; } &let { update_expectation: bool = $context.connection.set_next_expected_from_command(command); @@ -292,6 +530,10 @@ type Command_Response_Status = record { pkt_type: uint8; response: case pkt_type of { 0x00 -> data_ok: OK_Packet; + # When still in the CONNECTION_PHASE, the server can reply + # with AuthMoreData which is 0x01 stuffed opaque payload. 
+ # https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_connection_phase_packets_protocol_auth_more_data.html + 0x01 -> auth_more_data: AuthMoreData(false); 0xfe -> data_eof: EOF_Packet(EOF_END); 0xff -> data_err: ERR_Packet; default -> unknown: empty; @@ -326,22 +568,22 @@ type ColumnDefinition = record { }; # Only used to indicate the end of a result, no intermediate eofs here. -type EOFOrOK = case $context.connection.get_deprecate_eof() of { +# MySQL spec says "You must check whether the packet length is less than 9 +# to make sure that it is a EOF_Packet packet" so the value of 13 here +# comes from that 9, plus a 4-byte header. +type EOFOrOK(pkt_len: uint32) = case ( $context.connection.get_deprecate_eof() || pkt_len > 13 ) of { false -> eof: EOF_Packet(EOF_END); true -> ok: OK_Packet; }; type ColumnDefinitionOrEOF(pkt_len: uint32) = record { marker : uint8; - def_or_eof: case is_eof of { - true -> eof: EOFOrOK; + def_or_eof: case is_eof_or_ok of { + true -> eof: EOFOrOK(pkt_len); false -> def: ColumnDefinition41(marker); - } &requires(is_eof); + } &requires(is_eof_or_ok); } &let { - # MySQL spec says "You must check whether the packet length is less than 9 - # to make sure that it is a EOF_Packet packet" so the value of 13 here - # comes from that 9, plus a 4-byte header. - is_eof: bool = (marker == 0xfe && pkt_len < 13); + is_eof_or_ok: bool = (marker == 0xfe); }; @@ -350,22 +592,19 @@ type EOFIfLegacyThenResultset(pkt_len: uint32) = case $context.connection.get_de true -> resultset: Resultset(pkt_len); } &let { update_result_seen: bool = $context.connection.set_results_seen(0); - update_expectation: bool = $context.connection.set_next_expected(EXPECT_RESULTSET); + update_expectation: bool = $context.connection.set_next_expected(EXPECT_RESULTSET) &if( ! $context.connection.get_deprecate_eof() ); }; type Resultset(pkt_len: uint32) = record { marker : uint8; - row_or_eof: case is_eof of { - true -> eof: EOFOrOK; + row_or_eof: case is_eof_or_ok of { + true -> eof: EOFOrOK(pkt_len); false -> row: ResultsetRow(marker); - } &requires(is_eof); + } &requires(is_eof_or_ok); } &let { - # MySQL spec says "You must check whether the packet length is less than 9 - # to make sure that it is a EOF_Packet packet" so the value of 13 here - # comes from that 9, plus a 4-byte header. - is_eof : bool = (marker == 0xfe && pkt_len < 13); + is_eof_or_ok : bool = (marker == 0xfe); update_result_seen: bool = $context.connection.inc_results_seen(); - update_expectation: bool = $context.connection.set_next_expected(is_eof ? NO_EXPECTATION : EXPECT_RESULTSET); + update_expectation: bool = $context.connection.set_next_expected(is_eof_or_ok ? NO_EXPECTATION : EXPECT_RESULTSET); }; type ResultsetRow(first_byte: uint8) = record { @@ -389,10 +628,20 @@ type ColumnDefinition41(first_byte: uint8) = record { filler : padding[2]; }; +# Opaque auth data exchanged during the connection phase between client and server. +type AuthMoreData(is_orig: bool) = record { + data : bytestring &restofdata; +}; + type AuthSwitchRequest = record { - status: uint8; + status: uint8 &enforce(status==254); name : NUL_String; data : bytestring &restofdata; +} &let { + update_auth_plugin : bool = $context.connection.set_auth_plugin(name); + update_conn_expectation: bool = $context.connection.set_next_conn_expected(EXPECT_AUTH_DATA); + # After an AuthSwitchRequest, server replies with OK_Packet, ERR_Packet or AuthMoreData. 
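
The resultset changes above hinge on one detail worth spelling out: a 0xfe marker by itself no longer implies an EOF_Packet, because with CLIENT_DEPRECATE_EOF the terminator is an OK_Packet that also starts with 0xfe. A standalone C++ sketch of the same decision (classify_fe is an illustrative name; 13 is the spec's 9-byte bound plus the 4-byte packet header, exactly as in EOFOrOK):

#include <cassert>
#include <cstdint>

enum class Trailer { ROW_OR_COLUMN, EOF_PACKET, OK_PACKET };

// Mirror of is_eof_or_ok/EOFOrOK: only a 0xfe marker can terminate a resultset;
// which packet it actually is depends on the negotiated CLIENT_DEPRECATE_EOF
// capability and the packet length (a real EOF_Packet has fewer than 9 payload bytes).
Trailer classify_fe(uint8_t marker, uint32_t pkt_len, bool deprecate_eof) {
    if ( marker != 0xfe )
        return Trailer::ROW_OR_COLUMN;

    if ( deprecate_eof || pkt_len > 13 )
        return Trailer::OK_PACKET;

    return Trailer::EOF_PACKET;
}

int main() {
    assert(classify_fe(0x03, 42, false) == Trailer::ROW_OR_COLUMN);
    assert(classify_fe(0xfe, 9, false) == Trailer::EOF_PACKET);  // small packet, legacy EOF
    assert(classify_fe(0xfe, 30, false) == Trailer::OK_PACKET);  // too large to be an EOF
    assert(classify_fe(0xfe, 9, true) == Trailer::OK_PACKET);    // CLIENT_DEPRECATE_EOF set
}
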
+ update_expectation: bool = $context.connection.set_next_expected(EXPECT_STATUS); }; type ColumnDefinition320 = record { @@ -440,10 +689,15 @@ refine connection MySQL_Conn += { uint8 previous_seq_id_; int state_; Expected expected_; + ConnectionExpected conn_expected_; uint32 col_count_; uint32 remaining_cols_; uint32 results_seen_; bool deprecate_eof_; + bool server_query_attrs_; + bool client_query_attrs_; + std::string auth_plugin_; + int query_attr_idx_; %} %init{ @@ -451,10 +705,14 @@ refine connection MySQL_Conn += { previous_seq_id_ = 0; state_ = CONNECTION_PHASE; expected_ = EXPECT_STATUS; + conn_expected_ = EXPECT_HANDSHAKE; col_count_ = 0; remaining_cols_ = 0; results_seen_ = 0; deprecate_eof_ = false; + server_query_attrs_ = false; + client_query_attrs_ = false; + query_attr_idx_ = 0; %} function get_version(): uint8 @@ -487,6 +745,10 @@ refine connection MySQL_Conn += { function update_state(s: state): bool %{ state_ = s; + + if ( s == COMMAND_PHASE ) + conn_expected_ = EXPECT_HANDSHAKE; // Reset connection phase expectation + return true; %} @@ -501,6 +763,41 @@ refine connection MySQL_Conn += { return true; %} + function get_server_query_attrs(): bool + %{ + return server_query_attrs_; + %} + + function set_server_query_attrs(q: bool): bool + %{ + server_query_attrs_ = q; + return true; + %} + + function get_client_query_attrs(): bool + %{ + return client_query_attrs_; + %} + + function set_client_query_attrs(q: bool): bool + %{ + client_query_attrs_ = q; + return true; + %} + + function set_auth_plugin(a: bytestring): bool + %{ + // binpac::std_str() includes trailing \0 from parsing. + auto new_auth_plugin = std::string(binpac::c_str(a)); + if ( ! auth_plugin_.empty() && new_auth_plugin != auth_plugin_ ) + { + expected_ = EXPECT_AUTH_SWITCH; + } + + auth_plugin_ = std::move(new_auth_plugin); + return true; + %} + function get_expectation(): Expected %{ return expected_; @@ -512,6 +809,17 @@ refine connection MySQL_Conn += { return true; %} + function get_conn_expectation(): ConnectionExpected + %{ + return conn_expected_; + %} + + function set_next_conn_expected(c: ConnectionExpected): bool + %{ + conn_expected_ = c; + return true; + %} + function set_next_expected_from_command(cmd: uint8): bool %{ switch ( cmd ) { @@ -662,4 +970,21 @@ refine connection MySQL_Conn += { ++results_seen_; return true; %} + + function query_attr_idx(): int + %{ + return query_attr_idx_; + %} + + function new_query_attrs(): bool + %{ + query_attr_idx_ = 0; + return true; + %} + + function next_query_attr(): bool + %{ + query_attr_idx_++; + return true; + %} }; diff --git a/src/analyzer/protocol/smb/smb2-protocol.pac b/src/analyzer/protocol/smb/smb2-protocol.pac index b922c62f2b..5e7bf673b1 100644 --- a/src/analyzer/protocol/smb/smb2-protocol.pac +++ b/src/analyzer/protocol/smb/smb2-protocol.pac @@ -413,7 +413,7 @@ type SMB2_error_response(header: SMB2_Header) = record { byte_count : uint32; # This is implemented incorrectly and is disabled for now. 
#error_data : SMB2_error_data(header, byte_count); - stuff : bytestring &restofdata &transient; + stuff : bytestring &length=byte_count &transient; } &byteorder = littleendian; type SMB2_logoff_request(header: SMB2_Header) = record { diff --git a/src/broker/Manager.cc b/src/broker/Manager.cc index 015f53d10e..dec03afb8e 100644 --- a/src/broker/Manager.cc +++ b/src/broker/Manager.cc @@ -340,6 +340,43 @@ void Manager::InitPostScript() { bstate->subscriber.add_topic(broker::topic::store_events(), true); InitializeBrokerStoreForwarding(); + + num_peers_metric = + telemetry_mgr->GaugeInstance("zeek", "broker_peers", {}, "Current number of peers connected via broker", "", + []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.gauge.value = static_cast(broker_mgr->peer_count); + return metric; + }); + + num_stores_metric = + telemetry_mgr->GaugeInstance("zeek", "broker_stores", {}, "Current number of stores connected via broker", "", + []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.gauge.value = static_cast(broker_mgr->data_stores.size()); + return metric; + }); + + num_pending_queries_metric = + telemetry_mgr->GaugeInstance("zeek", "broker_pending_queries", {}, "Current number of pending broker queries", + "", []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.gauge.value = static_cast(broker_mgr->pending_queries.size()); + return metric; + }); + + num_events_incoming_metric = telemetry_mgr->CounterInstance("zeek", "broker_incoming_events", {}, + "Total number of incoming events via broker"); + num_events_outgoing_metric = telemetry_mgr->CounterInstance("zeek", "broker_outgoing_events", {}, + "Total number of outgoing events via broker"); + num_logs_incoming_metric = + telemetry_mgr->CounterInstance("zeek", "broker_incoming_logs", {}, "Total number of incoming logs via broker"); + num_logs_outgoing_metric = + telemetry_mgr->CounterInstance("zeek", "broker_outgoing_logs", {}, "Total number of outgoing logs via broker"); + num_ids_incoming_metric = + telemetry_mgr->CounterInstance("zeek", "broker_incoming_ids", {}, "Total number of incoming ids via broker"); + num_ids_outgoing_metric = + telemetry_mgr->CounterInstance("zeek", "broker_outgoing_ids", {}, "Total number of outgoing ids via broker"); } void Manager::InitializeBrokerStoreForwarding() { @@ -528,7 +565,7 @@ bool Manager::PublishEvent(string topic, std::string name, broker::vector args, DBG_LOG(DBG_BROKER, "Publishing event: %s", RenderEvent(topic, name, args).c_str()); broker::zeek::Event ev(std::move(name), std::move(args), broker::to_timestamp(ts)); bstate->endpoint.publish(std::move(topic), ev.move_data()); - ++statistics.num_events_outgoing; + num_events_outgoing_metric->Inc(); return true; } @@ -588,7 +625,7 @@ bool Manager::PublishIdentifier(std::string topic, std::string id) { broker::zeek::IdentifierUpdate msg(std::move(id), std::move(data.value_)); DBG_LOG(DBG_BROKER, "Publishing id-update: %s", RenderMessage(topic, msg.as_data()).c_str()); bstate->endpoint.publish(std::move(topic), msg.move_data()); - ++statistics.num_ids_outgoing; + num_ids_outgoing_metric->Inc(); return true; } @@ -715,8 +752,10 @@ bool Manager::PublishLogWrite(EnumVal* stream, EnumVal* writer, string path, int ++lb.message_count; lb.msgs[topic].add(std::move(msg)); - if ( lb.message_count >= log_batch_size ) - statistics.num_logs_outgoing += lb.Flush(bstate->endpoint, log_batch_size); + if ( lb.message_count >= log_batch_size ) { + auto outgoing_logs = 
static_cast(lb.Flush(bstate->endpoint, log_batch_size)); + num_logs_outgoing_metric->Inc(outgoing_logs); + } return true; } @@ -746,7 +785,8 @@ size_t Manager::FlushLogBuffers() { for ( auto& lb : log_buffers ) rval += lb.Flush(bstate->endpoint, log_batch_size); - statistics.num_logs_outgoing += rval; + num_logs_outgoing_metric->Inc(rval); + return rval; } @@ -1141,7 +1181,7 @@ void Manager::ProcessMessage(std::string_view topic, broker::zeek::Event& ev) { ts = run_state::network_time; DBG_LOG(DBG_BROKER, "Process event: %s (%.6f) %s", c_str_safe(name).c_str(), ts, RenderMessage(args).c_str()); - ++statistics.num_events_incoming; + num_events_incoming_metric->Inc(); auto handler = event_registry->Lookup(name); if ( ! handler ) @@ -1286,7 +1326,7 @@ bool Manager::ProcessMessage(std::string_view, broker::zeek::LogWrite& lw) { return false; } - ++statistics.num_logs_incoming; + num_logs_incoming_metric->Inc(); auto&& stream_id_name = lw.stream_id().name; // Get stream ID. @@ -1352,7 +1392,7 @@ bool Manager::ProcessMessage(std::string_view, broker::zeek::IdentifierUpdate& i return false; } - ++statistics.num_ids_incoming; + num_ids_incoming_metric->Inc(); auto id_name = c_str_safe(iu.id_name()); auto id_value = convert_if_broker_variant_or_move(iu.id_value()); const auto& id = zeek::detail::global_scope()->Find(id_name); @@ -1706,7 +1746,12 @@ const Stats& Manager::GetStatistics() { statistics.num_stores = data_stores.size(); statistics.num_pending_queries = pending_queries.size(); - // The other attributes are set as activity happens. + statistics.num_events_incoming = static_cast(num_events_incoming_metric->Value()); + statistics.num_events_outgoing = static_cast(num_events_outgoing_metric->Value()); + statistics.num_logs_incoming = static_cast(num_logs_incoming_metric->Value()); + statistics.num_logs_outgoing = static_cast(num_logs_outgoing_metric->Value()); + statistics.num_ids_incoming = static_cast(num_ids_incoming_metric->Value()); + statistics.num_ids_outgoing = static_cast(num_ids_outgoing_metric->Value()); return statistics; } diff --git a/src/broker/Manager.h b/src/broker/Manager.h index d373d8883e..43bd00fbc9 100644 --- a/src/broker/Manager.h +++ b/src/broker/Manager.h @@ -27,8 +27,11 @@ using VectorTypePtr = IntrusivePtr; using TableValPtr = IntrusivePtr; namespace telemetry { -class Manager; -} +class Gauge; +class Counter; +using GaugePtr = std::shared_ptr; +using CounterPtr = std::shared_ptr; +} // namespace telemetry namespace detail { class Frame; @@ -451,6 +454,16 @@ private: std::string zeek_table_db_directory; static int script_scope; + + telemetry::GaugePtr num_peers_metric; + telemetry::GaugePtr num_stores_metric; + telemetry::GaugePtr num_pending_queries_metric; + telemetry::CounterPtr num_events_incoming_metric; + telemetry::CounterPtr num_events_outgoing_metric; + telemetry::CounterPtr num_logs_incoming_metric; + telemetry::CounterPtr num_logs_outgoing_metric; + telemetry::CounterPtr num_ids_incoming_metric; + telemetry::CounterPtr num_ids_outgoing_metric; }; } // namespace Broker diff --git a/src/file_analysis/analyzer/x509/X509.cc b/src/file_analysis/analyzer/x509/X509.cc index c32a00f2ba..aefdf2738c 100644 --- a/src/file_analysis/analyzer/x509/X509.cc +++ b/src/file_analysis/analyzer/x509/X509.cc @@ -161,8 +161,9 @@ RecordValPtr X509::ParseCertificate(X509Val* cert_val, file_analysis::File* f) { #if ( OPENSSL_VERSION_NUMBER < 0x10100000L ) i2a_ASN1_OBJECT(bio, ssl_cert->sig_alg->algorithm); #else - const X509_ALGOR* sigalg = X509_get0_tbs_sigalg(ssl_cert); - 
i2a_ASN1_OBJECT(bio, sigalg->algorithm); + const ASN1_OBJECT* alg; + X509_ALGOR_get0(&alg, NULL, NULL, X509_get0_tbs_sigalg(ssl_cert)); + i2a_ASN1_OBJECT(bio, alg); #endif len = BIO_gets(bio, buf, sizeof(buf)); pX509Cert->Assign(13, make_intrusive(len, buf)); diff --git a/src/fuzzers/CMakeLists.txt b/src/fuzzers/CMakeLists.txt index 41a46dc64a..5a4b13f888 100644 --- a/src/fuzzers/CMakeLists.txt +++ b/src/fuzzers/CMakeLists.txt @@ -107,6 +107,8 @@ add_generic_analyzer_fuzz_target(dhcp udp) add_generic_analyzer_fuzz_target(dnp3_tcp) add_generic_analyzer_fuzz_target(dtls udp) add_generic_analyzer_fuzz_target(irc) +add_generic_analyzer_fuzz_target(ldap_udp udp) +add_generic_analyzer_fuzz_target(ldap_tcp tcp) add_generic_analyzer_fuzz_target(modbus) add_generic_analyzer_fuzz_target(mqtt) add_generic_analyzer_fuzz_target(mysql) diff --git a/src/fuzzers/corpora/ldap_tcp-corpus.zip b/src/fuzzers/corpora/ldap_tcp-corpus.zip new file mode 100644 index 0000000000..e95f0c4b53 Binary files /dev/null and b/src/fuzzers/corpora/ldap_tcp-corpus.zip differ diff --git a/src/fuzzers/corpora/ldap_udp-corpus.zip b/src/fuzzers/corpora/ldap_udp-corpus.zip new file mode 100644 index 0000000000..e95f0c4b53 Binary files /dev/null and b/src/fuzzers/corpora/ldap_udp-corpus.zip differ diff --git a/src/probabilistic/BloomFilter.cc b/src/probabilistic/BloomFilter.cc index e3a970c3c6..9d95cf5a8d 100644 --- a/src/probabilistic/BloomFilter.cc +++ b/src/probabilistic/BloomFilter.cc @@ -18,12 +18,6 @@ BloomFilter::BloomFilter(const detail::Hasher* arg_hasher) { hasher = arg_hasher BloomFilter::~BloomFilter() { delete hasher; } -broker::expected BloomFilter::Serialize() const { - if ( auto res = SerializeData() ) - return zeek::detail::BrokerDataAccess::Unbox(*res); - return {broker::make_error(broker::ec::serialization_failed)}; -} - std::optional BloomFilter::SerializeData() const { auto h = hasher->Serialize(); @@ -43,10 +37,6 @@ std::optional BloomFilter::SerializeData() const { return std::move(builder).Build(); } -std::unique_ptr BloomFilter::Unserialize(const broker::data& data) { - return UnserializeData(BrokerDataView{&data}); -} - std::unique_ptr BloomFilter::UnserializeData(BrokerDataView data) { if ( ! data.IsList() ) return nullptr; diff --git a/src/probabilistic/BloomFilter.h b/src/probabilistic/BloomFilter.h index b3cfb34eb6..f79b81a60e 100644 --- a/src/probabilistic/BloomFilter.h +++ b/src/probabilistic/BloomFilter.h @@ -105,9 +105,6 @@ public: */ virtual std::string InternalState() const = 0; - [[deprecated("Remove in v7.1: use SerializeData instead")]] broker::expected Serialize() const; - [[deprecated("Remove in v7.1: use UnserializeData instead")]] static std::unique_ptr Unserialize( - const broker::data& data); std::optional SerializeData() const; static std::unique_ptr UnserializeData(BrokerDataView data); diff --git a/src/rule-parse.y b/src/rule-parse.y index 33fa8dd0c5..e1294d6af9 100644 --- a/src/rule-parse.y +++ b/src/rule-parse.y @@ -196,22 +196,7 @@ rule_attr: if ( is_event($2) ) current_rule->AddAction(new zeek::detail::RuleActionEvent(nullptr, $2)); else - { - const char *msg = id_to_str($2); - if ( ! 
zeek::util::streq(msg, "") ) - zeek::reporter->Deprecation(zeek::util::fmt("Remove in v7.1: Using an identifier for msg is deprecated (%s:%d)", - current_rule_file, rules_line_number+1)); - current_rule->AddAction(new zeek::detail::RuleActionEvent(msg)); - } - } - - | TOK_EVENT TOK_IDENT TOK_IDENT - { - // Maybe remove in v7.1: Once we do not support msg as identifier, - // this extra messaging isn't all that useful anymore, but it - // beats a syntax error. - rules_error("custom event and identifier for msg unsupported"); - zeek::detail::rule_matcher->SetParseError(); + rules_error("identifier is not an event", $2); } | TOK_EVENT TOK_IDENT TOK_STRING diff --git a/src/script_opt/CPP/RuntimeInitSupport.cc b/src/script_opt/CPP/RuntimeInitSupport.cc index f3ab29295a..3674d7605d 100644 --- a/src/script_opt/CPP/RuntimeInitSupport.cc +++ b/src/script_opt/CPP/RuntimeInitSupport.cc @@ -131,13 +131,8 @@ void activate_bodies__CPP(const char* fn, const char* module, bool exported, Typ events.insert(cs.events.begin(), cs.events.end()); } - for ( const auto& e : events ) { - auto eh = event_registry->Register(e); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - eh->SetUsed(); -#pragma GCC diagnostic pop - } + for ( const auto& e : events ) + event_registry->Register(e); } IDPtr lookup_global__CPP(const char* g, const TypePtr& t, bool exported) { @@ -191,13 +186,8 @@ FuncValPtr lookup_func__CPP(string name, int num_bodies, vector has // This might register the same event more than once, // if it's used in multiple bodies, but that's okay as // the semantics for Register explicitly allow it. - for ( auto& e : f.events ) { - auto eh = event_registry->Register(e); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - eh->SetUsed(); -#pragma GCC diagnostic pop - } + for ( auto& e : f.events ) + event_registry->Register(e); } auto sf = make_intrusive(std::move(name), std::move(ft), std::move(bodies), std::move(priorities)); diff --git a/src/script_opt/Expr.cc b/src/script_opt/Expr.cc index 1ad24c5277..353604f1a7 100644 --- a/src/script_opt/Expr.cc +++ b/src/script_opt/Expr.cc @@ -1238,10 +1238,28 @@ ExprPtr EqExpr::Reduce(Reducer* c, StmtPtr& red_stmt) { if ( IsHasElementsTest() ) return BuildHasElementsTest()->Reduce(c, red_stmt); - if ( GetType()->Tag() == TYPE_BOOL && same_singletons(op1, op2) ) { - bool t = Tag() == EXPR_EQ; - auto res = with_location_of(make_intrusive(val_mgr->Bool(t)), this); - return res->Reduce(c, red_stmt); + if ( GetType()->Tag() == TYPE_BOOL ) { + if ( same_singletons(op1, op2) ) { + bool t = Tag() == EXPR_EQ; + auto res = with_location_of(make_intrusive(val_mgr->Bool(t)), this); + return res->Reduce(c, red_stmt); + } + + if ( op1->GetType()->Tag() == TYPE_BOOL ) { + if ( op1->Tag() == EXPR_CONST ) + std::swap(op1, op2); + + if ( op2->Tag() == EXPR_CONST ) { + bool t = Tag() == EXPR_EQ; + if ( op2->AsConstExpr()->Value()->IsZero() ) + t = ! 
t; + if ( t ) + return op1->Reduce(c, red_stmt); + + auto res = with_location_of(make_intrusive(op1), this); + return res->Reduce(c, red_stmt); + } + } } return BinaryExpr::Reduce(c, red_stmt); diff --git a/src/script_opt/ScriptOpt.cc b/src/script_opt/ScriptOpt.cc index c41e6cf429..d09e9dd25b 100644 --- a/src/script_opt/ScriptOpt.cc +++ b/src/script_opt/ScriptOpt.cc @@ -417,13 +417,8 @@ static void use_CPP() { f.SetBody(b); } - for ( auto& e : s->second.events ) { - auto h = event_registry->Register(e); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - h->SetUsed(); -#pragma GCC diagnostic pop - } + for ( auto& e : s->second.events ) + event_registry->Register(e); auto finish = s->second.finish_init_func; if ( finish ) diff --git a/src/script_opt/Stmt.cc b/src/script_opt/Stmt.cc index 618d52ebda..8ef72c60b4 100644 --- a/src/script_opt/Stmt.cc +++ b/src/script_opt/Stmt.cc @@ -245,6 +245,12 @@ StmtPtr IfStmt::DoReduce(Reducer* c) { red_e_stmt = cond_red_stmt; } + // Check again for negation given above reductions/replacements. + if ( e->Tag() == EXPR_NOT ) { + std::swap(s1, s2); + e = e->GetOp1(); + } + StmtPtr sl; if ( e->IsConst() ) { diff --git a/src/session/Manager.cc b/src/session/Manager.cc index 8f2fbc6881..d33182a705 100644 --- a/src/session/Manager.cc +++ b/src/session/Manager.cc @@ -46,9 +46,9 @@ public: ProtocolMap::iterator InitCounters(const std::string& protocol) { auto active_family = - telemetry_mgr->GaugeFamily("zeek", "active-sessions", {"protocol"}, "Active Zeek Sessions"); + telemetry_mgr->GaugeFamily("zeek", "active_sessions", {"protocol"}, "Active Zeek Sessions"); auto total_family = - telemetry_mgr->CounterFamily("zeek", "total-sessions", {"protocol"}, "Total number of sessions"); + telemetry_mgr->CounterFamily("zeek", "total_sessions", {"protocol"}, "Total number of sessions"); auto [it, inserted] = entries.insert({protocol, Protocol{active_family, total_family, protocol}}); @@ -75,7 +75,17 @@ private: } // namespace detail -Manager::Manager() { stats = new detail::ProtocolStats(); } +Manager::Manager() { + stats = new detail::ProtocolStats(); + ended_sessions_metric_family = telemetry_mgr->CounterFamily("zeek", "ended_sessions", {"reason"}, + "Number of sessions ended for specific reasons"); + ended_by_inactivity_metric = + ended_sessions_metric_family->GetOrAdd({{"reason", "inactivity"}}, []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.counter.value = static_cast(zeek::detail::killed_by_inactivity); + return metric; + }); +} Manager::~Manager() { Clear(); diff --git a/src/session/Manager.h b/src/session/Manager.h index 6bbb128d95..eb02e87498 100644 --- a/src/session/Manager.h +++ b/src/session/Manager.h @@ -13,6 +13,13 @@ namespace zeek { +namespace telemetry { +class CounterFamily; +using CounterFamilyPtr = std::shared_ptr; +class Counter; +using CounterPtr = std::shared_ptr; +} // namespace telemetry + namespace detail { class PacketFilter; } @@ -82,7 +89,7 @@ public: void Weird(const char* name, const Packet* pkt, const char* addl = "", const char* source = ""); void Weird(const char* name, const IP_Hdr* ip, const char* addl = ""); - unsigned int CurrentSessions() { return session_map.size(); } + size_t CurrentSessions() { return session_map.size(); } private: using SessionMap = std::unordered_map; @@ -96,6 +103,8 @@ private: SessionMap session_map; detail::ProtocolStats* stats; + telemetry::CounterFamilyPtr ended_sessions_metric_family; + telemetry::CounterPtr ended_by_inactivity_metric; }; 
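// [Editorial sketch, not part of this patch] The new ended_sessions counter above is a
// "pull" metric: its value is produced by a collect callback at scrape time (here, by
// reading zeek::detail::killed_by_inactivity) rather than by explicit Inc() calls. A
// minimal standard-library illustration of that pattern; all names are hypothetical:

#include <cstdint>
#include <functional>
#include <utility>

class PullCounter {
public:
    explicit PullCounter(std::function<double()> cb) : callback_(std::move(cb)) {}

    // Evaluated on demand, e.g. whenever a scraper asks for the current value.
    double Value() const { return callback_ ? callback_() : 0.0; }

private:
    std::function<double()> callback_;
};

// Usage sketch: mirror an existing tally without instrumenting every increment site.
// static uint64_t killed_by_inactivity = 0;  // hypothetical stand-in for the real tally
// PullCounter ended_by_inactivity{[] { return static_cast<double>(killed_by_inactivity); }};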
} // namespace session diff --git a/src/spicy/manager.cc b/src/spicy/manager.cc index 1a9420e22a..7919380111 100644 --- a/src/spicy/manager.cc +++ b/src/spicy/manager.cc @@ -61,7 +61,6 @@ void Manager::registerSpicyModuleEnd() { } void Manager::registerProtocolAnalyzer(const std::string& name, hilti::rt::Protocol proto, - const hilti::rt::Vector<::zeek::spicy::rt::PortRange>& ports, const std::string& parser_orig, const std::string& parser_resp, const std::string& replaces, const std::string& linker_scope) { SPICY_DEBUG(hilti::rt::fmt("Have Spicy protocol analyzer %s", name)); @@ -74,7 +73,6 @@ void Manager::registerProtocolAnalyzer(const std::string& name, hilti::rt::Proto info.name_zeek = hilti::rt::replace(name, "::", "_"); info.name_zeekygen = hilti::rt::fmt("", name); info.protocol = proto; - info.ports = ports; info.linker_scope = linker_scope; // We may have that analyzer already iff it was previously pre-registered @@ -701,25 +699,6 @@ void Manager::InitPostScript() { if ( ! tag ) reporter->InternalError("cannot get analyzer tag for '%s'", p.name_analyzer.c_str()); - for ( const auto& ports : p.ports ) { - const auto proto = ports.begin.protocol(); - - // Port ranges are closed intervals. - for ( auto port = ports.begin.port(); port <= ports.end.port(); ++port ) { - const auto port_ = hilti::rt::Port(port, proto); - SPICY_DEBUG(hilti::rt::fmt(" Scheduling analyzer for port %s", port_)); - analyzer_mgr->RegisterAnalyzerForPort(tag, transport_protocol(port_), port); - - // Don't double register in case of single-port ranges. - if ( ports.begin.port() == ports.end.port() ) - break; - - // Explicitly prevent overflow. - if ( port == std::numeric_limits::max() ) - break; - } - } - if ( p.parser_resp ) { for ( auto port : p.parser_resp->ports ) { if ( port.direction != ::spicy::rt::Direction::Both && diff --git a/src/spicy/manager.h b/src/spicy/manager.h index 118e03b6c3..195ae3adf1 100644 --- a/src/spicy/manager.h +++ b/src/spicy/manager.h @@ -85,7 +85,6 @@ public: * * @param name name of the analyzer as defined in its EVT file * @param proto analyzer's transport-layer protocol - * @param prts well-known ports for the analyzer; it'll be activated automatically for these * @param parser_orig name of the Spicy parser for the originator side; must match the name that * Spicy registers the unit's parser with * @param parser_resp name of the Spicy parser for the originator side; must match the name that @@ -95,10 +94,9 @@ public: * @param linker_scope scope of current HLTO file, which will restrict visibility of the * registration */ - void registerProtocolAnalyzer(const std::string& name, hilti::rt::Protocol proto, - const hilti::rt::Vector<::zeek::spicy::rt::PortRange>& ports, - const std::string& parser_orig, const std::string& parser_resp, - const std::string& replaces, const std::string& linker_scope); + void registerProtocolAnalyzer(const std::string& name, hilti::rt::Protocol proto, const std::string& parser_orig, + const std::string& parser_resp, const std::string& replaces, + const std::string& linker_scope); /** * Runtime method to register a file analyzer with its Zeek-side @@ -343,7 +341,6 @@ private: std::string name_parser_resp; std::string name_replaces; hilti::rt::Protocol protocol = hilti::rt::Protocol::Undef; - hilti::rt::Vector<::zeek::spicy::rt::PortRange> ports; std::string linker_scope; // Computed and available once the analyzer has been registered. 
@@ -357,7 +354,7 @@ private: bool operator==(const ProtocolAnalyzerInfo& other) const { return name_analyzer == other.name_analyzer && name_parser_orig == other.name_parser_orig && name_parser_resp == other.name_parser_resp && name_replaces == other.name_replaces && - protocol == other.protocol && ports == other.ports && linker_scope == other.linker_scope; + protocol == other.protocol && linker_scope == other.linker_scope; } bool operator!=(const ProtocolAnalyzerInfo& other) const { return ! (*this == other); } diff --git a/src/spicy/runtime-support.cc b/src/spicy/runtime-support.cc index f5afd37461..8dbf0c39a7 100644 --- a/src/spicy/runtime-support.cc +++ b/src/spicy/runtime-support.cc @@ -26,12 +26,11 @@ void rt::register_spicy_module_begin(const std::string& name, const std::string& void rt::register_spicy_module_end() { spicy_mgr->registerSpicyModuleEnd(); } -void rt::register_protocol_analyzer(const std::string& name, hilti::rt::Protocol proto, - const hilti::rt::Vector<::zeek::spicy::rt::PortRange>& ports, - const std::string& parser_orig, const std::string& parser_resp, - const std::string& replaces, const std::string& linker_scope) { +void rt::register_protocol_analyzer(const std::string& name, hilti::rt::Protocol proto, const std::string& parser_orig, + const std::string& parser_resp, const std::string& replaces, + const std::string& linker_scope) { auto _ = hilti::rt::profiler::start("zeek/rt/register_protocol_analyzer"); - spicy_mgr->registerProtocolAnalyzer(name, proto, ports, parser_orig, parser_resp, replaces, linker_scope); + spicy_mgr->registerProtocolAnalyzer(name, proto, parser_orig, parser_resp, replaces, linker_scope); } void rt::register_file_analyzer(const std::string& name, const hilti::rt::Vector& mime_types, diff --git a/src/spicy/runtime-support.h b/src/spicy/runtime-support.h index 9ffef2d9d0..c0bf9f4631 100644 --- a/src/spicy/runtime-support.h +++ b/src/spicy/runtime-support.h @@ -80,7 +80,7 @@ public: : ParameterMismatch(_fmt(have, want)) {} private: - std::string _fmt(const std::string_view& have, const TypePtr& want) { + static std::string _fmt(const std::string_view& have, const TypePtr& want) { ODesc d; want->Describe(&d); return hilti::rt::fmt("cannot convert Spicy value of type '%s' to Zeek value of type '%s'", have, @@ -106,10 +106,9 @@ void register_spicy_module_begin(const std::string& id, const std::string& descr * Registers a Spicy protocol analyzer with its EVT meta information with the * plugin's runtime. 
*/ -void register_protocol_analyzer(const std::string& id, hilti::rt::Protocol proto, - const hilti::rt::Vector<::zeek::spicy::rt::PortRange>& ports, - const std::string& parser_orig, const std::string& parser_resp, - const std::string& replaces, const std::string& linker_scope); +void register_protocol_analyzer(const std::string& id, hilti::rt::Protocol proto, const std::string& parser_orig, + const std::string& parser_resp, const std::string& replaces, + const std::string& linker_scope); /** * Registers a Spicy file analyzer with its EVT meta information with the diff --git a/src/spicy/spicyz/glue-compiler.cc b/src/spicy/spicyz/glue-compiler.cc index e9240ed245..c34d7e1f6b 100644 --- a/src/spicy/spicyz/glue-compiler.cc +++ b/src/spicy/spicyz/glue-compiler.cc @@ -260,79 +260,6 @@ static std::string extract_expr(const std::string& chunk, size_t* i) { return expr; } -static hilti::rt::Port extract_port(const std::string& chunk, size_t* i) { - eat_spaces(chunk, i); - - std::string s; - size_t j = *i; - - while ( j < chunk.size() && isdigit(chunk[j]) ) - ++j; - - if ( *i == j ) - throw ParseError("cannot parse port specification"); - - hilti::rt::Protocol proto; - uint64_t port = std::numeric_limits::max(); - - s = chunk.substr(*i, j - *i); - hilti::util::atoi_n(s.begin(), s.end(), 10, &port); - - if ( port > 65535 ) - throw ParseError("port outside of valid range"); - - *i = j; - - if ( chunk[*i] != '/' ) - throw ParseError("cannot parse port specification"); - - (*i)++; - - if ( looking_at(chunk, *i, "tcp") ) { - proto = hilti::rt::Protocol::TCP; - eat_token(chunk, i, "tcp"); - } - - else if ( looking_at(chunk, *i, "udp") ) { - proto = hilti::rt::Protocol::UDP; - eat_token(chunk, i, "udp"); - } - - else if ( looking_at(chunk, *i, "icmp") ) { - proto = hilti::rt::Protocol::ICMP; - eat_token(chunk, i, "icmp"); - } - - else - throw ParseError("cannot parse port specification"); - - return {static_cast(port), proto}; -} - -static ::zeek::spicy::rt::PortRange extract_port_range(const std::string& chunk, size_t* i) { - auto start = extract_port(chunk, i); - auto end = std::optional(); - - if ( looking_at(chunk, *i, "-") ) { - eat_token(chunk, i, "-"); - end = extract_port(chunk, i); - } - - if ( end ) { - if ( start.protocol() != end->protocol() ) - throw ParseError("start and end of port range must have same protocol"); - - if ( start.port() > end->port() ) - throw ParseError("start of port range cannot be after its end"); - } - - if ( ! end ) - // EVT port ranges are a closed. - end = hilti::rt::Port(start.port(), start.protocol()); - - return {start, *end}; -} - void GlueCompiler::init(Driver* driver, int zeek_version) { _driver = driver; _zeek_version = zeek_version; @@ -704,25 +631,11 @@ glue::ProtocolAnalyzer GlueCompiler::parseProtocolAnalyzer(const std::string& ch } } - else if ( looking_at(chunk, i, "ports") ) { - eat_token(chunk, &i, "ports"); - eat_token(chunk, &i, "{"); - - while ( true ) { - a.ports.push_back(extract_port_range(chunk, &i)); - - if ( looking_at(chunk, i, "}") ) { - eat_token(chunk, &i, "}"); - break; - } - - eat_token(chunk, &i, ","); - } - } - - else if ( looking_at(chunk, i, "port") ) { - eat_token(chunk, &i, "port"); - a.ports.push_back(extract_port_range(chunk, &i)); + else if ( looking_at(chunk, i, "ports") || looking_at(chunk, i, "port") ) { + throw ParseError(hilti::rt::fmt( + "Analyzer %s is using the removed 'port' or 'ports' keyword to register " + "well-known ports. 
Use Analyzer::register_for_ports() in the accompanying Zeek script instead.", + a.name)); } else if ( looking_at(chunk, i, "replaces") ) { @@ -739,14 +652,6 @@ glue::ProtocolAnalyzer GlueCompiler::parseProtocolAnalyzer(const std::string& ch eat_token(chunk, &i, ","); } - if ( ! a.ports.empty() ) - hilti::logger().warning( - hilti::rt:: - fmt("Remove in v7.1: Analyzer %s is using the deprecated 'port' or 'ports' keyword to register " - "well-known ports. Use Analyzer::register_for_ports() in the accompanying Zeek script instead.", - a.name), - a.location); - return a; } @@ -1034,13 +939,6 @@ bool GlueCompiler::compile() { preinit_body.addCall("zeek_rt::register_protocol_analyzer", {builder()->stringMutable(a.name.str()), builder()->id(protocol), - builder()->vector( - hilti::util::transform(a.ports, - [this](const auto& p) -> hilti::Expression* { - return builder()->call("zeek_rt::make_port_range", - {builder()->port(p.begin), - builder()->port(p.end)}); - })), builder()->stringMutable(a.unit_name_orig.str()), builder()->stringMutable(a.unit_name_resp.str()), builder()->stringMutable(a.replaces), builder()->scope()}); diff --git a/src/spicy/spicyz/glue-compiler.h b/src/spicy/spicyz/glue-compiler.h index 58e42909f3..22ffcdc332 100644 --- a/src/spicy/spicyz/glue-compiler.h +++ b/src/spicy/spicyz/glue-compiler.h @@ -45,7 +45,6 @@ struct ProtocolAnalyzer { hilti::Location location; /**< Location where the analyzer was defined. */ hilti::ID name; /**< Name of the analyzer. */ hilti::rt::Protocol protocol = hilti::rt::Protocol::Undef; /**< The transport layer the analyzer uses. */ - std::vector<::zeek::spicy::rt::PortRange> ports; /**< The ports associated with the analyzer. */ hilti::ID unit_name_orig; /**< The fully-qualified name of the unit type to parse the originator side. 
*/ hilti::ID unit_name_resp; /**< The fully-qualified name of the unit type to parse the originator diff --git a/src/stats.bif b/src/stats.bif index 50dcc5685f..5d410bc9bf 100644 --- a/src/stats.bif +++ b/src/stats.bif @@ -83,7 +83,7 @@ function get_conn_stats%(%): ConnStats r->Assign(n++, Connection::TotalConnections()); r->Assign(n++, Connection::CurrentConnections()); - r->Assign(n++, session_mgr->CurrentSessions()); + r->Assign(n++, static_cast(session_mgr->CurrentSessions())); session::Stats s; if ( session_mgr ) @@ -252,10 +252,10 @@ function get_dns_stats%(%): DNSStats r->Assign(n++, static_cast(dstats.successful)); r->Assign(n++, static_cast(dstats.failed)); r->Assign(n++, static_cast(dstats.pending)); - r->Assign(n++, static_cast(dstats.cached_hosts)); - r->Assign(n++, static_cast(dstats.cached_addresses)); - r->Assign(n++, static_cast(dstats.cached_texts)); - r->Assign(n++, static_cast(dstats.cached_total)); + r->Assign(n++, static_cast(dstats.cached.hosts)); + r->Assign(n++, static_cast(dstats.cached.addresses)); + r->Assign(n++, static_cast(dstats.cached.texts)); + r->Assign(n++, static_cast(dstats.cached.total)); return std::move(r); %} @@ -337,7 +337,7 @@ function get_thread_stats%(%): ThreadStats auto r = zeek::make_intrusive(ThreadStats); int n = 0; - r->Assign(n++, zeek::thread_mgr->NumThreads()); + r->Assign(n++, static_cast(zeek::thread_mgr->NumThreads())); return std::move(r); %} diff --git a/src/telemetry/CMakeLists.txt b/src/telemetry/CMakeLists.txt index d61cbdb671..a760dcd13c 100644 --- a/src/telemetry/CMakeLists.txt +++ b/src/telemetry/CMakeLists.txt @@ -9,6 +9,7 @@ zeek_add_subdir_library( ProcessStats.cc Utils.cc BIFS + consts.bif telemetry.bif) # We don't need to include the civetweb headers across the whole project, only diff --git a/src/telemetry/Counter.cc b/src/telemetry/Counter.cc index 8b34624254..ef3679329e 100644 --- a/src/telemetry/Counter.cc +++ b/src/telemetry/Counter.cc @@ -3,7 +3,7 @@ using namespace zeek::telemetry; Counter::Counter(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept - : handle(family->Add(labels)), labels(labels) { + : family(family), handle(family->Add(labels)), labels(labels) { if ( callback ) { handle.AddCollectCallback(std::move(callback)); has_callback = true; diff --git a/src/telemetry/Counter.h b/src/telemetry/Counter.h index f6c49315b7..a0186ad219 100644 --- a/src/telemetry/Counter.h +++ b/src/telemetry/Counter.h @@ -56,6 +56,7 @@ public: bool CompareLabels(const prometheus::Labels& lbls) const { return labels == lbls; } private: + FamilyType* family = nullptr; Handle& handle; prometheus::Labels labels; bool has_callback = false; diff --git a/src/telemetry/Gauge.cc b/src/telemetry/Gauge.cc index 273c9a57bf..114ada3811 100644 --- a/src/telemetry/Gauge.cc +++ b/src/telemetry/Gauge.cc @@ -15,7 +15,7 @@ double Gauge::Value() const noexcept { Gauge::Gauge(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept - : handle(family->Add(labels)), labels(labels) { + : family(family), handle(family->Add(labels)), labels(labels) { if ( callback ) { handle.AddCollectCallback(std::move(callback)); has_callback = true; diff --git a/src/telemetry/Gauge.h b/src/telemetry/Gauge.h index 900cb7b784..652ff72667 100644 --- a/src/telemetry/Gauge.h +++ b/src/telemetry/Gauge.h @@ -74,6 +74,7 @@ public: bool CompareLabels(const prometheus::Labels& lbls) const { return labels == lbls; } private: + FamilyType* family = nullptr; Handle& handle; 
prometheus::Labels labels; bool has_callback = false; diff --git a/src/telemetry/Manager.cc b/src/telemetry/Manager.cc index 04c47ba3ef..7b19affc82 100644 --- a/src/telemetry/Manager.cc +++ b/src/telemetry/Manager.cc @@ -6,6 +6,7 @@ // CivetServer is from the civetweb submodule in prometheus-cpp #include +#include #include #include #include @@ -16,19 +17,32 @@ #include "zeek/3rdparty/doctest.h" #include "zeek/ID.h" +#include "zeek/RunState.h" #include "zeek/ZeekString.h" #include "zeek/broker/Manager.h" +#include "zeek/iosource/Manager.h" #include "zeek/telemetry/ProcessStats.h" #include "zeek/telemetry/Timer.h" +#include "zeek/telemetry/consts.bif.h" #include "zeek/telemetry/telemetry.bif.h" #include "zeek/threading/formatters/detail/json.h" namespace zeek::telemetry { -Manager::Manager() { prometheus_registry = std::make_shared(); } +/** + * Prometheus Collectable interface used to insert Zeek callback processing + * before the Prometheus registry's collection of metric data. + */ +class ZeekCollectable : public prometheus::Collectable { +public: + std::vector Collect() const override { + telemetry_mgr->WaitForPrometheusCallbacks(); + return {}; + } +}; + +Manager::Manager() : IOSource(true) { prometheus_registry = std::make_shared(); } -// This can't be defined as =default because of the use of unique_ptr with a forward-declared type -// in Manager.h Manager::~Manager() {} void Manager::InitPostScript() { @@ -75,7 +89,9 @@ void Manager::InitPostScript() { if ( ! getenv("ZEEKCTL_CHECK_CONFIG") ) { try { - prometheus_exposer = std::make_unique(prometheus_url, 2, callbacks); + prometheus_exposer = + std::make_unique(prometheus_url, BifConst::Telemetry::civetweb_threads, + callbacks); // CivetWeb stores a copy of the callbacks, so we're safe to delete the pointer here delete callbacks; @@ -84,19 +100,26 @@ void Manager::InitPostScript() { prometheus_url.c_str()); } + // This has to be inserted before the registry below. The exposer + // processes the collectors in order of insertion. We want to make + // sure that the callbacks get called and the values in the metrics + // are updated before prometheus-cpp scrapes them. 
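// [Editorial sketch, not part of this patch] The ordering described in the comment above
// relies on the exposer polling collectables in registration order: a "barrier"
// collectable registered first can block until pending callback work has finished, and
// only then is the real registry read. A minimal illustration of that idea, with all
// names hypothetical:

#include <functional>
#include <memory>
#include <vector>

struct Collectable {
    virtual ~Collectable() = default;
    virtual void Collect() = 0;
};

// Contributes no metrics of its own; it only waits for pending callback work.
struct Barrier : Collectable {
    std::function<void()> wait_for_callbacks;
    void Collect() override {
        if ( wait_for_callbacks )
            wait_for_callbacks();
    }
};

struct Scrape {
    std::vector<std::shared_ptr<Collectable>> collectables;  // polled in insertion order
    void Run() {
        for ( auto& c : collectables )
            c->Collect();
    }
};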
+ zeek_collectable = std::make_shared(); + prometheus_exposer->RegisterCollectable(zeek_collectable); + prometheus_exposer->RegisterCollectable(prometheus_registry); } } #ifdef HAVE_PROCESS_STAT_METRICS - static auto get_stats = [this]() -> const detail::process_stats* { + static auto get_stats = []() -> const detail::process_stats* { double now = util::current_time(); - if ( this->process_stats_last_updated < now - 0.01 ) { - this->current_process_stats = detail::get_process_stats(); - this->process_stats_last_updated = now; + if ( telemetry_mgr->process_stats_last_updated < now - 0.01 ) { + telemetry_mgr->current_process_stats = detail::get_process_stats(); + telemetry_mgr->process_stats_last_updated = now; } - return &this->current_process_stats; + return &telemetry_mgr->current_process_stats; }; rss_gauge = GaugeInstance("process", "resident_memory", {}, "Resident memory size", "bytes", []() -> prometheus::ClientMetric { @@ -114,13 +137,21 @@ void Manager::InitPostScript() { return metric; }); - cpu_gauge = GaugeInstance("process", "cpu", {}, "Total user and system CPU time spent", "seconds", - []() -> prometheus::ClientMetric { - auto* s = get_stats(); - prometheus::ClientMetric metric; - metric.gauge.value = s->cpu; - return metric; - }); + cpu_user_counter = CounterInstance("process", "cpu_user", {}, "Total user CPU time spent", "seconds", + []() -> prometheus::ClientMetric { + auto* s = get_stats(); + prometheus::ClientMetric metric; + metric.gauge.value = s->cpu_user; + return metric; + }); + + cpu_system_counter = CounterInstance("process", "cpu_system", {}, "Total system CPU time spent", "seconds", + []() -> prometheus::ClientMetric { + auto* s = get_stats(); + prometheus::ClientMetric metric; + metric.gauge.value = s->cpu_system; + return metric; + }); fds_gauge = GaugeInstance("process", "open_fds", {}, "Number of open file descriptors", "", []() -> prometheus::ClientMetric { @@ -130,6 +161,23 @@ void Manager::InitPostScript() { return metric; }); #endif + + if ( ! iosource_mgr->RegisterFd(collector_flare.FD(), this) ) { + reporter->FatalError("Failed to register telemetry collector descriptor"); + } +} + +void Manager::Terminate() { + // Notify the collector condition so that it doesn't hang waiting for + // a collector request to complete. + collector_cv.notify_all(); + + // Shut down the exposer first of all so we stop getting requests for + // data. This keeps us from getting a request on another thread while + // we're shutting down. + prometheus_exposer.reset(); + + iosource_mgr->UnregisterFd(collector_flare.FD(), this); } // -- collect metric stuff ----------------------------------------------------- @@ -545,6 +593,39 @@ HistogramPtr Manager::HistogramInstance(std::string_view prefix, std::string_vie return HistogramInstance(prefix, name, lbls, bounds_span, helptext, unit); } +void Manager::ProcessFd(int fd, int flags) { + std::unique_lock lk(collector_cv_mtx); + + collector_flare.Extinguish(); + + prometheus_registry->UpdateViaCallbacks(); + collector_response_idx = collector_request_idx; + + lk.unlock(); + collector_cv.notify_all(); +} + +void Manager::WaitForPrometheusCallbacks() { + std::unique_lock lk(collector_cv_mtx); + + ++collector_request_idx; + uint64_t expected_idx = collector_request_idx; + collector_flare.Fire(); + + // It should *not* take 5 seconds to go through all of the callbacks, but + // set this to have a timeout anyways just to avoid a deadlock. 
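// [Editorial sketch, not part of this patch] The wait that follows implements a
// request/response handshake: the requesting (scraper) thread bumps a request index,
// fires a wake-up, and blocks with a timeout until the servicing (main) thread reports
// an equal or newer response index. A minimal standalone version of that pattern using
// only the standard library; all names here are hypothetical:

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

struct CallbackHandshake {
    std::mutex mtx;
    std::condition_variable cv;
    uint64_t request_idx = 0;   // bumped by the requesting thread
    uint64_t response_idx = 0;  // advanced by the servicing thread

    // Requesting side: ask for one round of servicing and wait; returns false on timeout.
    bool Request(std::chrono::microseconds timeout) {
        std::unique_lock lk(mtx);
        uint64_t expected = ++request_idx;
        // A real implementation would also wake the servicing thread here
        // (e.g. via a pipe/flare that the servicing thread's event loop watches).
        return cv.wait_for(lk, timeout, [&] { return response_idx >= expected; });
    }

    // Servicing side: perform the pending work, publish completion, wake waiters.
    void Service() {
        std::unique_lock lk(mtx);
        // ... run the callbacks / update values here ...
        response_idx = request_idx;
        lk.unlock();
        cv.notify_all();
    }
};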
+ bool res = collector_cv.wait_for(lk, + std::chrono::microseconds( + static_cast(BifConst::Telemetry::callback_timeout * 1000000)), + [expected_idx]() { + return telemetry_mgr->collector_response_idx >= expected_idx || + zeek::run_state::terminating; + }); + + if ( ! res ) + fprintf(stderr, "Timeout waiting for prometheus callbacks\n"); +} + } // namespace zeek::telemetry // -- unit tests --------------------------------------------------------------- @@ -552,18 +633,6 @@ HistogramPtr Manager::HistogramInstance(std::string_view prefix, std::string_vie using namespace std::literals; using namespace zeek::telemetry; -namespace { - -template -auto toVector(zeek::Span xs) { - std::vector> result; - for ( auto&& x : xs ) - result.emplace_back(x); - return result; -} - -} // namespace - SCENARIO("telemetry managers provide access to counter families") { GIVEN("a telemetry manager") { Manager mgr; diff --git a/src/telemetry/Manager.h b/src/telemetry/Manager.h index c4c2537f1a..26647b7cf7 100644 --- a/src/telemetry/Manager.h +++ b/src/telemetry/Manager.h @@ -9,8 +9,10 @@ #include #include +#include "zeek/Flare.h" #include "zeek/IntrusivePtr.h" #include "zeek/Span.h" +#include "zeek/iosource/IOSource.h" #include "zeek/telemetry/Counter.h" #include "zeek/telemetry/Gauge.h" #include "zeek/telemetry/Histogram.h" @@ -29,15 +31,16 @@ class Registry; namespace zeek::telemetry { +class ZeekCollectable; + /** * Manages a collection of metric families. */ -class Manager final { +class Manager final : public iosource::IOSource { public: Manager(); Manager(const Manager&) = delete; - Manager& operator=(const Manager&) = delete; ~Manager(); @@ -50,6 +53,8 @@ public: */ void InitPostScript(); + void Terminate(); + /** * @return A VectorVal containing all counter and gauge metrics and their values matching prefix and name. * @param prefix The prefix pattern to use for filtering. Supports globbing. @@ -88,8 +93,8 @@ public: * @param labels Values for all label dimensions of the metric. * @param helptext Short explanation of the metric. * @param unit Unit of measurement. - * @param callback Passing a callback method will enable asynchronous mode. The callback method will be called by - * the metrics subsystem whenever data is requested. + * @param callback Passing a callback method will enable asynchronous mode. The callback method will be called + * by the metrics subsystem whenever data is requested. */ CounterPtr CounterInstance(std::string_view prefix, std::string_view name, Span labels, std::string_view helptext, std::string_view unit = "", @@ -124,8 +129,8 @@ public: * @param labels Values for all label dimensions of the metric. * @param helptext Short explanation of the metric. * @param unit Unit of measurement. - * @param callback Passing a callback method will enable asynchronous mode. The callback method will be called by - * the metrics subsystem whenever data is requested. + * @param callback Passing a callback method will enable asynchronous mode. The callback method will be called + * by the metrics subsystem whenever data is requested. 
*/ GaugePtr GaugeInstance(std::string_view prefix, std::string_view name, Span labels, std::string_view helptext, std::string_view unit = "", @@ -212,6 +217,12 @@ public: */ std::shared_ptr GetRegistry() const { return prometheus_registry; } + // IOSource interface + double GetNextTimeout() override { return -1.0; } + void Process() override {} + const char* Tag() override { return "Telemetry::Manager"; } + void ProcessFd(int fd, int flags) override; + protected: template static auto WithLabelNames(Span xs, F continuation) { @@ -231,6 +242,15 @@ protected: } } + friend class ZeekCollectable; + + /** + * Fires the flare for prometheus-cpp callback handling and waits for it to complete. + * This can be called from other threads to ensure the callback handling happens on + * the main thread. + */ + void WaitForPrometheusCallbacks(); + private: RecordValPtr GetMetricOptsRecord(const prometheus::MetricFamily& metric_family); void BuildClusterJson(); @@ -243,13 +263,22 @@ private: GaugePtr rss_gauge; GaugePtr vms_gauge; - GaugePtr cpu_gauge; + CounterPtr cpu_user_counter; + CounterPtr cpu_system_counter; GaugePtr fds_gauge; std::shared_ptr prometheus_registry; std::unique_ptr prometheus_exposer; std::string cluster_json; + + std::shared_ptr zeek_collectable; + zeek::detail::Flare collector_flare; + std::condition_variable collector_cv; + std::mutex collector_cv_mtx; + // Only modified under collector_cv_mtx! + uint64_t collector_request_idx = 0; + uint64_t collector_response_idx = 0; }; } // namespace zeek::telemetry diff --git a/src/telemetry/ProcessStats.cc b/src/telemetry/ProcessStats.cc index f2a0447b63..476efd4487 100644 --- a/src/telemetry/ProcessStats.cc +++ b/src/telemetry/ProcessStats.cc @@ -34,10 +34,10 @@ process_stats get_process_stats() { if ( task_info(mach_task_self(), TASK_THREAD_TIMES_INFO, reinterpret_cast(&info), &count) == KERN_SUCCESS ) { // Round to milliseconds. - result.cpu += info.user_time.seconds; - result.cpu += ceil(info.user_time.microseconds / 1000.0) / 1000.0; - result.cpu += info.system_time.seconds; - result.cpu += ceil(info.system_time.microseconds / 1000.0) / 1000.0; + result.cpu_user += info.user_time.seconds; + result.cpu_user += ceil(info.user_time.microseconds / 1000.0) / 1000.0; + result.cpu_system += info.system_time.seconds; + result.cpu_system += ceil(info.system_time.microseconds / 1000.0) / 1000.0; } } // Fetch open file handles. 
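// [Editorial sketch, not part of this patch] The macOS branch above converts a
// (seconds, microseconds) pair into seconds as a double, rounding the fractional part
// up to whole milliseconds. A standalone equivalent of that conversion:

#include <cmath>
#include <cstdint>

// e.g. (2 s, 1500 us) -> 2.002: the microsecond component is rounded up to the next
// millisecond before being expressed as a fraction of a second.
double to_seconds_ms_rounded(int64_t seconds, int64_t microseconds) {
    return static_cast<double>(seconds) + std::ceil(microseconds / 1000.0) / 1000.0;
}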
@@ -154,7 +154,8 @@ process_stats get_process_stats() { result.rss = rss_pages * page_size; result.vms = vmsize_bytes; - result.cpu = static_cast(utime_ticks + stime_ticks) / ticks_per_second; + result.cpu_user = static_cast(utime_ticks) / ticks_per_second; + result.cpu_system = static_cast(stime_ticks) / ticks_per_second; result.fds = count_entries_in_directory("/proc/self/fd"); } @@ -187,7 +188,10 @@ process_stats get_process_stats() { if ( kp ) { result.vms = kp->ki_size; result.rss = kp->ki_rssize * getpagesize(); - result.cpu = static_cast(kp->ki_runtime) / 1000000.0; + result.cpu_user = static_cast(kp->ki_rusage.ru_utime.tv_sec) + + (static_cast(kp->ki_rusage.ru_utime.tv_usec) / 1e6); + result.cpu_system = static_cast(kp->ki_rusage.ru_stime.tv_sec) + + (static_cast(kp->ki_rusage.ru_stime.tv_usec) / 1e6); struct procstat* procstat = procstat_open_sysctl(); struct filestat_list* files = procstat_getfiles(procstat, kp, 0); diff --git a/src/telemetry/ProcessStats.h b/src/telemetry/ProcessStats.h index 02581362cc..d79bb2cb5f 100644 --- a/src/telemetry/ProcessStats.h +++ b/src/telemetry/ProcessStats.h @@ -9,7 +9,8 @@ namespace zeek::telemetry::detail { struct process_stats { int64_t rss = 0; int64_t vms = 0; - double cpu = 0.0; + double cpu_user = 0.0; + double cpu_system = 0.0; int64_t fds = 0; }; diff --git a/src/telemetry/consts.bif b/src/telemetry/consts.bif new file mode 100644 index 0000000000..76c256dfa1 --- /dev/null +++ b/src/telemetry/consts.bif @@ -0,0 +1,2 @@ +const Telemetry::callback_timeout: interval; +const Telemetry::civetweb_threads: count; diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 5620a0bf80..8e67676643 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -2,12 +2,14 @@ #include #include +#include +#include #include "zeek/Event.h" #include "zeek/IPAddr.h" #include "zeek/NetVar.h" #include "zeek/RunState.h" -#include "zeek/iosource/Manager.h" +#include "zeek/telemetry/Manager.h" namespace zeek::threading { namespace detail { @@ -22,6 +24,8 @@ void HeartbeatTimer::Dispatch(double t, bool is_expire) { } // namespace detail +static std::vector pending_bucket_brackets = {1, 10, 100, 1000, 10000, std::numeric_limits::max()}; + Manager::Manager() { DBG_LOG(DBG_THREADING, "Creating thread manager ..."); @@ -36,6 +40,112 @@ Manager::~Manager() { Terminate(); } +void Manager::InitPostScript() { + static auto get_message_thread_stats = []() -> const BucketedMessages* { + if ( ! 
thread_mgr->terminating ) { + double now = util::current_time(); + if ( thread_mgr->bucketed_messages_last_updated < now - 1 ) { + thread_mgr->current_bucketed_messages.pending_in_total = 0; + thread_mgr->current_bucketed_messages.pending_out_total = 0; + for ( auto& m : thread_mgr->current_bucketed_messages.pending_in ) + m.second = 0; + for ( auto& m : thread_mgr->current_bucketed_messages.pending_out ) + m.second = 0; + + MsgThread::Stats thread_stats; + for ( const auto& t : thread_mgr->msg_threads ) { + t->GetStats(&thread_stats); + + thread_mgr->current_bucketed_messages.pending_in_total += thread_stats.pending_in; + thread_mgr->current_bucketed_messages.pending_out_total += thread_stats.pending_out; + + for ( auto upper_limit : pending_bucket_brackets ) { + if ( thread_stats.pending_in <= upper_limit ) + thread_mgr->current_bucketed_messages.pending_in[upper_limit]++; + + if ( thread_stats.pending_out <= upper_limit ) + thread_mgr->current_bucketed_messages.pending_out[upper_limit]++; + } + } + + thread_mgr->bucketed_messages_last_updated = now; + } + } + + return &thread_mgr->current_bucketed_messages; + }; + + num_threads_metric = + telemetry_mgr->GaugeInstance("zeek", "msgthread_active_threads", {}, "Number of active threads", "", + []() -> prometheus::ClientMetric { + prometheus::ClientMetric metric; + metric.gauge.value = + thread_mgr ? static_cast(thread_mgr->all_threads.size()) : 0.0; + return metric; + }); + + total_threads_metric = telemetry_mgr->CounterInstance("zeek", "msgthread_threads", {}, "Total number of threads"); + total_messages_in_metric = + telemetry_mgr->CounterInstance("zeek", "msgthread_in_messages", {}, "Number of inbound messages received", ""); + + total_messages_out_metric = + telemetry_mgr->CounterInstance("zeek", "msgthread_out_messages", {}, "Number of outbound messages sent", ""); + + pending_messages_in_metric = + telemetry_mgr->GaugeInstance("zeek", "msgthread_pending_in_messages", {}, "Pending number of inbound messages", + "", []() -> prometheus::ClientMetric { + auto* s = get_message_thread_stats(); + prometheus::ClientMetric metric; + metric.gauge.value = static_cast(s->pending_in_total); + return metric; + }); + pending_messages_out_metric = + telemetry_mgr->GaugeInstance("zeek", "msgthread_pending_out_messages", {}, + "Pending number of outbound messages", "", []() -> prometheus::ClientMetric { + auto* s = get_message_thread_stats(); + prometheus::ClientMetric metric; + metric.gauge.value = static_cast(s->pending_out_total); + return metric; + }); + + pending_message_in_buckets_fam = + telemetry_mgr->GaugeFamily("zeek", "msgthread_pending_messages_in_buckets", {"le"}, + "Number of threads with pending inbound messages split into buckets"); + pending_message_out_buckets_fam = + telemetry_mgr->GaugeFamily("zeek", "msgthread_pending_messages_out_buckets", {"le"}, + "Number of threads with pending outbound messages split into buckets"); + + for ( auto upper_limit : pending_bucket_brackets ) { + std::string upper_limit_str; + if ( upper_limit == std::numeric_limits::max() ) + upper_limit_str = "inf"; + else + upper_limit_str = std::to_string(upper_limit); + + current_bucketed_messages.pending_in[upper_limit] = 0; + current_bucketed_messages.pending_out[upper_limit] = 0; + + pending_message_in_buckets[upper_limit] = + pending_message_in_buckets_fam->GetOrAdd({{"le", upper_limit_str}}, + [upper_limit]() -> prometheus::ClientMetric { + auto* s = get_message_thread_stats(); + prometheus::ClientMetric metric; + metric.gauge.value = + 
static_cast(s->pending_in.at(upper_limit)); + return metric; + }); + pending_message_out_buckets[upper_limit] = + pending_message_out_buckets_fam->GetOrAdd({{"le", upper_limit_str}}, + [upper_limit]() -> prometheus::ClientMetric { + auto* s = get_message_thread_stats(); + prometheus::ClientMetric metric; + metric.gauge.value = + static_cast(s->pending_out.at(upper_limit)); + return metric; + }); + } +} + void Manager::Terminate() { DBG_LOG(DBG_THREADING, "Terminating thread manager ..."); terminating = true; @@ -78,6 +188,8 @@ void Manager::AddThread(BasicThread* thread) { if ( ! heartbeat_timer_running ) StartHeartbeatTimer(); + + total_threads_metric->Inc(); } void Manager::AddMsgThread(MsgThread* thread) { @@ -133,6 +245,10 @@ void Manager::StartHeartbeatTimer() { new detail::HeartbeatTimer(run_state::network_time + BifConst::Threading::heartbeat_interval)); } +void Manager::MessageIn() { total_messages_in_metric->Inc(); } + +void Manager::MessageOut() { total_messages_out_metric->Inc(); } + // Raise everything in here as warnings so it is passed to scriptland without // looking "fatal". In addition to these warnings, ReaderBackend will queue // one reporter message. diff --git a/src/threading/Manager.h b/src/threading/Manager.h index b075e6a70d..897068bee5 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -1,12 +1,23 @@ #pragma once #include +#include #include #include "zeek/Timer.h" #include "zeek/threading/MsgThread.h" namespace zeek { + +namespace telemetry { +class Gauge; +using GaugePtr = std::shared_ptr; +class GaugeFamily; +using GaugeFamilyPtr = std::shared_ptr; +class Counter; +using CounterPtr = std::shared_ptr; +} // namespace telemetry + namespace threading { namespace detail { @@ -46,6 +57,12 @@ public: */ ~Manager(); + /** + * Performs initialization that can only happen after script parsing has + * completed. + */ + void InitPostScript(); + /** * Terminates the manager's processor. The method signals all threads * to terminates and wait for them to do so. It then joins them and @@ -77,7 +94,7 @@ public: * threads that are not yet joined, including any potentially in * Terminating() state. */ - int NumThreads() const { return all_threads.size(); } + size_t NumThreads() const { return all_threads.size(); } /** * Signals a specific threads to terminate immediately. @@ -136,6 +153,16 @@ protected: */ void StartHeartbeatTimer(); + /** + * Called by MsgThread::SendIn() to update metrics. + */ + void MessageIn(); + + /** + * Called by MsgThread::SendOut() to update metrics. 
+ */ + void MessageOut(); + private: using all_thread_list = std::list; all_thread_list all_threads; @@ -151,6 +178,27 @@ private: msg_stats_list stats; bool heartbeat_timer_running = false; + telemetry::GaugePtr num_threads_metric; + telemetry::CounterPtr total_threads_metric; + telemetry::CounterPtr total_messages_in_metric; + telemetry::CounterPtr total_messages_out_metric; + telemetry::GaugePtr pending_messages_in_metric; + telemetry::GaugePtr pending_messages_out_metric; + + telemetry::GaugeFamilyPtr pending_message_in_buckets_fam; + telemetry::GaugeFamilyPtr pending_message_out_buckets_fam; + std::map pending_message_in_buckets; + std::map pending_message_out_buckets; + + struct BucketedMessages { + uint64_t pending_in_total; + uint64_t pending_out_total; + std::map pending_in; + std::map pending_out; + }; + + BucketedMessages current_bucketed_messages; + double bucketed_messages_last_updated = 0.0; }; } // namespace threading diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index 022a8ce2b4..5bfb8ebeb4 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -9,6 +9,7 @@ #include "zeek/Obj.h" #include "zeek/RunState.h" #include "zeek/iosource/Manager.h" +#include "zeek/telemetry/Manager.h" #include "zeek/threading/Manager.h" // Set by Zeek's main signal handler. @@ -387,6 +388,8 @@ void MsgThread::SendIn(BasicInputMessage* msg, bool force) { queue_in.Put(msg); ++cnt_sent_in; + + zeek::thread_mgr->MessageIn(); } void MsgThread::SendOut(BasicOutputMessage* msg, bool force) { @@ -399,6 +402,8 @@ void MsgThread::SendOut(BasicOutputMessage* msg, bool force) { ++cnt_sent_out; + zeek::thread_mgr->MessageOut(); + if ( io_source ) io_source->Fire(); } diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 259e64b11f..0bb3dbaec4 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -3,8 +3,6 @@ #include #include "zeek/DebugLogger.h" -#include "zeek/Flare.h" -#include "zeek/iosource/IOSource.h" #include "zeek/threading/BasicThread.h" #include "zeek/threading/Queue.h" diff --git a/src/threading/formatters/JSON.cc b/src/threading/formatters/JSON.cc index 3231adfead..08a2c2a703 100644 --- a/src/threading/formatters/JSON.cc +++ b/src/threading/formatters/JSON.cc @@ -8,6 +8,8 @@ #define __STDC_LIMIT_MACROS #endif +#define RAPIDJSON_HAS_STDSTRING 1 + #include #include #include @@ -20,12 +22,6 @@ namespace zeek::threading::formatter { -// For deprecated NullDoubleWriter -JSON::NullDoubleWriter::NullDoubleWriter(rapidjson::StringBuffer& stream) - : writer(std::make_unique(stream)) {} - -bool JSON::NullDoubleWriter::Double(double d) { return writer->Double(d); } - JSON::JSON(MsgThread* t, TimeFormat tf, bool arg_include_unset_fields) : Formatter(t), timestamps(tf), include_unset_fields(arg_include_unset_fields) {} diff --git a/src/threading/formatters/JSON.h b/src/threading/formatters/JSON.h index d800ae986a..44dd78d198 100644 --- a/src/threading/formatters/JSON.h +++ b/src/threading/formatters/JSON.h @@ -4,12 +4,6 @@ #include -#define RAPIDJSON_HAS_STDSTRING 1 -// Remove in v7.1 when removing NullDoubleWriter below and also remove -// rapidjson include tweaks from CMake's dynamic_plugin_base target. 
-#include -#include - #include "zeek/threading/Formatter.h" namespace zeek::json::detail { @@ -39,16 +33,6 @@ public: Value* ParseValue(const std::string& s, const std::string& name, TypeTag type, TypeTag subtype = TYPE_ERROR) const override; - class NullDoubleWriter : public rapidjson::Writer { - public: - [[deprecated("Remove in v7.1 - This is an implementation detail.")]] NullDoubleWriter( - rapidjson::StringBuffer& stream); - bool Double(double d); - - private: - std::unique_ptr writer; - }; - private: void BuildJSON(zeek::json::detail::NullDoubleWriter& writer, Value* val, const std::string& name = "") const; diff --git a/src/zeek-setup.cc b/src/zeek-setup.cc index b05af74f20..8b740a27d8 100644 --- a/src/zeek-setup.cc +++ b/src/zeek-setup.cc @@ -376,6 +376,7 @@ static void terminate_zeek() { input_mgr->Terminate(); thread_mgr->Terminate(); broker_mgr->Terminate(); + telemetry_mgr->Terminate(); event_mgr.Drain(); @@ -716,6 +717,7 @@ SetupResult setup(int argc, char** argv, Options* zopts) { // when that variable is defined. auto early_shutdown = [] { broker_mgr->Terminate(); + telemetry_mgr->Terminate(); delete iosource_mgr; delete telemetry_mgr; }; @@ -800,6 +802,7 @@ SetupResult setup(int argc, char** argv, Options* zopts) { RecordType::InitPostScript(); telemetry_mgr->InitPostScript(); + thread_mgr->InitPostScript(); iosource_mgr->InitPostScript(); log_mgr->InitPostScript(); plugin_mgr->InitPostScript(); @@ -993,16 +996,6 @@ SetupResult setup(int argc, char** argv, Options* zopts) { if ( zeek_init ) event_mgr.Enqueue(zeek_init, Args{}); -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - EventRegistry::string_list dead_handlers = event_registry->UnusedHandlers(); -#pragma GCC diagnostic pop - - if ( ! dead_handlers.empty() && check_for_unused_event_handlers ) { - for ( const string& handler : dead_handlers ) - reporter->Warning("event handler never invoked: %s", handler.c_str()); - } - // Enable LeakSanitizer before zeek_init() and even before executing // top-level statements. Even though it's not bad if a leak happens only // once at initialization, we have to assume that script-layer code causing diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index aee483157a..5466f0ea13 100644 --- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -146,6 +146,7 @@ scripts/base/init-frameworks-and-bifs.zeek scripts/base/frameworks/files/magic/__load__.zeek scripts/base/frameworks/telemetry/options.zeek build/scripts/base/bif/__load__.zeek + build/scripts/base/bif/consts.bif.zeek build/scripts/base/bif/telemetry.bif.zeek build/scripts/base/bif/zeekygen.bif.zeek build/scripts/base/bif/pcap.bif.zeek diff --git a/testing/btest/Baseline/coverage.bare-mode-errors/errors b/testing/btest/Baseline/coverage.bare-mode-errors/errors index 96bd73c166..b1bb951e92 100644 --- a/testing/btest/Baseline/coverage.bare-mode-errors/errors +++ b/testing/btest/Baseline/coverage.bare-mode-errors/errors @@ -1,15 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. ### NOTE: This file has been sorted with diff-sort. 
-warning in <...>/__load__.zeek, line 1: deprecated script loaded from <...>/__load__.zeek:2 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/__load__.zeek, line 1: deprecated script loaded from <...>/__load__.zeek:2 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/__load__.zeek, line 1: deprecated script loaded from <...>/__load__.zeek:2 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/__load__.zeek, line 1: deprecated script loaded from command line arguments "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/extracted_file_limits.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:147 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/extracted_file_limits.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:147 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/extracted_file_limits.zeek, line 1: deprecated script loaded from command line arguments "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/packet-fragments.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:148 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/packet-fragments.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:148 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/packet-fragments.zeek, line 1: deprecated script loaded from command line arguments "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/warnings.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:149 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/warnings.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:149 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/warnings.zeek, line 1: deprecated script loaded from command line arguments "Remove in v7.1 The policy<...>/defaults package is deprecated. 
The options set here are now the defaults for Zeek in general."; diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index 40a1c5b84c..07dc32da6d 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -146,6 +146,7 @@ scripts/base/init-frameworks-and-bifs.zeek scripts/base/frameworks/files/magic/__load__.zeek scripts/base/frameworks/telemetry/options.zeek build/scripts/base/bif/__load__.zeek + build/scripts/base/bif/consts.bif.zeek build/scripts/base/bif/telemetry.bif.zeek build/scripts/base/bif/zeekygen.bif.zeek build/scripts/base/bif/pcap.bif.zeek diff --git a/testing/btest/Baseline/coverage.test-all-policy-cluster/.stderr b/testing/btest/Baseline/coverage.test-all-policy-cluster/.stderr index c27d68e2df..bff9a64e41 100644 --- a/testing/btest/Baseline/coverage.test-all-policy-cluster/.stderr +++ b/testing/btest/Baseline/coverage.test-all-policy-cluster/.stderr @@ -1,21 +1,5 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -warning in <...>/__load__.zeek, line 1: deprecated script loaded from <...>/__load__.zeek:2 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/extracted_file_limits.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:147 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/packet-fragments.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:148 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/warnings.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:149 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; received termination signal -warning in <...>/__load__.zeek, line 1: deprecated script loaded from <...>/__load__.zeek:2 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/extracted_file_limits.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:147 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/packet-fragments.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:148 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/warnings.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:149 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; received termination signal -warning in <...>/__load__.zeek, line 1: deprecated script loaded from <...>/__load__.zeek:2 "Remove in v7.1 The policy<...>/defaults package is deprecated. 
The options set here are now the defaults for Zeek in general."; -warning in <...>/extracted_file_limits.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:147 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/packet-fragments.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:148 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/warnings.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:149 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; received termination signal -warning in <...>/__load__.zeek, line 1: deprecated script loaded from <...>/__load__.zeek:2 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/extracted_file_limits.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:147 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/packet-fragments.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:148 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; -warning in <...>/warnings.zeek, line 1: deprecated script loaded from <...>/test-all-policy.zeek:149 "Remove in v7.1 The policy<...>/defaults package is deprecated. The options set here are now the defaults for Zeek in general."; received termination signal diff --git a/testing/btest/Baseline/language.deprecate-global/.stderr b/testing/btest/Baseline/language.deprecate-global/.stderr deleted file mode 100644 index ba832c6e5f..0000000000 --- a/testing/btest/Baseline/language.deprecate-global/.stderr +++ /dev/null @@ -1,3 +0,0 @@ -### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -warning in <...>/deprecate-global.zeek, line 22: Remove in v7.1: Use :: instead of GLOBAL:: (GLOBAL::test_function) -warning in <...>/deprecate-global.zeek, line 26: Remove in v7.1: Use :: instead of GLOBAL:: (GLOBAL::X) diff --git a/testing/btest/Baseline/language.global-colon-colon/.stderr b/testing/btest/Baseline/language.global-colon-colon/.stderr index a24c303643..49d861c74c 100644 --- a/testing/btest/Baseline/language.global-colon-colon/.stderr +++ b/testing/btest/Baseline/language.global-colon-colon/.stderr @@ -1,9 +1 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-warning in <...>/global-colon-colon.zeek, line 67: Remove in v7.1: Use :: instead of GLOBAL:: (GLOBAL::X) -warning in <...>/global-colon-colon.zeek, line 75: Remove in v7.1: Use :: instead of GLOBAL:: (GLOBAL::my_hook) -warning in <...>/global-colon-colon.zeek, line 82: Remove in v7.1: Use :: instead of GLOBAL:: (GLOBAL::func) -warning in <...>/global-colon-colon.zeek, line 89: Remove in v7.1: Use :: instead of GLOBAL:: (GLOBAL::funcX) -warning in <...>/global-colon-colon.zeek, line 110: Remove in v7.1: Use :: instead of GLOBAL:: (GLOBAL::X) -warning in <...>/global-colon-colon.zeek, line 118: Remove in v7.1: Use :: instead of GLOBAL:: (GLOBAL::my_hook) -warning in <...>/global-colon-colon.zeek, line 125: Remove in v7.1: Use :: instead of GLOBAL:: (GLOBAL::func) -warning in <...>/global-colon-colon.zeek, line 132: Remove in v7.1: Use :: instead of GLOBAL:: (GLOBAL::funcX) diff --git a/testing/btest/Baseline/language.global-colon-colon/out b/testing/btest/Baseline/language.global-colon-colon/out index 59ffb07de4..bc61803592 100644 --- a/testing/btest/Baseline/language.global-colon-colon/out +++ b/testing/btest/Baseline/language.global-colon-colon/out @@ -3,8 +3,6 @@ MyModule X (MyModule) print MyModule::X MyModule X -(MyModule) print GLOBAL::X - global X (MyModule) print ::X global X (MyModule) hook my_hook() @@ -13,32 +11,22 @@ (MyModule) hook MyModule::my_hook() MyModule::my_hook() (in GLOBAL) MyModule::my_hook() -(MyModule) hook GLOBAL::my_hook() - my_hook() (in GLOBAL) - ::my_hook() (in GLOBAL) - ::my_hook() (in MyModule using ::) (MyModule) hook ::my_hook() my_hook() (in GLOBAL) ::my_hook() (in GLOBAL) ::my_hook() (in MyModule using ::) (MyModule) call func() MyModule::func() -(MyModule) call GLOBAL::func() - GLOBAL::func() (MyModule) call ::func() GLOBAL::func() (MyModule) call funcX() ::funcX() (in MyModule) -(MyModule) call GLOBAL::funcX() - ::funcX() (in MyModule) (MyModule) call ::funcX() ::funcX() (in MyModule) (G) print X global X (G) print MyModule::X MyModule X -(G) print GLOBAL::X - global X (G) print ::X global X (G) hook my_hook() @@ -48,24 +36,16 @@ (G) MyModule::my_hook() MyModule::my_hook() (in GLOBAL) MyModule::my_hook() -(G) hook GLOBAL::my_hook() - my_hook() (in GLOBAL) - ::my_hook() (in GLOBAL) - ::my_hook() (in MyModule using ::) (G) hook ::my_hook() my_hook() (in GLOBAL) ::my_hook() (in GLOBAL) ::my_hook() (in MyModule using ::) (G) call func() GLOBAL::func() -(G) call GLOBAL::func() - GLOBAL::func() (G) call ::func() GLOBAL::func() (G) call funcX() ::funcX() (in MyModule) -(G) call GLOBAL::funcX() - ::funcX() (in MyModule) (G) call ::funcX() ::funcX() (in MyModule) MyModule::my_event() (in MyModule) diff --git a/testing/btest/Baseline/language.init-integration/out b/testing/btest/Baseline/language.init-integration/out index 12ca8b84ce..cc8864b11c 100644 --- a/testing/btest/Baseline/language.init-integration/out +++ b/testing/btest/Baseline/language.init-integration/out @@ -13,15 +13,15 @@ init_key2 in state2: 1 [worker-1] = [node_type=Cluster::WORKER, ip=127.0.0.1, p=5/udp, manager=manager-1] } { -[worker-4] = [node_type=Cluster::WORKER, ip=2.3.4.5, zone_id=, p=13/udp, interface=, manager=, time_machine=, id=, metrics_port=] +[worker-4] = [node_type=Cluster::WORKER, ip=2.3.4.5, zone_id=, p=13/udp, manager=, id=, metrics_port=] } { -[worker-4] = [node_type=Cluster::WORKER, ip=2.3.4.5, zone_id=, p=13/udp, interface=, manager=, time_machine=, id=, metrics_port=], -[worker-5] = [node_type=Cluster::WORKER, ip=3.4.5.6, zone_id=, p=15/tcp, interface=, manager=, 
time_machine=, id=, metrics_port=] +[worker-4] = [node_type=Cluster::WORKER, ip=2.3.4.5, zone_id=, p=13/udp, manager=, id=, metrics_port=], +[worker-5] = [node_type=Cluster::WORKER, ip=3.4.5.6, zone_id=, p=15/tcp, manager=, id=, metrics_port=] } { -[worker-4] = [node_type=Cluster::WORKER, ip=2.3.4.5, zone_id=, p=13/udp, interface=, manager=, time_machine=, id=, metrics_port=], -[worker-6] = [node_type=Cluster::WORKER, ip=4.5.6.7, zone_id=, p=17/udp, interface=, manager=, time_machine=, id=, metrics_port=] +[worker-4] = [node_type=Cluster::WORKER, ip=2.3.4.5, zone_id=, p=13/udp, manager=, id=, metrics_port=], +[worker-6] = [node_type=Cluster::WORKER, ip=4.5.6.7, zone_id=, p=17/udp, manager=, id=, metrics_port=] } { [3.0, 4] diff --git a/testing/btest/Baseline/language.when/out b/testing/btest/Baseline/language.when/out index 07f8c832ca..5e61f7ed4b 100644 --- a/testing/btest/Baseline/language.when/out +++ b/testing/btest/Baseline/language.when/out @@ -1,3 +1,4 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. done lookup successful +received termination signal diff --git a/testing/btest/Baseline/plugins.hooks/output b/testing/btest/Baseline/plugins.hooks/output index c3b551dc84..2514d98015 100644 --- a/testing/btest/Baseline/plugins.hooks/output +++ b/testing/btest/Baseline/plugins.hooks/output @@ -464,6 +464,7 @@ 0.000000 MetaHookPost LoadFile(0, ./comm.bif.zeek, <...>/comm.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, ./communityid.bif.zeek, <...>/communityid.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, ./const.bif.zeek, <...>/const.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, ./consts.bif.zeek, <...>/consts.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, ./contents, <...>/contents.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, ./control, <...>/control.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, ./data.bif.zeek, <...>/data.bif.zeek) -> -1 @@ -758,6 +759,7 @@ 0.000000 MetaHookPost LoadFileExtended(0, ./comm.bif.zeek, <...>/comm.bif.zeek) -> (-1, ) 0.000000 MetaHookPost LoadFileExtended(0, ./communityid.bif.zeek, <...>/communityid.bif.zeek) -> (-1, ) 0.000000 MetaHookPost LoadFileExtended(0, ./const.bif.zeek, <...>/const.bif.zeek) -> (-1, ) +0.000000 MetaHookPost LoadFileExtended(0, ./consts.bif.zeek, <...>/consts.bif.zeek) -> (-1, ) 0.000000 MetaHookPost LoadFileExtended(0, ./contents, <...>/contents.zeek) -> (-1, ) 0.000000 MetaHookPost LoadFileExtended(0, ./control, <...>/control.zeek) -> (-1, ) 0.000000 MetaHookPost LoadFileExtended(0, ./data.bif.zeek, <...>/data.bif.zeek) -> (-1, ) @@ -1384,6 +1386,7 @@ 0.000000 MetaHookPre LoadFile(0, ./comm.bif.zeek, <...>/comm.bif.zeek) 0.000000 MetaHookPre LoadFile(0, ./communityid.bif.zeek, <...>/communityid.bif.zeek) 0.000000 MetaHookPre LoadFile(0, ./const.bif.zeek, <...>/const.bif.zeek) +0.000000 MetaHookPre LoadFile(0, ./consts.bif.zeek, <...>/consts.bif.zeek) 0.000000 MetaHookPre LoadFile(0, ./contents, <...>/contents.zeek) 0.000000 MetaHookPre LoadFile(0, ./control, <...>/control.zeek) 0.000000 MetaHookPre LoadFile(0, ./data.bif.zeek, <...>/data.bif.zeek) @@ -1678,6 +1681,7 @@ 0.000000 MetaHookPre LoadFileExtended(0, ./comm.bif.zeek, <...>/comm.bif.zeek) 0.000000 MetaHookPre LoadFileExtended(0, ./communityid.bif.zeek, <...>/communityid.bif.zeek) 0.000000 MetaHookPre LoadFileExtended(0, ./const.bif.zeek, <...>/const.bif.zeek) +0.000000 MetaHookPre LoadFileExtended(0, ./consts.bif.zeek, <...>/consts.bif.zeek) 0.000000 MetaHookPre LoadFileExtended(0, 
./contents, <...>/contents.zeek) 0.000000 MetaHookPre LoadFileExtended(0, ./control, <...>/control.zeek) 0.000000 MetaHookPre LoadFileExtended(0, ./data.bif.zeek, <...>/data.bif.zeek) @@ -2305,6 +2309,7 @@ 0.000000 | HookLoadFile ./comm.bif.zeek <...>/comm.bif.zeek 0.000000 | HookLoadFile ./communityid.bif.zeek <...>/communityid.bif.zeek 0.000000 | HookLoadFile ./const.bif.zeek <...>/const.bif.zeek +0.000000 | HookLoadFile ./consts.bif.zeek <...>/consts.bif.zeek 0.000000 | HookLoadFile ./contents <...>/contents.zeek 0.000000 | HookLoadFile ./control <...>/control.zeek 0.000000 | HookLoadFile ./data.bif.zeek <...>/data.bif.zeek @@ -2599,6 +2604,7 @@ 0.000000 | HookLoadFileExtended ./comm.bif.zeek <...>/comm.bif.zeek 0.000000 | HookLoadFileExtended ./communityid.bif.zeek <...>/communityid.bif.zeek 0.000000 | HookLoadFileExtended ./const.bif.zeek <...>/const.bif.zeek +0.000000 | HookLoadFileExtended ./consts.bif.zeek <...>/consts.bif.zeek 0.000000 | HookLoadFileExtended ./contents <...>/contents.zeek 0.000000 | HookLoadFileExtended ./control <...>/control.zeek 0.000000 | HookLoadFileExtended ./data.bif.zeek <...>/data.bif.zeek diff --git a/testing/btest/Baseline/scripts.base.frameworks.telemetry.basic/out b/testing/btest/Baseline/scripts.base.frameworks.telemetry.basic/out index d35b64e3d4..fdde7d52ff 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.telemetry.basic/out +++ b/testing/btest/Baseline/scripts.base.frameworks.telemetry.basic/out @@ -1,5 +1,6 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -### zeek_session_metrics |2| +### zeek_session_metrics |3| +Telemetry::COUNTER, zeek, zeek_ended_sessions_total, [reason], [inactivity], 0.0 Telemetry::COUNTER, zeek, zeek_total_sessions_total, [protocol], [tcp], 500.0 Telemetry::GAUGE, zeek, zeek_active_sessions, [protocol], [tcp], 500.0 ### bt* metrics |5| diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-ntlm/conn.log b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-ntlm/conn.log new file mode 100644 index 0000000000..27c56bc33b --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-ntlm/conn.log @@ -0,0 +1,11 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string count string count count count count set[string] +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 60126 127.0.1.1 389 tcp ldap_tcp 2.290081 289 1509 SF 0 ShADadFf 12 921 15 2297 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-ntlm/ldap.log b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-ntlm/ldap.log new file mode 100644 index 0000000000..cd94c49d5b --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-ntlm/ldap.log @@ -0,0 +1,13 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ldap +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p message_id version opcode result diagnostic_message object argument +#types time string addr port addr port int int string string string string string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 60126 127.0.1.1 389 1 3 bind SASL SASL bind in progress SASL(0): successful result: - NTLM +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 60126 127.0.1.1 389 2 3 bind SASL success - - NTLM +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 60126 127.0.1.1 389 4 - unbind - - - - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-ntlm/ldap_search.log b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-ntlm/ldap_search.log new file mode 100644 index 0000000000..3ff2f3b1a6 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-ntlm/ldap_search.log @@ -0,0 +1,11 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ldap_search +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p message_id scope deref_aliases base_object result_count result diagnostic_message filter attributes +#types time string addr port addr port int string string string count string string string vector[string] +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 60126 127.0.1.1 389 3 tree never dc=example,dc=com 9 success - (objectclass=*) - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-scram-sha-512/conn.log b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-scram-sha-512/conn.log new file mode 100644 index 0000000000..5fcce64ab8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-scram-sha-512/conn.log @@ -0,0 +1,11 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string count string count count count count set[string] +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 59552 127.0.1.1 389 tcp ldap_tcp 2.231680 353 1772 SF 0 ShADadFf 11 933 15 2560 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-scram-sha-512/ldap.log b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-scram-sha-512/ldap.log new file mode 100644 index 0000000000..7c3478b262 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-scram-sha-512/ldap.log @@ -0,0 +1,13 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ldap +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p message_id version opcode result diagnostic_message object argument +#types time string addr port addr port int int string string string string string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 59552 127.0.1.1 389 1 3 bind SASL SASL bind in progress SASL(0): successful result: user: sasladmin@slapd.ldap property: slapAuthzDN not found in sasldb - SCRAM-SHA-512 +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 59552 127.0.1.1 389 2 3 bind SASL success - - SCRAM-SHA-512 +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 59552 127.0.1.1 389 4 - unbind - - - - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-scram-sha-512/ldap_search.log b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-scram-sha-512/ldap_search.log new file mode 100644 index 0000000000..edcf38ced5 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-scram-sha-512/ldap_search.log @@ -0,0 +1,11 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ldap_search +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p message_id scope deref_aliases base_object result_count result diagnostic_message filter attributes +#types time string addr port addr port int string string string count string string string vector[string] +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 59552 127.0.1.1 389 3 tree never dc=example,dc=com 9 success - (objectclass=*) - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-srp-who-am-i/conn.log b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-srp-who-am-i/conn.log new file mode 100644 index 0000000000..2638ca3cba --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-srp-who-am-i/conn.log @@ -0,0 +1,11 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string count string count count count count set[string] +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 60648 127.0.1.1 389 tcp ldap_tcp 2.114467 548 1020 SF 0 ShADadFf 9 1024 6 1340 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-srp-who-am-i/ldap.log b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-srp-who-am-i/ldap.log new file mode 100644 index 0000000000..facaf46bc7 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.sasl-srp-who-am-i/ldap.log @@ -0,0 +1,12 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ldap +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p message_id version opcode result diagnostic_message object argument +#types time string addr port addr port int int string string string string string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 60648 127.0.1.1 389 1 3 bind SASL SASL bind in progress SASL(0): successful result: user: zeek@ubuntu-01.example.com property: slapAuthzDN not found in sasldb - SRP +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 60648 127.0.1.1 389 2 3 bind SASL success - - SRP +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.spnego-ntlmssp/conn.log b/testing/btest/Baseline/scripts.base.protocols.ldap.spnego-ntlmssp/conn.log new file mode 100644 index 0000000000..84a495d3fb --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.spnego-ntlmssp/conn.log @@ -0,0 +1,12 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string count string count count count count set[string] +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 192.168.1.105 50041 192.168.1.108 389 tcp ldap_tcp 0.004745 93 283 RSTR 0 ShADdFar 5 305 4 455 - +XXXXXXXXXX.XXXXXX ClEkJM2Vm5giqnMf4h 192.168.1.107 50041 192.168.1.108 389 tcp ldap_tcp 0.005883 93 283 RSTR 0 ShADdFar 5 305 4 455 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.spnego-ntlmssp/ldap.log b/testing/btest/Baseline/scripts.base.protocols.ldap.spnego-ntlmssp/ldap.log new file mode 100644 index 0000000000..07355e0e78 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.spnego-ntlmssp/ldap.log @@ -0,0 +1,14 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ldap +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p message_id version opcode result diagnostic_message object argument +#types time string addr port addr port int int string string string string string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 192.168.1.105 50041 192.168.1.108 389 160 3 bind SASL SASL bind in progress - - GSS-SPNEGO +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 192.168.1.105 50041 192.168.1.108 389 161 - unbind - - - - +XXXXXXXXXX.XXXXXX ClEkJM2Vm5giqnMf4h 192.168.1.107 50041 192.168.1.108 389 427 3 bind SASL SASL bind in progress - - GSS-SPNEGO +XXXXXXXXXX.XXXXXX ClEkJM2Vm5giqnMf4h 192.168.1.107 50041 192.168.1.108 389 428 - unbind - - - - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/conn.log b/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/conn.log new file mode 100644 index 0000000000..db789c02c1 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/conn.log @@ -0,0 +1,11 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string count string count count count count set[string] +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 45936 127.0.1.1 389 tcp ldap_tcp,ssl 0.016922 683 3002 RSTO 0 ShADadFR 14 1407 14 3738 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/ldap.log b/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/ldap.log new file mode 100644 index 0000000000..95a084dab8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/ldap.log @@ -0,0 +1,11 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ldap +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p message_id version opcode result diagnostic_message object argument +#types time string addr port addr port int int string string string string string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 45936 127.0.1.1 389 1 - extended success - 1.3.6.1.4.1.1466.20037 (StartTLS) - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/out b/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/out new file mode 100644 index 0000000000..08e6ccc9f2 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/out @@ -0,0 +1,4 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +CHhAvVGS1DHFjwGM9, extended_request, 1.3.6.1.4.1.1466.20037 (StartTLS), +CHhAvVGS1DHFjwGM9, extended_response, LDAP::ResultCode_SUCCESS, , +CHhAvVGS1DHFjwGM9, LDAP::starttls diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/ssl.log b/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/ssl.log new file mode 100644 index 0000000000..19fdd43528 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.starttls/ssl.log @@ -0,0 +1,11 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ssl +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher curve server_name resumed last_alert next_protocol established ssl_history cert_chain_fps client_cert_chain_fps sni_matches_cert +#types time string addr port addr port string string string string bool string string bool string vector[string] vector[string] bool +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 45936 127.0.1.1 389 TLSv13 TLS_AES_256_GCM_SHA384 secp256r1 ubuntu-01.example.com F - - T CsiI - - - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.who-am-i/conn.log b/testing/btest/Baseline/scripts.base.protocols.ldap.who-am-i/conn.log new file mode 100644 index 0000000000..9914fbe2dc --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.who-am-i/conn.log @@ -0,0 +1,11 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string count string count count count count set[string] +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 48122 127.0.1.1 389 tcp ldap_tcp 0.001192 83 59 SF 0 ShADadFf 8 507 5 327 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.who-am-i/ldap.log b/testing/btest/Baseline/scripts.base.protocols.ldap.who-am-i/ldap.log new file mode 100644 index 0000000000..80da834eba --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.who-am-i/ldap.log @@ -0,0 +1,13 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ldap +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p message_id version opcode result diagnostic_message object argument +#types time string addr port addr port int int string string string string string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 48122 127.0.1.1 389 1 3 bind simple success - cn=admin,dc=example,dc=com REDACTED +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 48122 127.0.1.1 389 2 - extended success - 1.3.6.1.4.1.4203.1.11.3 (whoami) - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 48122 127.0.1.1 389 3 - unbind - - - - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.ldap.who-am-i/out b/testing/btest/Baseline/scripts.base.protocols.ldap.who-am-i/out new file mode 100644 index 0000000000..c4dbc10489 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ldap.who-am-i/out @@ -0,0 +1,3 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +CHhAvVGS1DHFjwGM9, extended_request, 1.3.6.1.4.1.4203.1.11.3 (whoami), +CHhAvVGS1DHFjwGM9, extended_response, LDAP::ResultCode_SUCCESS, , dn:cn=admin,dc=example,dc=com diff --git a/testing/btest/Baseline/scripts.base.protocols.modbus.coil_parsing_big/coverage b/testing/btest/Baseline/scripts.base.protocols.modbus.coil_parsing_big/coverage index c9dd1e6e71..f8af25eb8c 100644 --- a/testing/btest/Baseline/scripts.base.protocols.modbus.coil_parsing_big/coverage +++ b/testing/btest/Baseline/scripts.base.protocols.modbus.coil_parsing_big/coverage @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -5 of 36 events triggered by trace +5 of 32 events triggered by trace diff --git a/testing/btest/Baseline/scripts.base.protocols.modbus.coil_parsing_small/coverage b/testing/btest/Baseline/scripts.base.protocols.modbus.coil_parsing_small/coverage index c9dd1e6e71..f8af25eb8c 100644 --- a/testing/btest/Baseline/scripts.base.protocols.modbus.coil_parsing_small/coverage +++ b/testing/btest/Baseline/scripts.base.protocols.modbus.coil_parsing_small/coverage @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-5 of 36 events triggered by trace +5 of 32 events triggered by trace diff --git a/testing/btest/Baseline/scripts.base.protocols.modbus.events/coverage b/testing/btest/Baseline/scripts.base.protocols.modbus.events/coverage index eb747a6f71..e7693345ad 100644 --- a/testing/btest/Baseline/scripts.base.protocols.modbus.events/coverage +++ b/testing/btest/Baseline/scripts.base.protocols.modbus.events/coverage @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -22 of 36 events triggered by trace +22 of 32 events triggered by trace diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password-after-auth-switch/mysql.log b/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password-after-auth-switch/mysql.log new file mode 100644 index 0000000000..26e609fe2a --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password-after-auth-switch/mysql.log @@ -0,0 +1,16 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path mysql +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p cmd arg success rows response +#types time string addr port addr port string string bool count string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 35928 127.0.0.1 3306 login root T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 35928 127.0.0.1 3306 query select @@version_comment limit 1 T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 35928 127.0.0.1 3306 query select DATABASE(), USER() limit 1 T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 35928 127.0.0.1 3306 query select @@character_set_client, @@character_set_connection, @@character_set_server, @@character_set_database limit 1 T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 35928 127.0.0.1 3306 statistics (empty) - - - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 35928 127.0.0.1 3306 quit (empty) - - - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password-after-auth-switch/out b/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password-after-auth-switch/out new file mode 100644 index 0000000000..947e3add03 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password-after-auth-switch/out @@ -0,0 +1,19 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+mysql auth plugin, F, caching_sha2_password, Vz\x08w+^\x04p\x02Tv\x01"~\x114\x14RP6\x00, 21 +mysql handshake, root +mysql auth plugin, T, mysql_native_password, , 0 +mysql auth switch request, caching_sha2_password, Vz\x08w+^\x04p\x02Tv\x01"~\x114\x14RP6\x00, 21 +mysql auth more data, T, \xf7dS\x9eXe\xc4\xd6\xa9\xa7 \xfbC\xa6p\xaf\xdf\x9dB[B\x80\xa7\x80\xef\x0c\x95BC9#\x82, 32 +mysql auth more data, F, \x03, 1 +mysql ok, 0 +mysql request, 3, select @@version_comment limit 1 +mysql result row, [MySQL Community Server - GPL] +mysql ok, 0 +mysql request, 3, select DATABASE(), USER() limit 1 +mysql result row, [, root@localhost] +mysql ok, 0 +mysql request, 3, select @@character_set_client, @@character_set_connection, @@character_set_server, @@character_set_database limit 1 +mysql result row, [utf8mb4, utf8mb4, utf8mb4, utf8mb4] +mysql ok, 0 +mysql request, 9, +mysql request, 1, diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password/mysql.log b/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password/mysql.log new file mode 100644 index 0000000000..bb46a96482 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password/mysql.log @@ -0,0 +1,23 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path mysql +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p cmd arg success rows response +#types time string addr port addr port string string bool count string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 56494 127.0.0.1 3306 login root F - Got an error reading communication packets +XXXXXXXXXX.XXXXXX ClEkJM2Vm5giqnMf4h 127.0.0.1 49352 127.0.0.1 3306 login root T 0 - +XXXXXXXXXX.XXXXXX ClEkJM2Vm5giqnMf4h 127.0.0.1 49352 127.0.0.1 3306 query show databases T 0 - +XXXXXXXXXX.XXXXXX ClEkJM2Vm5giqnMf4h 127.0.0.1 49352 127.0.0.1 3306 query show tables T 0 - +XXXXXXXXXX.XXXXXX ClEkJM2Vm5giqnMf4h 127.0.0.1 49352 127.0.0.1 3306 field_list t T 0 - +XXXXXXXXXX.XXXXXX ClEkJM2Vm5giqnMf4h 127.0.0.1 49352 127.0.0.1 3306 query select @@version_comment limit 1 T 0 - +XXXXXXXXXX.XXXXXX ClEkJM2Vm5giqnMf4h 127.0.0.1 49352 127.0.0.1 3306 quit (empty) - - - +XXXXXXXXXX.XXXXXX C4J4Th3PJpwUYZZ6gc 127.0.0.1 40950 127.0.0.1 3306 login root T 0 - +XXXXXXXXXX.XXXXXX C4J4Th3PJpwUYZZ6gc 127.0.0.1 40950 127.0.0.1 3306 query show databases T 0 - +XXXXXXXXXX.XXXXXX C4J4Th3PJpwUYZZ6gc 127.0.0.1 40950 127.0.0.1 3306 query show tables T 0 - +XXXXXXXXXX.XXXXXX C4J4Th3PJpwUYZZ6gc 127.0.0.1 40950 127.0.0.1 3306 field_list t T 0 - +XXXXXXXXXX.XXXXXX C4J4Th3PJpwUYZZ6gc 127.0.0.1 40950 127.0.0.1 3306 query select @@version_comment limit 1 T 0 - +XXXXXXXXXX.XXXXXX C4J4Th3PJpwUYZZ6gc 127.0.0.1 40950 127.0.0.1 3306 quit (empty) - - - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password/out b/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password/out new file mode 100644 index 0000000000..a9dd402e6c --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.caching_sha2_password/out @@ -0,0 +1,51 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+mysql auth plugin, F, caching_sha2_password, s.\x13\x01>\x05m\x04~Lq)%\x0fLL\x01\x08Xj\x00, 21 +mysql handshake, root +mysql auth plugin, T, caching_sha2_password, \x98\xa0Ex\x8a\xeb`\xf3\xc7)\xa6\xaf\xf1\xa4]-\xa0\xdf\x959\xa1\xc5\xd6\xb8\xf3\xd6}\xb2\xa8\x033~, 32 +mysql auth more data, F, \x04, 1 +mysql error, 1158, Got an error reading communication packets +mysql auth plugin, F, caching_sha2_password, 4x`?e\x04i'k&-P%LID\x17/\x0f{\x00, 21 +mysql handshake, root +mysql auth plugin, T, caching_sha2_password, y.\x91:\x11\x87i\x17\xdfI_\xd2\xec\x9a"\xc2%sB\x10\x90\xbd\x15C\xf4w\xc0\x09p}\x8eE, 32 +mysql auth more data, F, \x04, 1 +mysql auth more data, T, \x02, 1 +mysql auth more data, F, -----BEGIN PUBLIC KEY-----\x0aMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0VACy/bY60MRuPW6aCxZ\x0abi+o0EgCgxzFObbyzDfnTnVJegOXbrdcbu1qIlEjPyn7UMBfjQr+VueiJvPjz2M8\x0ad/6GX1h4fYuwW4bEXBVo4HGxM8N0IyO1BYjafOaoUeL/NI+bLifH70KorIcSUR+h\x0a879DAQ0zlKz5vwpDYN2LVxidjFvy5baSPi/csDMqi2jitBAzbNW992O/v9CPnh5f\x0akdRMa2lMPKxRaPeqAw9U7CAmRqAaHZAfdI5kYnj3vsOFvKL2dkE+ckY8sh5H2uto\x0a37+mg6oll5PsydMbSuvFHLc0JZm++oem5z2WsZBdxmohqJ8Foc43W8IOtxs+YAOw\x0avwIDAQAB\x0a-----END PUBLIC KEY-----\x0a, 451 +mysql auth more data, T, \xca3\x89.M\x9d\xc0\xcb\xd6'2Zo*\xda8\xd2\xba\xb1\xabI\xcb\x1es%R\x1fo\xd0\xa6\xb8\x90\xf56\x0e\xd9\xd8p\x9eX\x84K\xb5\x1a\xe5\xfa\x18\xc1*\xfc\xa9W\xd6p\x1a\xcfv\xe8%\xe0\xb9\xfe\x98\x1b\xb3\x938\x85\xf4O\xf0c2b\xae\x81F\x1e\xb9\x1f\xbd\xdf\x16C\x91\xd5\x08\xa6\x82\xb6y\xf7\xa3u= 0.63. +mysql ssl request, CHhAvVGS1DHFjwGM9 diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.encrypted-aws-rds/tls-13.out b/testing/btest/Baseline/scripts.base.protocols.mysql.encrypted-aws-rds/tls-13.out new file mode 100644 index 0000000000..dd294ea217 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.encrypted-aws-rds/tls-13.out @@ -0,0 +1,2 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +mysql ssl request, CHhAvVGS1DHFjwGM9 diff --git a/testing/btest/Baseline/language.deprecate-global/out b/testing/btest/Baseline/scripts.base.protocols.mysql.encrypted/out similarity index 73% rename from testing/btest/Baseline/language.deprecate-global/out rename to testing/btest/Baseline/scripts.base.protocols.mysql.encrypted/out index 3987e838f5..dd294ea217 100644 --- a/testing/btest/Baseline/language.deprecate-global/out +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.encrypted/out @@ -1,4 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -X, shadows ::X (42) -::X, 42 -GLOBAL::X, 42 +mysql ssl request, CHhAvVGS1DHFjwGM9 diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.many-query-attr/mysql.log b/testing/btest/Baseline/scripts.base.protocols.mysql.many-query-attr/mysql.log new file mode 100644 index 0000000000..1f6d2899e8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.many-query-attr/mysql.log @@ -0,0 +1,54 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path mysql +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p cmd arg success rows response +#types time string addr port addr port string string bool count string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 login root T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 query show databases T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 query show tables T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list columns_priv T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list component T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list db T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list default_roles T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list engine_cost T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list func T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list general_log T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list global_grants T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list gtid_executed T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list help_category T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list help_keyword T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list help_relation T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list help_topic T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list innodb_index_stats T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list innodb_table_stats T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list ndb_binlog_index T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list password_history T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list plugin T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list procs_priv T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list proxies_priv T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list replication_asynchronous_connection_failover T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list replication_asynchronous_connection_failover_managed T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list replication_group_configuration_version T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list replication_group_member_actions T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list role_edges T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list server_cost T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list servers T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list slave_master_info T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list slave_relay_log_info T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 
127.0.0.1 3306 field_list slave_worker_info T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list slow_log T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list tables_priv T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list time_zone T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list time_zone_leap_second T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list time_zone_name T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list time_zone_transition T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list time_zone_transition_type T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 field_list user T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 query select @@version_comment limit 1 T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 query SELECT mysql_query_attribute_string('n1'), mysql_query_attribute_string('n2') T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33592 127.0.0.1 3306 quit (empty) - - - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.many-query-attr/out b/testing/btest/Baseline/scripts.base.protocols.mysql.many-query-attr/out new file mode 100644 index 0000000000..a064e5f357 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.many-query-attr/out @@ -0,0 +1,132 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +mysql handshake, root +mysql ok, 0 +mysql request, 3, show databases +mysql result row, [information_schema] +mysql result row, [mysql] +mysql result row, [performance_schema] +mysql result row, [sys] +mysql ok, 0 +mysql request, 3, show tables +mysql result row, [columns_priv] +mysql result row, [component] +mysql result row, [db] +mysql result row, [default_roles] +mysql result row, [engine_cost] +mysql result row, [func] +mysql result row, [general_log] +mysql result row, [global_grants] +mysql result row, [gtid_executed] +mysql result row, [help_category] +mysql result row, [help_keyword] +mysql result row, [help_relation] +mysql result row, [help_topic] +mysql result row, [innodb_index_stats] +mysql result row, [innodb_table_stats] +mysql result row, [ndb_binlog_index] +mysql result row, [password_history] +mysql result row, [plugin] +mysql result row, [procs_priv] +mysql result row, [proxies_priv] +mysql result row, [replication_asynchronous_connection_failover] +mysql result row, [replication_asynchronous_connection_failover_managed] +mysql result row, [replication_group_configuration_version] +mysql result row, [replication_group_member_actions] +mysql result row, [role_edges] +mysql result row, [server_cost] +mysql result row, [servers] +mysql result row, [slave_master_info] +mysql result row, [slave_relay_log_info] +mysql result row, [slave_worker_info] +mysql result row, [slow_log] +mysql result row, [tables_priv] +mysql result row, [time_zone] +mysql result row, [time_zone_leap_second] +mysql result row, [time_zone_name] +mysql result row, [time_zone_transition] +mysql result row, [time_zone_transition_type] +mysql result row, [user] +mysql ok, 0 +mysql request, 4, columns_priv\x00 +mysql ok, 0 +mysql request, 4, component\x00 +mysql ok, 0 +mysql request, 4, db\x00 +mysql ok, 0 +mysql request, 4, default_roles\x00 +mysql ok, 0 +mysql 
request, 4, engine_cost\x00 +mysql ok, 0 +mysql request, 4, func\x00 +mysql ok, 0 +mysql request, 4, general_log\x00 +mysql ok, 0 +mysql request, 4, global_grants\x00 +mysql ok, 0 +mysql request, 4, gtid_executed\x00 +mysql ok, 0 +mysql request, 4, help_category\x00 +mysql ok, 0 +mysql request, 4, help_keyword\x00 +mysql ok, 0 +mysql request, 4, help_relation\x00 +mysql ok, 0 +mysql request, 4, help_topic\x00 +mysql ok, 0 +mysql request, 4, innodb_index_stats\x00 +mysql ok, 0 +mysql request, 4, innodb_table_stats\x00 +mysql ok, 0 +mysql request, 4, ndb_binlog_index\x00 +mysql ok, 0 +mysql request, 4, password_history\x00 +mysql ok, 0 +mysql request, 4, plugin\x00 +mysql ok, 0 +mysql request, 4, procs_priv\x00 +mysql ok, 0 +mysql request, 4, proxies_priv\x00 +mysql ok, 0 +mysql request, 4, replication_asynchronous_connection_failover\x00 +mysql ok, 0 +mysql request, 4, replication_asynchronous_connection_failover_managed\x00 +mysql ok, 0 +mysql request, 4, replication_group_configuration_version\x00 +mysql ok, 0 +mysql request, 4, replication_group_member_actions\x00 +mysql ok, 0 +mysql request, 4, role_edges\x00 +mysql ok, 0 +mysql request, 4, server_cost\x00 +mysql ok, 0 +mysql request, 4, servers\x00 +mysql ok, 0 +mysql request, 4, slave_master_info\x00 +mysql ok, 0 +mysql request, 4, slave_relay_log_info\x00 +mysql ok, 0 +mysql request, 4, slave_worker_info\x00 +mysql ok, 0 +mysql request, 4, slow_log\x00 +mysql ok, 0 +mysql request, 4, tables_priv\x00 +mysql ok, 0 +mysql request, 4, time_zone\x00 +mysql ok, 0 +mysql request, 4, time_zone_leap_second\x00 +mysql ok, 0 +mysql request, 4, time_zone_name\x00 +mysql ok, 0 +mysql request, 4, time_zone_transition\x00 +mysql ok, 0 +mysql request, 4, time_zone_transition_type\x00 +mysql ok, 0 +mysql request, 4, user\x00 +mysql ok, 0 +mysql request, 3, select @@version_comment limit 1 +mysql result row, [MySQL Community Server - GPL] +mysql ok, 0 +mysql request, 3, SELECT mysql_query_attribute_string('n1'), mysql_query_attribute_string('n2') +mysql result row, [42, v2] +mysql ok, 0 +mysql request, 1, diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr-non-string/mysql.log b/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr-non-string/mysql.log new file mode 100644 index 0000000000..8ae14a6dc2 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr-non-string/mysql.log @@ -0,0 +1,14 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
+#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path mysql +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p cmd arg success rows response +#types time string addr port addr port string string bool count string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33754 127.0.0.1 3306 login root T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33754 127.0.0.1 3306 ping (empty) T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33754 127.0.0.1 3306 query SELECT version() T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 33754 127.0.0.1 3306 quit (empty) - - - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr-non-string/out b/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr-non-string/out new file mode 100644 index 0000000000..0924f49140 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr-non-string/out @@ -0,0 +1,10 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +mysql handshake, root +mysql ok, 0 +mysql request, 14, +mysql ok, 0 +mysql request, 3, SELECT version() +mysql eof, T +mysql result row, [9.0.0] +mysql eof, F +mysql request, 1, diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr/mysql.log b/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr/mysql.log new file mode 100644 index 0000000000..3fd06ec55f --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr/mysql.log @@ -0,0 +1,14 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path mysql +#open XXXX-XX-XX-XX-XX-XX +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p cmd arg success rows response +#types time string addr port addr port string string bool count string +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 51682 127.0.0.1 3306 login ykg T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 51682 127.0.0.1 3306 query select @@version_comment limit 1 T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 51682 127.0.0.1 3306 query select now() T 0 - +XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 51682 127.0.0.1 3306 query select now() T 0 - +#close XXXX-XX-XX-XX-XX-XX diff --git a/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr/out b/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr/out new file mode 100644 index 0000000000..dce5524739 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.mysql.query-attr/out @@ -0,0 +1,12 @@ +### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +mysql handshake, ykg +mysql ok, 0 +mysql request, 3, select @@version_comment limit 1 +mysql result row, [Source distribution] +mysql ok, 0 +mysql request, 3, select now() +mysql result row, [2022-07-13 10:45:41] +mysql ok, 0 +mysql request, 3, select now() +mysql result row, [2022-07-13 10:45:43] +mysql ok, 0 diff --git a/testing/btest/Baseline/scripts.base.protocols.smb.smb2-zero-byte-error-ioctl/out b/testing/btest/Baseline/scripts.base.protocols.smb.smb2-zero-byte-error-ioctl/out new file mode 100644 index 0000000000..f803db64f2 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.smb.smb2-zero-byte-error-ioctl/out @@ -0,0 +1,13 @@ +### BTest baseline data generated by btest-diff. Do not edit. 
Use "btest -U/-u" to update. Requires BTest >= 0.63. +smb2_close_request, [credit_charge=1, status=0, command=6, credits=256, flags=0, message_id=8, process_id=65279, tree_id=3905704575, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00] +smb2_close_response, [credit_charge=1, status=0, command=6, credits=256, flags=1, message_id=8, process_id=65279, tree_id=3905704575, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00], [alloc_size=0, eof=0, times=[modified=0.0, modified_raw=116444736000000000, accessed=0.0, accessed_raw=116444736000000000, created=0.0, created_raw=116444736000000000, changed=0.0, changed_raw=116444736000000000], attrs=[read_only=F, hidden=F, system=F, directory=F, archive=F, normal=F, temporary=F, sparse_file=F, reparse_point=F, compressed=F, offline=F, not_content_indexed=F, encrypted=F, integrity_stream=F, no_scrub_data=F]] +smb2_close_request, [credit_charge=1, status=0, command=6, credits=256, flags=0, message_id=21, process_id=65279, tree_id=900627714, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00] +smb2_close_response, [credit_charge=1, status=0, command=6, credits=256, flags=1, message_id=21, process_id=65279, tree_id=900627714, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00], [alloc_size=0, eof=0, times=[modified=0.0, modified_raw=116444736000000000, accessed=0.0, accessed_raw=116444736000000000, created=0.0, created_raw=116444736000000000, changed=0.0, changed_raw=116444736000000000], attrs=[read_only=F, hidden=F, system=F, directory=F, archive=F, normal=F, temporary=F, sparse_file=F, reparse_point=F, compressed=F, offline=F, not_content_indexed=F, encrypted=F, integrity_stream=F, no_scrub_data=F]] +smb2_close_request, [credit_charge=1, status=0, command=6, credits=256, flags=4, message_id=25, process_id=65279, tree_id=1248644238, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00] +smb2_close_response, [credit_charge=1, status=0, command=6, credits=768, flags=5, message_id=25, process_id=65279, tree_id=1248644238, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00], [alloc_size=0, eof=0, times=[modified=0.0, modified_raw=116444736000000000, accessed=0.0, accessed_raw=116444736000000000, created=0.0, created_raw=116444736000000000, changed=0.0, changed_raw=116444736000000000], attrs=[read_only=F, hidden=F, system=F, directory=F, archive=F, normal=F, temporary=F, sparse_file=F, reparse_point=F, compressed=F, offline=F, not_content_indexed=F, encrypted=F, integrity_stream=F, no_scrub_data=F]] +smb2_close_request, [credit_charge=1, status=0, command=6, credits=256, flags=4, message_id=28, process_id=65279, tree_id=1248644238, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00] +smb2_close_response, [credit_charge=1, status=0, command=6, credits=768, flags=5, message_id=28, process_id=65279, tree_id=1248644238, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00], [alloc_size=0, eof=0, times=[modified=0.0, modified_raw=116444736000000000, accessed=0.0, accessed_raw=116444736000000000, created=0.0, created_raw=116444736000000000, changed=0.0, changed_raw=116444736000000000], attrs=[read_only=F, hidden=F, system=F, directory=F, archive=F, normal=F, temporary=F, sparse_file=F, reparse_point=F, 
compressed=F, offline=F, not_content_indexed=F, encrypted=F, integrity_stream=F, no_scrub_data=F]] +smb2_close_request, [credit_charge=1, status=0, command=6, credits=256, flags=4, message_id=31, process_id=65279, tree_id=1248644238, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00] +smb2_close_response, [credit_charge=1, status=0, command=6, credits=768, flags=5, message_id=31, process_id=65279, tree_id=1248644238, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00], [alloc_size=0, eof=0, times=[modified=0.0, modified_raw=116444736000000000, accessed=0.0, accessed_raw=116444736000000000, created=0.0, created_raw=116444736000000000, changed=0.0, changed_raw=116444736000000000], attrs=[read_only=F, hidden=F, system=F, directory=F, archive=F, normal=F, temporary=F, sparse_file=F, reparse_point=F, compressed=F, offline=F, not_content_indexed=F, encrypted=F, integrity_stream=F, no_scrub_data=F]] +smb2_close_request, [credit_charge=1, status=0, command=6, credits=256, flags=4, message_id=34, process_id=65279, tree_id=1248644238, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00] +smb2_close_response, [credit_charge=1, status=0, command=6, credits=768, flags=5, message_id=34, process_id=65279, tree_id=1248644238, session_id=66137014, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00], [alloc_size=0, eof=0, times=[modified=0.0, modified_raw=116444736000000000, accessed=0.0, accessed_raw=116444736000000000, created=0.0, created_raw=116444736000000000, changed=0.0, changed_raw=116444736000000000], attrs=[read_only=F, hidden=F, system=F, directory=F, archive=F, normal=F, temporary=F, sparse_file=F, reparse_point=F, compressed=F, offline=F, not_content_indexed=F, encrypted=F, integrity_stream=F, no_scrub_data=F]] diff --git a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered index c7b26a1f28..b5c04c3f44 100644 --- a/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered +++ b/testing/btest/Baseline/scripts.policy.frameworks.telemetry.log/telemetry.log.filtered @@ -1,5 +1,7 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. +XXXXXXXXXX.XXXXXX zeek counter zeek_ended_sessions_total reason inactivity 0.0 XXXXXXXXXX.XXXXXX zeek counter zeek_total_sessions_total protocol tcp 1.0 XXXXXXXXXX.XXXXXX zeek gauge zeek_active_sessions protocol tcp 1.0 +XXXXXXXXXX.XXXXXX zeek counter zeek_ended_sessions_total reason inactivity 0.0 XXXXXXXXXX.XXXXXX zeek counter zeek_total_sessions_total protocol tcp 500.0 XXXXXXXXXX.XXXXXX zeek gauge zeek_active_sessions protocol tcp 500.0 diff --git a/testing/btest/Baseline/signatures.custom-event-errors/.stderr b/testing/btest/Baseline/signatures.custom-event-errors/.stderr index 68f7bf5815..97f17fa2a5 100644 --- a/testing/btest/Baseline/signatures.custom-event-errors/.stderr +++ b/testing/btest/Baseline/signatures.custom-event-errors/.stderr @@ -1,9 +1,10 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -warning in <...>/custom-event-errors.zeek, line 9: Wrong number of arguments for function. Expected 3, got 2. 
(event(state:signature_state, data:string)) +warning in <...>/custom-event-errors.zeek, line 7: Wrong number of arguments for function. Expected 3, got 2. (event(state:signature_state, data:string)) error: wrong event parameters for 'wrong_signature2' -warning in <...>/custom-event-errors.zeek, line 11: Wrong number of arguments for function. Expected 2, got 3. (event(state:signature_state, msg:string, data:string)) +warning in <...>/custom-event-errors.zeek, line 9: Wrong number of arguments for function. Expected 2, got 3. (event(state:signature_state, msg:string, data:string)) error: wrong event parameters for 'wrong_signature3' -warning in <...>/custom-event-errors.zeek, line 13: Type mismatch in function argument #1. Expected string, got count. (event(state:signature_state, msg:count, data:string)) +warning in <...>/custom-event-errors.zeek, line 11: Type mismatch in function argument #1. Expected string, got count. (event(state:signature_state, msg:count, data:string)) error: wrong event parameters for 'wrong_signature4' -error: Error in signature (./id.sig:19): unknown script-level identifier (non_existing_event) -error: Error in signature (./id2.sig:4): custom event and identifier for msg unsupported +error: Error in signature (./id.sig:19): identifier is not an event (non_existing_event) +error: Error in signature (./id.sig:24): identifier is not an event (cat) +error: Error in signature (./id.sig:29): identifier is not an event (ignore_checksums) diff --git a/testing/btest/Baseline/signatures.custom-event/.stderr b/testing/btest/Baseline/signatures.custom-event/.stderr index 82d88c6d62..49d861c74c 100644 --- a/testing/btest/Baseline/signatures.custom-event/.stderr +++ b/testing/btest/Baseline/signatures.custom-event/.stderr @@ -1,2 +1 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. -warning: Remove in v7.1: Using an identifier for msg is deprecated (./id.sig:9) diff --git a/testing/btest/Baseline/signatures.custom-event/out b/testing/btest/Baseline/signatures.custom-event/out index e6984c7450..fca040926f 100644 --- a/testing/btest/Baseline/signatures.custom-event/out +++ b/testing/btest/Baseline/signatures.custom-event/out @@ -1,4 +1,3 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. signature_match2 [orig_h=127.0.0.1, orig_p=30000/udp, resp_h=127.0.0.1, resp_p=13000/udp] -signature_match [orig_h=127.0.0.1, orig_p=30000/udp, resp_h=127.0.0.1, resp_p=13000/udp] - message from identifier (cannot be changed) signature_match3 [orig_h=127.0.0.1, orig_p=30000/udp, resp_h=127.0.0.1, resp_p=13000/udp] - message diff --git a/testing/btest/Baseline/spicy.replaces-conflicts/output b/testing/btest/Baseline/spicy.replaces-conflicts/output index 8cf95b5195..e5e9334522 100644 --- a/testing/btest/Baseline/spicy.replaces-conflicts/output +++ b/testing/btest/Baseline/spicy.replaces-conflicts/output @@ -1,2 +1,2 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. 
-fatal error: redefinition of protocol analyzer spicy::SSH_1 +fatal error: spicy::SSH_2: protocol analyzer SSH is already mapped to a different analyzer; cannot replace an analyzer multiple times diff --git a/testing/btest/Baseline/supervisor.large-cluster/zeek.bare-1.node.out b/testing/btest/Baseline/supervisor.large-cluster/zeek.bare-1.node.out index 8837c1f15f..4a385b6968 100644 --- a/testing/btest/Baseline/supervisor.large-cluster/zeek.bare-1.node.out +++ b/testing/btest/Baseline/supervisor.large-cluster/zeek.bare-1.node.out @@ -1,5 +1,5 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. supervised node zeek_init() 1024, cluster_nodes! -[node_type=Cluster::WORKER, ip=127.0.0.1, zone_id=, p=0/tcp, interface=, manager=, time_machine=, id=, metrics_port=] +[node_type=Cluster::WORKER, ip=127.0.0.1, zone_id=, p=0/tcp, manager=, id=, metrics_port=] supervised node zeek_done() diff --git a/testing/btest/Baseline/supervisor.large-cluster/zeek.bare-32.node.out b/testing/btest/Baseline/supervisor.large-cluster/zeek.bare-32.node.out index 8837c1f15f..4a385b6968 100644 --- a/testing/btest/Baseline/supervisor.large-cluster/zeek.bare-32.node.out +++ b/testing/btest/Baseline/supervisor.large-cluster/zeek.bare-32.node.out @@ -1,5 +1,5 @@ ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63. supervised node zeek_init() 1024, cluster_nodes! -[node_type=Cluster::WORKER, ip=127.0.0.1, zone_id=, p=0/tcp, interface=, manager=, time_machine=, id=, metrics_port=] +[node_type=Cluster::WORKER, ip=127.0.0.1, zone_id=, p=0/tcp, manager=, id=, metrics_port=] supervised node zeek_done() diff --git a/testing/btest/Traces/README b/testing/btest/Traces/README index bc5a304436..43942584f2 100644 --- a/testing/btest/Traces/README +++ b/testing/btest/Traces/README @@ -18,3 +18,6 @@ Trace Index/Sources: - one `\x30` byte in the ciphertext changed to `\x00` - ldap/issue-32.pcapng: Provided by GH user martinvanhensbergen, +- ldap/ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap: Harvested from CTU-SME-11 + (Experiment-VM-Microsoft-Windows7AD-1) dataset, filtering on tcp port 389 and port 50041. + https://zenodo.org/records/7958259 (DOI 10.5281/zenodo.7958258). 
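The README entry above says the new LDAP trace was harvested from the CTU-SME-11 capture by filtering on tcp port 389 and port 50041. As a minimal, purely illustrative sketch of that kind of extraction, a tcpdump invocation could look as follows; the input filename is an assumption, and the dataset's actual tooling is not stated in the README:

    # Hypothetical example: extract one LDAP connection (server port 389,
    # client port 50041) from the larger CTU-SME-11 capture.
    # Only the port filter follows the README entry; the input filename is assumed.
    tcpdump -r ctu-sme-11-win7ad-1.pcap \
        -w ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap \
        'tcp port 389 and port 50041'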
diff --git a/testing/btest/Traces/ldap/ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap b/testing/btest/Traces/ldap/ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap new file mode 100644 index 0000000000..dd2b34ce4a Binary files /dev/null and b/testing/btest/Traces/ldap/ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap differ diff --git a/testing/btest/Traces/ldap/ldap-starttls.pcap b/testing/btest/Traces/ldap/ldap-starttls.pcap new file mode 100644 index 0000000000..0cb6035125 Binary files /dev/null and b/testing/btest/Traces/ldap/ldap-starttls.pcap differ diff --git a/testing/btest/Traces/ldap/ldap-who-am-i.pcap b/testing/btest/Traces/ldap/ldap-who-am-i.pcap new file mode 100644 index 0000000000..75dae01d4e Binary files /dev/null and b/testing/btest/Traces/ldap/ldap-who-am-i.pcap differ diff --git a/testing/btest/Traces/ldap/sasl-ntlm.pcap b/testing/btest/Traces/ldap/sasl-ntlm.pcap new file mode 100644 index 0000000000..ef2fb91ab4 Binary files /dev/null and b/testing/btest/Traces/ldap/sasl-ntlm.pcap differ diff --git a/testing/btest/Traces/ldap/sasl-scram-sha-512.pcap b/testing/btest/Traces/ldap/sasl-scram-sha-512.pcap new file mode 100644 index 0000000000..70fd3d6b0d Binary files /dev/null and b/testing/btest/Traces/ldap/sasl-scram-sha-512.pcap differ diff --git a/testing/btest/Traces/ldap/sasl-srp-who-am-i.pcap b/testing/btest/Traces/ldap/sasl-srp-who-am-i.pcap new file mode 100644 index 0000000000..9e70f2d1b0 Binary files /dev/null and b/testing/btest/Traces/ldap/sasl-srp-who-am-i.pcap differ diff --git a/testing/btest/Traces/mysql/caching_sha2_password-after-auth-switch.pcapng b/testing/btest/Traces/mysql/caching_sha2_password-after-auth-switch.pcapng new file mode 100644 index 0000000000..3f4407eb49 Binary files /dev/null and b/testing/btest/Traces/mysql/caching_sha2_password-after-auth-switch.pcapng differ diff --git a/testing/btest/Traces/mysql/caching_sha2_password.trace b/testing/btest/Traces/mysql/caching_sha2_password.trace new file mode 100644 index 0000000000..184ed83254 Binary files /dev/null and b/testing/btest/Traces/mysql/caching_sha2_password.trace differ diff --git a/testing/btest/Traces/mysql/many-query-attrs.pcap b/testing/btest/Traces/mysql/many-query-attrs.pcap new file mode 100644 index 0000000000..175e5a9644 Binary files /dev/null and b/testing/btest/Traces/mysql/many-query-attrs.pcap differ diff --git a/testing/btest/Traces/mysql/mysql-9.0.0-query-attributes.pcap b/testing/btest/Traces/mysql/mysql-9.0.0-query-attributes.pcap new file mode 100644 index 0000000000..684dbcad38 Binary files /dev/null and b/testing/btest/Traces/mysql/mysql-9.0.0-query-attributes.pcap differ diff --git a/testing/btest/Traces/mysql/query-attr.pcap b/testing/btest/Traces/mysql/query-attr.pcap new file mode 100644 index 0000000000..144134b89c Binary files /dev/null and b/testing/btest/Traces/mysql/query-attr.pcap differ diff --git a/testing/btest/Traces/smb/smb2-zero-byte-error-ioctl.pcap b/testing/btest/Traces/smb/smb2-zero-byte-error-ioctl.pcap new file mode 100644 index 0000000000..3ffab0867a Binary files /dev/null and b/testing/btest/Traces/smb/smb2-zero-byte-error-ioctl.pcap differ diff --git a/testing/btest/core/check-unused-event-handlers.test b/testing/btest/core/check-unused-event-handlers.test deleted file mode 100644 index b5f9cf4aae..0000000000 --- a/testing/btest/core/check-unused-event-handlers.test +++ /dev/null @@ -1,10 +0,0 @@ -# This test should print a warning that the event handler is never invoked. 
-# @TEST-REQUIRES: test "${ZEEK_USE_CPP}" != "1" -# @TEST-REQUIRES: $SCRIPTS/have-spicy # This test logs uninvoked event handlers, so disable it if Spicy and its plugin is unavailable. -# @TEST-EXEC: zeek -b %INPUT check_for_unused_event_handlers=T -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort-and-remove-abspath btest-diff .stderr - -event this_is_never_used() - { - print "not even once"; - } diff --git a/testing/btest/core/script-args.zeek b/testing/btest/core/script-args.zeek index b229c87ada..7484bb9441 100644 --- a/testing/btest/core/script-args.zeek +++ b/testing/btest/core/script-args.zeek @@ -2,6 +2,10 @@ # the script differently, leading to complaints that there are no scripts. # @TEST-REQUIRES: test "${ZEEK_USE_CPP}" != "1" +# TSAN may re-execute the executable when the memory layout doesn't fulfill +# requirements, causing argument confusion when that happens (see #3774). +# @TEST-REQUIRES: ! have-tsan + # @TEST-EXEC: printf '#!' > test.zeek # @TEST-EXEC: printf "$BUILD/src/zeek -b --\n" >> test.zeek # @TEST-EXEC: cat %INPUT >> test.zeek diff --git a/testing/btest/coverage/bare-mode-errors.test b/testing/btest/coverage/bare-mode-errors.test index be243bcdc0..ea5b66955e 100644 --- a/testing/btest/coverage/bare-mode-errors.test +++ b/testing/btest/coverage/bare-mode-errors.test @@ -9,4 +9,4 @@ # # @TEST-EXEC: test -d $DIST/scripts # @TEST-EXEC: for script in `find $DIST/scripts/ -name \*\.zeek`; do zeek -b --parse-only $script >>errors 2>&1; done -# @TEST-EXEC: TEST_DIFF_CANONIFIER="grep -v -e 'load-balancing.zeek.*deprecated script loaded' | grep -v -e 'prometheus.zeek.*deprecated script loaded' | $SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-sort" btest-diff errors +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-sort" btest-diff errors diff --git a/testing/btest/coverage/test-all-policy-cluster.test b/testing/btest/coverage/test-all-policy-cluster.test index 9d88868063..dc5e14e385 100644 --- a/testing/btest/coverage/test-all-policy-cluster.test +++ b/testing/btest/coverage/test-all-policy-cluster.test @@ -9,7 +9,7 @@ # @TEST-EXEC: CLUSTER_NODE=logger-1 zeek %INPUT # @TEST-EXEC: CLUSTER_NODE=proxy-1 zeek %INPUT # @TEST-EXEC: CLUSTER_NODE=worker-1 zeek %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER='grep -v "load-balancing.zeek.*deprecated script" | grep -v "prometheus.zeek.*deprecated script" | $SCRIPTS/diff-remove-abspath' btest-diff .stderr +# @TEST-EXEC: TEST_DIFF_CANONIFIER='$SCRIPTS/diff-remove-abspath' btest-diff .stderr @load base/frameworks/cluster @load misc/loaded-scripts diff --git a/testing/btest/language/deprecate-global.zeek b/testing/btest/language/deprecate-global.zeek deleted file mode 100644 index 236cc3bf19..0000000000 --- a/testing/btest/language/deprecate-global.zeek +++ /dev/null @@ -1,27 +0,0 @@ -# @TEST-DOC: Adapt in v7.1 to check for errors upon GLOBAL accesses.
- -# @TEST-EXEC: zeek -b %INPUT >out -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr - -module GLOBAL; - -function test_function() { } - -global X = 42; - - -module MyModule; - -global X = fmt("shadows ::X (%s)", ::X); - -event zeek_init() - { - test_function(); - ::test_function(); - GLOBAL::test_function(); - - print "X", X; - print "::X", ::X; - print "GLOBAL::X", GLOBAL::X; - } diff --git a/testing/btest/language/global-colon-colon.zeek b/testing/btest/language/global-colon-colon.zeek index f1abef9066..79e3d25bb9 100644 --- a/testing/btest/language/global-colon-colon.zeek +++ b/testing/btest/language/global-colon-colon.zeek @@ -30,7 +30,7 @@ hook my_hook() &priority=9 print " MyModule::my_hook()"; } -# This implements GLOBAL::my_hook() +# This implements a global my_hook() hook ::my_hook() &priority=8 { print " ::my_hook() (in MyModule using ::)"; @@ -63,37 +63,29 @@ event zeek_init() &priority=5 print fmt(" %s", X); print "(MyModule) print MyModule::X"; print fmt(" %s", MyModule::X); - print "(MyModule) print GLOBAL::X"; - print fmt(" %s", GLOBAL::X); print "(MyModule) print ::X"; print fmt(" %s", ::X); print "(MyModule) hook my_hook()"; hook my_hook(); # This uses MyModule::my_hook(); print "(MyModule) hook MyModule::my_hook()"; hook MyModule::my_hook(); # This uses MyModule::hook(); - print "(MyModule) hook GLOBAL::my_hook()"; - hook GLOBAL::my_hook(); print "(MyModule) hook ::my_hook()"; hook ::my_hook(); print "(MyModule) call func()"; func(); - print "(MyModule) call GLOBAL::func()"; - GLOBAL::func(); print "(MyModule) call ::func()"; ::func(); print "(MyModule) call funcX()"; funcX(); - print "(MyModule) call GLOBAL::funcX()"; - GLOBAL::funcX(); print "(MyModule) call ::funcX()"; ::funcX(); # This schedules MyEvent::my_event() event my_event(); - # This schedules the GLOBAL::my_event(); + # This schedules the global ::my_event() event ::my_event(); } @@ -106,30 +98,22 @@ event zeek_init() &priority=5 print fmt(" %s", X); print "(G) print MyModule::X"; print fmt(" %s", MyModule::X); - print "(G) print GLOBAL::X"; - print fmt(" %s", GLOBAL::X); print "(G) print ::X"; print fmt(" %s", ::X); print "(G) hook my_hook()"; - hook my_hook(); # This uses GLOBAL::my_hook(); + hook my_hook(); # This uses global my_hook(); print "(G) MyModule::my_hook()"; hook MyModule::my_hook(); # This uses MyModule::hook(); - print "(G) hook GLOBAL::my_hook()"; - hook GLOBAL::my_hook(); print "(G) hook ::my_hook()"; hook ::my_hook(); print "(G) call func()"; func(); - print "(G) call GLOBAL::func()"; - GLOBAL::func(); print "(G) call ::func()"; ::func(); print "(G) call funcX()"; funcX(); - print "(G) call GLOBAL::funcX()"; - GLOBAL::funcX(); print "(G) call ::funcX()"; ::funcX(); } diff --git a/testing/btest/language/global-type-clash.zeek b/testing/btest/language/global-type-clash.zeek index 9eac7fb53a..e361046b07 100644 --- a/testing/btest/language/global-type-clash.zeek +++ b/testing/btest/language/global-type-clash.zeek @@ -9,7 +9,7 @@ type r: record { b: count; }; event zeek_init() { - local x: GLOBAL::r; + local x: ::r; x$a = 5; local y: test::r; diff --git a/testing/btest/language/when.zeek b/testing/btest/language/when.zeek index d0d07b44fa..e32785abc7 100644 --- a/testing/btest/language/when.zeek +++ b/testing/btest/language/when.zeek @@ -1,6 +1,6 @@ # @TEST-EXEC: btest-bg-run test1 zeek -b %INPUT # @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: mv test1/.stdout out +# @TEST-EXEC: cat 
test1/.stdout test1/.stderr >> out # @TEST-EXEC: btest-diff out redef exit_only_after_terminate = T; @@ -26,6 +26,13 @@ event zeek_init() when [h] ( local hname3 = lookup_addr(h) ) {} timeout to + 2sec {} + # The following used to generate a spurious warning, so it's here + # as a regression test. + when ( local res = lookup_addr(127.0.0.1) ) + { + return; + } + print "done"; } diff --git a/testing/btest/scripts/base/protocols/ldap/sasl-ntlm.zeek b/testing/btest/scripts/base/protocols/ldap/sasl-ntlm.zeek new file mode 100644 index 0000000000..0504ba7a0a --- /dev/null +++ b/testing/btest/scripts/base/protocols/ldap/sasl-ntlm.zeek @@ -0,0 +1,12 @@ +# Copyright (c) 2024 by the Zeek Project. See LICENSE for details. + +# @TEST-REQUIRES: have-spicy +# @TEST-EXEC: zeek -C -r ${TRACES}/ldap/sasl-ntlm.pcap %INPUT +# @TEST-EXEC: cat conn.log | zeek-cut -Cn local_orig local_resp > conn.log2 && mv conn.log2 conn.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff ldap.log +# @TEST-EXEC: btest-diff ldap_search.log +# @TEST-EXEC: ! test -f dpd.log +# @TEST-EXEC: ! test -f analyzer.log +# +# @TEST-DOC: This broke after #3826 got merged diff --git a/testing/btest/scripts/base/protocols/ldap/sasl-scram-sha-512.zeek b/testing/btest/scripts/base/protocols/ldap/sasl-scram-sha-512.zeek new file mode 100644 index 0000000000..9db41f96bc --- /dev/null +++ b/testing/btest/scripts/base/protocols/ldap/sasl-scram-sha-512.zeek @@ -0,0 +1,12 @@ +# Copyright (c) 2024 by the Zeek Project. See LICENSE for details. + +# @TEST-REQUIRES: have-spicy +# @TEST-EXEC: zeek -C -r ${TRACES}/ldap/sasl-scram-sha-512.pcap %INPUT +# @TEST-EXEC: cat conn.log | zeek-cut -Cn local_orig local_resp > conn.log2 && mv conn.log2 conn.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff ldap.log +# @TEST-EXEC: btest-diff ldap_search.log +# @TEST-EXEC: ! test -f dpd.log +# @TEST-EXEC: ! test -f analyzer.log +# +# @TEST-DOC: This broke after #3826 got merged diff --git a/testing/btest/scripts/base/protocols/ldap/sasl-srp-who-am-i.zeek b/testing/btest/scripts/base/protocols/ldap/sasl-srp-who-am-i.zeek new file mode 100644 index 0000000000..b467dbe484 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ldap/sasl-srp-who-am-i.zeek @@ -0,0 +1,11 @@ +# Copyright (c) 2024 by the Zeek Project. See LICENSE for details. + +# @TEST-REQUIRES: have-spicy +# @TEST-EXEC: zeek -C -r ${TRACES}/ldap/sasl-srp-who-am-i.pcap %INPUT +# @TEST-EXEC: cat conn.log | zeek-cut -Cn local_orig local_resp > conn.log2 && mv conn.log2 conn.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff ldap.log +# @TEST-EXEC: ! test -f dpd.log +# @TEST-EXEC: ! test -f analyzer.log +# +# @TEST-DOC: SASL authentication using SRP (Secure Remote Password) diff --git a/testing/btest/scripts/base/protocols/ldap/spnego-ntlmssp.zeek b/testing/btest/scripts/base/protocols/ldap/spnego-ntlmssp.zeek new file mode 100644 index 0000000000..e936332b47 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ldap/spnego-ntlmssp.zeek @@ -0,0 +1,15 @@ +# Copyright (c) 2024 by the Zeek Project. See LICENSE for details. +# +# The ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap file was harvested +# from the CTU-SME-11 (Experiment-VM-Microsoft-Windows7AD-1) dataset +# at https://zenodo.org/records/7958259 (DOI 10.5281/zenodo.7958258). 
+ +# @TEST-REQUIRES: have-spicy +# @TEST-EXEC: zeek -C -r ${TRACES}/ldap/ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap +# @TEST-EXEC: cat conn.log | zeek-cut -Cn local_orig local_resp > conn.log2 && mv conn.log2 conn.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff ldap.log +# @TEST-EXEC: ! test -f dpd.log +# @TEST-EXEC: ! test -f analyzer.log +# +# @TEST-DOC: SASL bindRequest with SPNEGO NTLMSSP. diff --git a/testing/btest/scripts/base/protocols/ldap/starttls.zeek b/testing/btest/scripts/base/protocols/ldap/starttls.zeek new file mode 100644 index 0000000000..df94315210 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ldap/starttls.zeek @@ -0,0 +1,25 @@ +# Copyright (c) 2024 by the Zeek Project. See LICENSE for details. + +# @TEST-REQUIRES: have-spicy +# @TEST-EXEC: zeek -C -r ${TRACES}/ldap/ldap-starttls.pcap %INPUT >out +# @TEST-EXEC: cat conn.log | zeek-cut -Cn local_orig local_resp > conn.log2 && mv conn.log2 conn.log +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff ldap.log +# @TEST-EXEC: btest-diff ssl.log +# @TEST-EXEC: ! test -f dpd.log +# @TEST-EXEC: ! test -f analyzer.log +# +# @TEST-DOC: LDAP supports StartTLS through extendedRequest 1.3.6.1.4.1.1466.20037 + +event LDAP::extended_request(c: connection, message_id: int, request_name: string, request_value: string) { + print c$uid, "extended_request", fmt("%s (%s)", request_name, LDAP::EXTENDED_REQUESTS[request_name]), request_value; +} + +event LDAP::extended_response(c: connection, message_id: int, result: LDAP::ResultCode, response_name: string, response_value: string) { + print c$uid, "extended_response", result, response_name, response_value; +} + +event LDAP::starttls(c: connection) { + print c$uid, "LDAP::starttls"; +} diff --git a/testing/btest/scripts/base/protocols/ldap/who-am-i.zeek b/testing/btest/scripts/base/protocols/ldap/who-am-i.zeek new file mode 100644 index 0000000000..6026add5cc --- /dev/null +++ b/testing/btest/scripts/base/protocols/ldap/who-am-i.zeek @@ -0,0 +1,20 @@ +# Copyright (c) 2024 by the Zeek Project. See LICENSE for details. + +# @TEST-REQUIRES: have-spicy +# @TEST-EXEC: zeek -C -r ${TRACES}/ldap/ldap-who-am-i.pcap %INPUT >out +# @TEST-EXEC: cat conn.log | zeek-cut -Cn local_orig local_resp > conn.log2 && mv conn.log2 conn.log +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff ldap.log +# @TEST-EXEC: ! test -f dpd.log +# @TEST-EXEC: ! test -f analyzer.log +# +# @TEST-DOC: Testing OpenLDAP's ldapwhoami utility with simple authentication. 
+ +event LDAP::extended_request(c: connection, message_id: int, request_name: string, request_value: string) { + print c$uid, "extended_request", fmt("%s (%s)", request_name, LDAP::EXTENDED_REQUESTS[request_name]), request_value; +} + +event LDAP::extended_response(c: connection, message_id: int, result: LDAP::ResultCode, response_name: string, response_value: string) { + print c$uid, "extended_response", result, response_name, response_value; +} diff --git a/testing/btest/scripts/base/protocols/mysql/caching_sha2_password-after-auth-switch.test b/testing/btest/scripts/base/protocols/mysql/caching_sha2_password-after-auth-switch.test new file mode 100644 index 0000000000..bcc4a5a870 --- /dev/null +++ b/testing/btest/scripts/base/protocols/mysql/caching_sha2_password-after-auth-switch.test @@ -0,0 +1,50 @@ +# @TEST-EXEC: zeek -b -C -r $TRACES/mysql/caching_sha2_password-after-auth-switch.pcapng %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff mysql.log + +@load base/protocols/mysql + +event mysql_ok(c: connection, affected_rows: count) + { + print "mysql ok", affected_rows; + } + +event mysql_eof(c: connection, is_intermediate: bool) + { + print "mysql eof", is_intermediate; + } + +event mysql_result_row(c: connection, row: string_vec) + { + print "mysql result row", row; + } + +event mysql_error(c: connection, code: count, msg: string) + { + print "mysql error", code, msg; + } + +event mysql_command_request(c: connection, command: count, arg: string) + { + print "mysql request", command, arg; + } + +event mysql_handshake(c: connection, username: string) + { + print "mysql handshake", username; + } + +event mysql_auth_plugin(c: connection, is_orig: bool, name: string, data: string) + { + print "mysql auth plugin", is_orig, name, data, |data|; + } + +event mysql_auth_switch_request(c: connection, name: string, data: string) + { + print "mysql auth switch request", name, data, |data|; + } + +event mysql_auth_more_data(c: connection, is_orig: bool, data: string) + { + print "mysql auth more data", is_orig, data, |data|; + } diff --git a/testing/btest/scripts/base/protocols/mysql/caching_sha2_password.test b/testing/btest/scripts/base/protocols/mysql/caching_sha2_password.test new file mode 100644 index 0000000000..414d0d2c30 --- /dev/null +++ b/testing/btest/scripts/base/protocols/mysql/caching_sha2_password.test @@ -0,0 +1,50 @@ +# @TEST-EXEC: zeek -b -C -r $TRACES/mysql/caching_sha2_password.trace %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff mysql.log + +@load base/protocols/mysql + +event mysql_ok(c: connection, affected_rows: count) + { + print "mysql ok", affected_rows; + } + +event mysql_eof(c: connection, is_intermediate: bool) + { + print "mysql eof", is_intermediate; + } + +event mysql_result_row(c: connection, row: string_vec) + { + print "mysql result row", row; + } + +event mysql_error(c: connection, code: count, msg: string) + { + print "mysql error", code, msg; + } + +event mysql_command_request(c: connection, command: count, arg: string) + { + print "mysql request", command, arg; + } + +event mysql_handshake(c: connection, username: string) + { + print "mysql handshake", username; + } + +event mysql_auth_plugin(c: connection, is_orig: bool, name: string, data: string) + { + print "mysql auth plugin", is_orig, name, data, |data|; + } + +event mysql_auth_switch_request(c: connection, name: string, data: string) + { + print "mysql auth switch request", name, data, |data|; + } + +event mysql_auth_more_data(c: connection, is_orig: bool, data: 
string) + { + print "mysql auth more data", is_orig, data, |data|; + } diff --git a/testing/btest/scripts/base/protocols/mysql/encrypted-aws-rds.test b/testing/btest/scripts/base/protocols/mysql/encrypted-aws-rds.test index d653608aa4..7f336edf80 100644 --- a/testing/btest/scripts/base/protocols/mysql/encrypted-aws-rds.test +++ b/testing/btest/scripts/base/protocols/mysql/encrypted-aws-rds.test @@ -1,15 +1,17 @@ # Just two traces with MySQL running in Amazon RDS tls1.3 and tls1.2 -# @TEST-EXEC: zeek -b -r $TRACES/mysql/tls-12-amazon-rds.trace %INPUT -# @TEST-EXEC: mkdir tls-12 && mv *log tls-12 +# @TEST-EXEC: zeek -b -r $TRACES/mysql/tls-12-amazon-rds.trace %INPUT >out +# @TEST-EXEC: mkdir tls-12 && mv *log out tls-12 # -# @TEST-EXEC: zeek -b -r $TRACES/mysql/tls-13-amazon-rds.trace %INPUT -# @TEST-EXEC: mkdir tls-13 && mv *log tls-13 +# @TEST-EXEC: zeek -b -r $TRACES/mysql/tls-13-amazon-rds.trace %INPUT >out +# @TEST-EXEC: mkdir tls-13 && mv *log out tls-13 # +# @TEST-EXEC: btest-diff tls-12/out # @TEST-EXEC: btest-diff tls-12/conn.log # @TEST-EXEC: btest-diff tls-12/ssl.log # @TEST-EXEC: btest-diff tls-12/x509.log # +# @TEST-EXEC: btest-diff tls-13/out # @TEST-EXEC: btest-diff tls-13/conn.log # @TEST-EXEC: btest-diff tls-13/ssl.log # @TEST-EXEC: ! test -f tls-13/x509.log @@ -17,3 +19,8 @@ @load base/protocols/conn @load base/protocols/mysql @load base/protocols/ssl + +event mysql_ssl_request(c: connection) + { + print "mysql ssl request", c$uid; + } diff --git a/testing/btest/scripts/base/protocols/mysql/encrypted.test b/testing/btest/scripts/base/protocols/mysql/encrypted.test index 1f43ec7da6..808bed3cfb 100644 --- a/testing/btest/scripts/base/protocols/mysql/encrypted.test +++ b/testing/btest/scripts/base/protocols/mysql/encrypted.test @@ -3,8 +3,9 @@ # empty mysql.log file. # @TEST-EXEC: touch mysql.log -# @TEST-EXEC: zeek -b -r $TRACES/mysql/encrypted.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/mysql/encrypted.trace %INPUT >out # @TEST-EXEC: btest-diff mysql.log +# @TEST-EXEC: btest-diff out # # Ensure the connection was handed off by peaking into some other logs. 
# @TEST-EXEC: btest-diff conn.log @@ -14,3 +15,8 @@ @load base/protocols/conn @load base/protocols/mysql @load base/protocols/ssl + +event mysql_ssl_request(c: connection) + { + print "mysql ssl request", c$uid; + } diff --git a/testing/btest/scripts/base/protocols/mysql/many-query-attr.test b/testing/btest/scripts/base/protocols/mysql/many-query-attr.test new file mode 100644 index 0000000000..5ff2e4cb92 --- /dev/null +++ b/testing/btest/scripts/base/protocols/mysql/many-query-attr.test @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -b -C -r $TRACES/mysql/many-query-attrs.pcap %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff mysql.log + +@load base/protocols/mysql + +event mysql_ok(c: connection, affected_rows: count) + { + print "mysql ok", affected_rows; + } + +event mysql_eof(c: connection, is_intermediate: bool) + { + print "mysql eof", is_intermediate; + } + +event mysql_result_row(c: connection, row: string_vec) + { + print "mysql result row", row; + } + +event mysql_error(c: connection, code: count, msg: string) + { + print "mysql error", code, msg; + } + +event mysql_command_request(c: connection, command: count, arg: string) + { + print "mysql request", command, arg; + } + +event mysql_handshake(c: connection, username: string) + { + print "mysql handshake", username; + } diff --git a/testing/btest/scripts/base/protocols/mysql/query-attr-non-string.test b/testing/btest/scripts/base/protocols/mysql/query-attr-non-string.test new file mode 100644 index 0000000000..ef6ef4d8b7 --- /dev/null +++ b/testing/btest/scripts/base/protocols/mysql/query-attr-non-string.test @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -b -C -r $TRACES/mysql/mysql-9.0.0-query-attributes.pcap %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff mysql.log + +@load base/protocols/mysql + +event mysql_ok(c: connection, affected_rows: count) + { + print "mysql ok", affected_rows; + } + +event mysql_eof(c: connection, is_intermediate: bool) + { + print "mysql eof", is_intermediate; + } + +event mysql_result_row(c: connection, row: string_vec) + { + print "mysql result row", row; + } + +event mysql_error(c: connection, code: count, msg: string) + { + print "mysql error", code, msg; + } + +event mysql_command_request(c: connection, command: count, arg: string) + { + print "mysql request", command, arg; + } + +event mysql_handshake(c: connection, username: string) + { + print "mysql handshake", username; + } diff --git a/testing/btest/scripts/base/protocols/mysql/query-attr.test b/testing/btest/scripts/base/protocols/mysql/query-attr.test new file mode 100644 index 0000000000..0f9d33d70b --- /dev/null +++ b/testing/btest/scripts/base/protocols/mysql/query-attr.test @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -b -C -r $TRACES/mysql/query-attr.pcap %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff mysql.log + +@load base/protocols/mysql + +event mysql_ok(c: connection, affected_rows: count) + { + print "mysql ok", affected_rows; + } + +event mysql_eof(c: connection, is_intermediate: bool) + { + print "mysql eof", is_intermediate; + } + +event mysql_result_row(c: connection, row: string_vec) + { + print "mysql result row", row; + } + +event mysql_error(c: connection, code: count, msg: string) + { + print "mysql error", code, msg; + } + +event mysql_command_request(c: connection, command: count, arg: string) + { + print "mysql request", command, arg; + } + +event mysql_handshake(c: connection, username: string) + { + print "mysql handshake", username; + } diff --git 
a/testing/btest/scripts/base/protocols/smb/smb2-zero-byte-error-ioctl.test b/testing/btest/scripts/base/protocols/smb/smb2-zero-byte-error-ioctl.test new file mode 100644 index 0000000000..058d17c29e --- /dev/null +++ b/testing/btest/scripts/base/protocols/smb/smb2-zero-byte-error-ioctl.test @@ -0,0 +1,16 @@ +# @TEST-DOC: Tests handling of PDUs containing error ioctls with byte lengths of zero +# @TEST-EXEC: zeek -b -r $TRACES/smb/smb2-zero-byte-error-ioctl.pcap %INPUT 2>&1 >out +# @TEST-EXEC: ! test -f analyzer.log +# @TEST-EXEC: btest-diff out + +@load base/protocols/smb + +event smb2_close_request(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID) +{ + print "smb2_close_request", hdr; +} + +event smb2_close_response(c: connection, hdr: SMB2::Header, response: SMB2::CloseResponse) +{ + print "smb2_close_response", hdr, response; +} diff --git a/testing/btest/scripts/site/local-compat.test b/testing/btest/scripts/site/local-compat.test index 0d287bff2e..edff8da24d 100644 --- a/testing/btest/scripts/site/local-compat.test +++ b/testing/btest/scripts/site/local-compat.test @@ -27,9 +27,6 @@ redef digest_salt = "Please change this value."; # This script logs which scripts were loaded during each run. @load misc/loaded-scripts -# Apply the default tuning scripts for common tuning settings. -@load tuning/defaults - # Estimate and log capture loss. @load misc/capture-loss diff --git a/testing/btest/signatures/custom-event-errors.zeek b/testing/btest/signatures/custom-event-errors.zeek index 445d68f443..9a6d80eb05 100644 --- a/testing/btest/signatures/custom-event-errors.zeek +++ b/testing/btest/signatures/custom-event-errors.zeek @@ -1,7 +1,6 @@ # @TEST-DOC: Using the wrong paramters for custom signature events. # # @TEST-EXEC-FAIL: zeek -b -s id -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >id.out -# @TEST-EXEC-FAIL: zeek -b -s id2 -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >id.out # @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr @TEST-START-FILE id.sig @@ -24,13 +23,15 @@ signature udp-proto4 { ip-proto == 17 event non_existing_event } -@TEST-END-FILE -@TEST-START-FILE id2.sig -# Using two identifiers is not supported. 
-signature udp-proto-msg-id { +signature udp-proto5 { ip-proto == 17 - event signature_match message_as_id + event cat # builtin function +} + +signature udp-proto6 { + ip-proto == 17 + event ignore_checksums # variable } @TEST-END-FILE diff --git a/testing/btest/signatures/custom-event.zeek b/testing/btest/signatures/custom-event.zeek index 9b9a750a78..2a824eba7f 100644 --- a/testing/btest/signatures/custom-event.zeek +++ b/testing/btest/signatures/custom-event.zeek @@ -10,11 +10,6 @@ signature udp-proto { event my_signature_match3 "message" } -signature udp-proto-msg-id2 { - ip-proto == 17 - event message_as_id -} - signature udp-stuff { dst-ip == mynets event my_signature_match2 @@ -22,8 +17,6 @@ signature udp-stuff { @TEST-END-FILE -const message_as_id = "message from identifier (cannot be changed)"; - const mynets: set[subnet] = { 192.168.1.0/24, 10.0.0.0/8, diff --git a/testing/btest/spicy/event-user-type b/testing/btest/spicy/event-user-type index 75f99b4042..14b0883d5f 100644 --- a/testing/btest/spicy/event-user-type +++ b/testing/btest/spicy/event-user-type @@ -25,8 +25,7 @@ type Y = unit { # @TEST-START-FILE foo.evt protocol analyzer spicy::foo over UDP: - parse with foo::X, - ports { 12345/udp, 31337/udp }; + parse with foo::X; import foo; @@ -36,6 +35,13 @@ on foo::X -> event foo::X($conn, $is_orig, self.y); # @TEST-END-FILE # @TEST-START-FILE foo.zeek +const foo_ports = { 12345/udp, 31337/udp}; + +event zeek_init() +{ + Analyzer::register_for_ports(Analyzer::ANALYZER_SPICY_FOO, foo_ports); +} + event foo::X(c: connection, is_orig: bool, y: foo::Y) { print fmt("is_orig=%d y=%s", is_orig, y); diff --git a/testing/btest/spicy/port-deprecated.evt b/testing/btest/spicy/port-deprecated.evt deleted file mode 100644 index 220a9d1faf..0000000000 --- a/testing/btest/spicy/port-deprecated.evt +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-REQUIRES: have-spicy -# -# @TEST-EXEC: spicyz -d -o test.hlto ./udp-test.evt 2>out.stderr -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out.stderr -# -# @TEST-DOC: Remove with v7.1: Specifying ports is deprecated. 
- -module Test; - -import zeek; - -public type Message = unit { - data: bytes &eod {} -}; - -# @TEST-START-FILE udp-test.evt -protocol analyzer spicy::TEST over UDP: - parse with Test::Message, - port 11337/udp-11340/udp, - ports {31337/udp-31340/udp}; -# @TEST-END-FILE diff --git a/testing/btest/spicy/port-fail.evt b/testing/btest/spicy/port-fail.evt deleted file mode 100644 index e51ca0fb79..0000000000 --- a/testing/btest/spicy/port-fail.evt +++ /dev/null @@ -1,24 +0,0 @@ -# @TEST-REQUIRES: have-spicy -# -# @TEST-EXEC-FAIL: spicyz %INPUT -d -o x.hlto >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=diff-canonifier-spicy btest-diff output -# -# @TEST-DOC: Remove with v7.1 - -protocol analyzer spicy::SSH over TCP: - port 123456/udp; - -@TEST-START-NEXT - -protocol analyzer spicy::SSH over TCP: - port -1/udp; - -@TEST-START-NEXT - -protocol analyzer spicy::SSH over TCP: - port 1/udp-2/tcp; - -@TEST-START-NEXT - -protocol analyzer spicy::SSH over TCP: - port 2/udp-1/udp; diff --git a/testing/btest/spicy/port-range-one-port.zeek b/testing/btest/spicy/port-range-one-port.zeek deleted file mode 100644 index 95c32f2b27..0000000000 --- a/testing/btest/spicy/port-range-one-port.zeek +++ /dev/null @@ -1,24 +0,0 @@ -# @TEST-REQUIRES: have-spicy -# -# @TEST-EXEC: spicyz -o test.hlto udp-test.spicy ./udp-test.evt -# @TEST-EXEC: HILTI_DEBUG=zeek zeek -Cr ${TRACES}/udp-packet.pcap test.hlto %INPUT >out 2>&1 -# @TEST-EXEC: grep -e 'Scheduling analyzer' -e 'error during parsing' < out > out.filtered -# @TEST-EXEC: btest-diff out.filtered - -# @TEST-DOC: Remove with v7.1. Expect a single 'Scheduling analyzer ...' message in the debug output and no parsing errors. There was a bug that 'port 31336/udp' would be wrongly interpreted as a 31336/udp-31337/udp port range. Regression test for #3278. - -# @TEST-START-FILE udp-test.spicy -module UDPTest; - -public type Message = unit { - data: bytes &eod { - assert False: "not reached"; - } -}; -# @TEST-END-FILE - -# @TEST-START-FILE udp-test.evt -protocol analyzer spicy::UDP_TEST over UDP: - parse with UDPTest::Message, - port 31336/udp; -# @TEST-END-FILE diff --git a/testing/btest/spicy/replaces-conflicts.evt b/testing/btest/spicy/replaces-conflicts.evt index dbca6d637e..5ca7610503 100644 --- a/testing/btest/spicy/replaces-conflicts.evt +++ b/testing/btest/spicy/replaces-conflicts.evt @@ -16,7 +16,7 @@ protocol analyzer spicy::SSH_1 over TCP: parse with SSH::Banner, replaces SSH; -protocol analyzer spicy::SSH_1 over UDP: +protocol analyzer spicy::SSH_2 over UDP: parse with SSH::Banner, replaces SSH; diff --git a/testing/coverage/lcov_html.sh b/testing/coverage/lcov_html.sh index ba8c8a37df..be65e4add4 100755 --- a/testing/coverage/lcov_html.sh +++ b/testing/coverage/lcov_html.sh @@ -116,7 +116,7 @@ verify_run "which lcov" \ # 4. Create a "tracefile" through lcov, which is necessary to create output later on. echo -n "Creating tracefile for output generation... " -verify_run "lcov --no-external --capture --directory . --output-file $COVERAGE_FILE" +verify_run "lcov --no-external --capture --directory . --exclude 'testing/btest/.tmp/*' --output-file $COVERAGE_FILE" # 5. Remove a number of 3rdparty and "extra" files that shouldn't be included in the # Zeek coverage numbers. 
diff --git a/testing/external/commit-hash.zeek-testing-cluster b/testing/external/commit-hash.zeek-testing-cluster index 5d84d38106..b8df5d15d0 100644 --- a/testing/external/commit-hash.zeek-testing-cluster +++ b/testing/external/commit-hash.zeek-testing-cluster @@ -1 +1 @@ -ded009fb7a0cdee6f36d5b40a6394788b760fa06 +9f875d86000602661fbfc9bb471d1c598917ebc9 diff --git a/testing/scripts/have-tsan b/testing/scripts/have-tsan new file mode 100755 index 0000000000..8030c8d248 --- /dev/null +++ b/testing/scripts/have-tsan @@ -0,0 +1,7 @@ +#!/bin/sh + +if grep -q "ZEEK_SANITIZERS:STRING=.*thread.*" "${BUILD}"/CMakeCache.txt; then + exit 0 +fi + +exit 1
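The new have-tsan predicate above is consumed from a test's @TEST-REQUIRES line, as the script-args.zeek hunk earlier in this patch shows. A minimal sketch of such a guard follows; the test body and its output file are hypothetical and only the requirement line is taken from this patch:

# @TEST-REQUIRES: ! have-tsan
# @TEST-EXEC: zeek -b %INPUT >out
# @TEST-EXEC: btest-diff out

event zeek_init()
	{
	# Only runs when the build was not configured with --sanitizers=thread.
	print "non-TSAN build";
	}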