Mirror of https://github.com/zeek/zeek.git (synced 2025-10-02 14:48:21 +00:00)

Merge remote-tracking branch 'origin/master' into topic/johanna/spicy-tls

* origin/master: (93 commits)
  spicyz: Add back message about removed support for port / ports in evt
  rule-parse: Remove id_to_str() lookup to squelch coverity warning
  Update doc submodule [nomail] [skip ci]
  Update zeekctl submodule [nomail]
  btest: Skip core.script-args under TSAN
  Update doc submodule [nomail] [skip ci]
  Update zeekctl submodule
  Add note to NEWS about the removal of OpaqueVal::DoSerialize and OpaqueVal::DoUnserialize
  Remove deprecated port/ports fields for spicy analyzers
  Remove deprecated Cluster::Node::interface field
  Remove deprecated signature definition format
  Return an error if GLOBAL:: prefix is used
  Remove deprecated BloomFilter serialization methods
  Remove deprecated OpaqueVal serialization methods
  Remove deprecated DECLARE_OPAQUE_VALUE macro
  Make TypePtr::Capture member variables private
  Remove deprecated Trigger constructor
  Remove deprecated Controller::auto_assign_ports and Controller::auto_assign_start_port
  Remove deprecated load-balancing policy script
  Remove deprecated prometheus telemetry policy script
  ...

Commit 1e282989fe: 202 changed files with 2903 additions and 1097 deletions
.cirrus.yml

@@ -14,9 +14,9 @@ config: &CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WOR
 no_spicy_config: &NO_SPICY_CONFIG --build-type=release --disable-broker-tests --disable-spicy --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror
 static_config: &STATIC_CONFIG --build-type=release --disable-broker-tests --enable-static-broker --enable-static-binpac --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror
 binary_config: &BINARY_CONFIG --prefix=$CIRRUS_WORKING_DIR/install --libdir=$CIRRUS_WORKING_DIR/install/lib --binary-package --enable-static-broker --enable-static-binpac --disable-broker-tests --build-type=Release --ccache --enable-werror
-asan_sanitizer_config: &ASAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=address --enable-fuzzers --enable-coverage --disable-spicy --ccache
+asan_sanitizer_config: &ASAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=address --enable-fuzzers --enable-coverage --ccache --enable-werror
-ubsan_sanitizer_config: &UBSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=undefined --enable-fuzzers --disable-spicy --ccache --enable-werror
+ubsan_sanitizer_config: &UBSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=undefined --enable-fuzzers --ccache --enable-werror
-tsan_sanitizer_config: &TSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=thread --enable-fuzzers --disable-spicy --ccache --enable-werror
+tsan_sanitizer_config: &TSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=thread --enable-fuzzers --ccache --enable-werror

 resources_template: &RESOURCES_TEMPLATE
   cpu: *CPUS

@@ -35,8 +35,7 @@ macos_environment: &MACOS_ENVIRONMENT

 freebsd_resources_template: &FREEBSD_RESOURCES_TEMPLATE
   cpu: 8
-  # Not allowed to request less than 8GB for an 8 CPU FreeBSD VM.
-  memory: 8GB
+  memory: *MEMORY
   # For greediness, see https://medium.com/cirruslabs/introducing-greedy-container-instances-29aad06dc2b4
   greedy: true

CHANGES (418 lines changed)

@@ -1,3 +1,421 @@
7.1.0-dev.141 | 2024-08-12 11:07:32 +0200

  * spicyz: Add back message about removed support for port / ports in evt (Arne Welzel, Corelight)

    spicy-dhcp, spicy-http and spicy-dns all have this still in their .evt files,
    so it seems popular. Be more helpful than "unexpected token" to users.

7.1.0-dev.139 | 2024-08-09 20:57:23 +0200

  * rule-parse: Remove id_to_str() lookup to squelch coverity warning (Arne Welzel, Corelight)

    Coverity didn't like that id_to_str() allocates memory and we didn't
    free it. Remove its usage wholesale.

  * GH-3774: btest: Skip core.script-args under TSAN (Arne Welzel, Corelight)

    TSAN may re-execute the executable when the memory layout doesn't
    fulfill requirements, causing argument confusion when that happens.

    Closes #3774.

  * Update zeekctl submodule [nomail] (Tim Wojtulewicz, Corelight)

7.1.0-dev.133 | 2024-08-08 10:44:31 +0200

  * Update zeekctl submodule (Tim Wojtulewicz)

  * Add note to NEWS about the removal of OpaqueVal::DoSerialize and OpaqueVal::DoUnserialize (Tim Wojtulewicz, Corelight)

7.1.0-dev.129 | 2024-08-07 12:20:22 -0700

  * Remove deprecated port/ports fields for spicy analyzers (Tim Wojtulewicz, Corelight)

  * Remove deprecated Cluster::Node::interface field (Tim Wojtulewicz, Corelight)

  * Remove deprecated signature definition format (Tim Wojtulewicz, Corelight)

  * Return an error if GLOBAL:: prefix is used (Tim Wojtulewicz, Corelight)

  * Remove deprecated BloomFilter serialization methods (Tim Wojtulewicz, Corelight)

  * Remove deprecated OpaqueVal serialization methods (Tim Wojtulewicz, Corelight)

  * Remove deprecated DECLARE_OPAQUE_VALUE macro (Tim Wojtulewicz, Corelight)

  * Make TypePtr::Capture member variables private (Tim Wojtulewicz, Corelight)

    The public versions were marked as deprecated for 7.0, and accessors
    should be used to manage them now.

  * Remove deprecated Trigger constructor (Tim Wojtulewicz, Corelight)

  * Remove deprecated Controller::auto_assign_ports and Controller::auto_assign_start_port (Tim Wojtulewicz, Corelight)

  * Remove deprecated load-balancing policy script (Tim Wojtulewicz, Corelight)

  * Remove deprecated prometheus telemetry policy script (Tim Wojtulewicz, Corelight)

  * Remove deprecated policy/tuning/default package (Tim Wojtulewicz, Corelight)

  * Remove deprecated time machine settings (Tim Wojtulewicz, Corelight)

  * Remove deprecated json NullDoubleWriter class (Tim Wojtulewicz, Corelight)

  * Remove deprecated modbus event definitions (Tim Wojtulewicz, Corelight)

  * Remove Connection::AppendAddl (Tim Wojtulewicz, Corelight)

  * Remove STMT_ANY statement type (Tim Wojtulewicz, Corelight)

  * Remove EventRegistry::Used and EventRegistry::SetUsed (Tim Wojtulewicz, Corelight)

7.1.0-dev.109 | 2024-08-07 14:10:54 +0200

  * ldap: Promote uint8 to uint64 before shifting (Arne Welzel, Corelight)

    Relates to zeek/spicy#1829

7.1.0-dev.107 | 2024-08-07 11:43:15 +0200

  * ci/ubuntu-24.04: Use ccache 4.10.2 (Arne Welzel, Corelight)

    The ccache version shipped with Ubuntu 24.04 does not yet recognize
    --fprofile-update=atomic, so install one that does.

    Now that the asan_sanitizer build also includes building Spicy and
    running the spicyz test suite, ccache is quite important.

    Reference ccache/ccache#1408 and zeek/zeek#3777.

7.1.0-dev.105 | 2024-08-07 10:54:10 +0200

  * telemetry/Manager: Check RegisterFd() return value (Arne Welzel, Corelight)

    Please coverity.

  * telemetry/Manager: Track sent_in and sent_out totals without callback (Arne Welzel, Corelight)

    For terminated threads, the totals would go down once the threads are
    removed, which isn't great. Move tracking of sent in and sent out
    messages from callback to explicit `Inc()` calls.

    Also fixes total_messages_in_metric being initialized twice rather
    than total_messages_out_metric.

  * threading/Manager: Switch inf bucket from infinity() to max() (Arne Welzel, Corelight)

    For uint64_t, std::numeric_limits<T>::has_infinity is false and infinity()
    actually returns 0. Use uint64_t's max() instead. We could cast to double
    and use the double infinity, but this seems reasonable, too.

    This was found while trying to provoke some pending messages and being
    confused why all but the "inf" bucket increased.

  * threading/Manager: "lt" to "le" and do not break (Arne Welzel, Corelight)

    The buckets are specified as lower-equal (changed from lower-than now),
    which means we shouldn't break: The larger "le" bucket contains all previous
    buckets, too. The "inf" bucket represents the current number of threads.

    For example, with a total of 10 threads, 5 threads with 0 messages pending,
    another 4 threads with 50 messages, and one with 2000 messages, the metrics
    would end up as follows:

      pending_buckets{le=1} = 5
      pending_buckets{le=10} = 5
      pending_buckets{le=100} = 9
      pending_buckets{le=1000} = 9
      pending_buckets{le=10000} = 10
      pending_buckets{le=inf} = 10

    This might be strange initially, but aligns with the Prometheus
    histogram approach (though we're using gauges here).

7.1.0-dev.99 | 2024-08-06 20:08:37 +0200

  * Bump auxil/spicy to latest development snapshot (Arne Welzel, Corelight)

  * spicy/runtime-support: Switch ParameterMismatch::_fmt to static (Arne Welzel, Corelight)

    UBSAN's vptr sanitizer isn't happy with the call to _fmt()
    in its member initializer list.

      $ zeek -r Traces/ssh/single-conn.trace .tmp/spicy.event-args-mismatch/test.hlto .tmp/spicy.event-args-mismatch/event-args-mismatch.zeek
      <...>/src/include/zeek/spicy/runtime-support.h:80:29: runtime error: member call on address 0x511000369540 which does not point to an object of type 'zeek::spicy::rt::ParameterMismatch'
      0x511000369540: note: object has invalid vptr
      00 00 00 00 be be be be be be be be be be be be be be be be be be be be be be be be be be be be
      ^~~~~~~~~~~~~~~~~~~~~~~
      invalid vptr
        #0 0x7f9c9977b019 in zeek::spicy::rt::ParameterMismatch::ParameterMismatch(std::basic_string_view<char, std::char_traits<char>>, zeek::IntrusivePtr<zeek::Type> const&, std::basic_string_view<char, std::char_traits<char>>) <...>/src/include/zeek/spicy/runtime-support.h:80:29
        #1 0x7f9c9977a6a2 in zeek::spicy::rt::to_val(hilti::rt::Bytes const&, zeek::IntrusivePtr<zeek::Type> const&) <...>/src/include/zeek/spicy/runtime-support.h:562:15

  * coverage/lcov_html: Ignore testing/btest/.tmp (Arne Welzel, Corelight)

    gcda/gcno files in the btest/.tmp directory are from .hlto files
    referencing ephemeral cc files. No need to include these.

  * cirrus: Do not disable Spicy for sanitizer builds (Arne Welzel, Corelight)

  * ldap: Avoid unset m$opcode (Arne Welzel, Corelight)

    Initial fuzzing caused a bind response to arrive before a bind request,
    resulting in an unset field expression error:

      expression error in base/protocols/ldap/main.zeek, line 270: field value missing (LDAP::m$opcode)

    Prevent this by ensuring m$opcode is set and raising a weird instead.

  * GH-3860: fuzzers: Add LDAP fuzzing (Arne Welzel, Corelight)

    LDAP supports both UDP and TCP as separate analyzers. The corpus
    is identical, however. Started to hit the TLS analyzer fairly
    quickly, too.

    Closes #3860

7.1.0-dev.92 | 2024-08-06 09:01:40 -0700

  * Don't install empty ZAM directories (Tim Wojtulewicz, Corelight)

  * btest/spicy: Make replaces-conflicts trigger replaces code path (Arne Welzel, Corelight)

    The current test attempts to instantiate two spicy::SSH_1 protocol
    analyzers in the .evt file. The intention likely was to use two
    distinct protocol analyzers both trying to replace the builtin SSH
    analyzer.

    Coincidentally, fixing this happens to work around TSAN errors tickled
    by the FatalError() call while loading the .hlto with two identically
    named analyzers.

      $ cat .tmp/spicy.replaces-conflicts/output
      error: redefinition of protocol analyzer spicy::SSH_1
      ThreadSanitizer: main thread finished with ignores enabled
      One of the following ignores was not ended (in order of probability)
      Ignore was enabled at:
        #0 __llvm_gcov_init __linker___d192e45c25d5ee23-484d3e0fc2caf5b4.cc (ssh.hlto+0x34036) (BuildId: 091934ca4da885e7)
        #1 __llvm_gcov_init __linker___d192e45c25d5ee23-484d3e0fc2caf5b4.cc (ssh.hlto+0x34036) (BuildId: 091934ca4da885e7)
        ...

    I was tempted to replace FatalError() with Error() and rely on
    zeek-setup.cc's early exiting on any reporter errors, but this
    seems easier for now.

    Relates to #3865.

7.1.0-dev.87 | 2024-08-05 14:39:56 -0700

  * Remove some unnecessary #includes (Tim Wojtulewicz, Corelight)

  * Avoid capturing 'this' for callback in telemetry::Manager (Tim Wojtulewicz, Corelight)

  * Make telemetry metrics out of MsgThread statistics (Tim Wojtulewicz, Corelight)

  * Split cpu time metric into user/system components like prof.log (Tim Wojtulewicz, Corelight)

    The total can be calculated from the two parts via Prometheus/Grafana
    if desired, so it's more informative to pass them as separate parts.

  * Move broker statistics to be telemetry metrics (Tim Wojtulewicz, Corelight)

  * Add timer counts as telemetry metrics (Tim Wojtulewicz, Corelight)

  * Move thread manager stats to telemetry metric (Tim Wojtulewicz, Corelight)

  * Add extra metrics to session_mgr (Tim Wojtulewicz, Corelight)

    - Sessions killed by activity
    - Current number of sessions across all types

  * Move dns_mgr stats to telemetry instruments (Tim Wojtulewicz, Corelight)

  * Move trigger stats to telemetry instruments (Tim Wojtulewicz, Corelight)

  * Move pulling of global state inside 'expensive' check for stats (Tim Wojtulewicz, Corelight)

7.1.0-dev.74 | 2024-08-02 15:49:40 -0700

  * Process metric callbacks from the main-loop thread (Tim Wojtulewicz, Corelight)

    This prevents the callbacks from being processed on the worker thread
    spawned by Civetweb. It fixes data race issues with lookups involving
    global variables, amongst other threading issues.

  * CI: Use 16GB of memory for FreeBSD builds (Tim Wojtulewicz, Corelight)

7.1.0-dev.68 | 2024-07-26 10:09:48 -0700

  * Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight)

    This in particular pulls in a fix for zeek/spicy#1808.

7.1.0-dev.66 | 2024-07-26 15:14:36 +0200

  * GH-3853: ldap: Recognize SASL+SPNEGO+NTLMSSP (Arne Welzel, Corelight)

    The ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap file was harvested
    from the CTU-SME-11 (Experiment-VM-Microsoft-Windows7AD-1) dataset
    at https://zenodo.org/records/7958259 (DOI 10.5281/zenodo.7958258).

    Closes #3853

7.1.0-dev.63 | 2024-07-25 09:19:20 -0700

  * Use accessor to reach into X509_ALGOR (Theo Buehler)

    Despite already having an accessor, X509_ALGOR wasn't made opaque
    during OpenSSL 1.1.0 development. It would be nice if this could be
    fixed at some point, so avoid reaching into that struct by using the
    accessor.

7.1.0-dev.61 | 2024-07-25 12:02:51 +0200

  * mysql: Simplify length computation (Arne Welzel, Corelight)

    Thanks Tim!

  * mysql: Improve date and time parsing (Fupeng Zhao)

  * btest/mysql: Clean query-attr.pcapng (Arne Welzel, Corelight)

    It contained some unrelated IntelliJ traffic.

  * mysql: Support non-string query attributes (Arne Welzel, Corelight)

    The query attributes aren't exposed to the script layer right now, but this
    should at least parse over them once encountered, plus some fixups.

  * btest/mysql: Add pcap with non-string query attributes (Arne Welzel, Corelight)

    Pcap was generated as follows. It doesn't seem Wireshark even parses
    this properly right now.

      with common.get_connection() as c:
          with c.cursor() as cur:
              date1 = datetime.date(1987, 10, 18)
              datetime1 = datetime.datetime(1990, 9, 26, 12, 13, 14)
              cur.add_attribute("number1", 42)
              cur.add_attribute("string1", "a string")
              cur.add_attribute("date1", date1)
              cur.add_attribute("datetime1", datetime1)
              cur.execute("SELECT version()")
              result = cur.fetchall()
              print("result", result)

  * mysql: Introduce mysql_ssl_request event (Arne Welzel, Corelight)

    This should've been added with fa48c885 for completeness. Do it now.
    The MySQL spec calls it SSLRequest packet, so keep SSL in the name for
    consistency.

  * mysql: Fix EOFIfLegacyThenResultSet (Arne Welzel, Corelight)

    Only expect a result next if an EOF was consumed.

  * mysql: Add data parameter to mysql_auth_plugin (Arne Welzel, Corelight)

    This may contain salt from the server or a hashed password from the client.

  * mysql: Add mysql_auth_plugin, mysql_auth_more_data and mysql_auth_switch_request events (Arne Welzel, Corelight)

    Remove caching_sha2_password parsing/state from the analyzer and implement
    the generic events. If we actually want to peek into the authentication
    mechanism, we could write a separate analyzer for it. For now, treat it
    as opaque values that are exposed to script land.

    The added tests show the --get-server-public-key in use where
    mysql_auth_more_data contains an RSA public key.

  * mysql: AuthSwitchRequest: &enforce a 0xfe / 254 status (Arne Welzel, Corelight)

  * mysql: Make auth_plugin_ a std::string (Arne Welzel, Corelight)

  * mysql: Fix auth_plugin_data_part2 length computation (Arne Welzel, Corelight)

  * Refactored connection phase state handling (Fupeng Zhao)

    Added `ConnectionExpected` enum for expected packet types during the connection phase.

  * Add support for "auth switch" and "query attrs" (Fupeng Zhao)

    Also fix the issue where Resultset could not correctly distinguish between EOF_Packet and OK_Packet.

  * Add support for parsing the "caching_sha2_password" auth plugin (Fupeng Zhao)

7.1.0-dev.45 | 2024-07-24 15:28:59 -0700

  * Update binpac submodule to better format output code [nomail] (Tim Wojtulewicz, Corelight)

7.1.0-dev.43 | 2024-07-24 13:25:52 -0700

  * Update 7.0 NEWS with blurb about multi-PDU parsing causing increased load [nomail] [skip ci] (Tim Wojtulewicz, Corelight)

  * Fix handling of zero-length SMB2 error responses (Tim Wojtulewicz, Corelight)

7.1.0-dev.40 | 2024-07-24 11:18:03 +0200

  * minor optimization of boolean comparisons (Vern Paxson, Corelight)

  * GH-3839: fix & regression test for GH-3839 (spurious warnings for "when" constructs) (Vern Paxson, Corelight)

7.1.0-dev.37 | 2024-07-23 19:18:37 -0700

  * Bump zeek-testing-cluster to reflect deprecation of prometheus.zeek (Christian Kreibich, Corelight)

  * Update doc submodule [nomail] [skip ci] (zeek-bot)

7.1.0-dev.34 | 2024-07-23 08:54:23 -0700

  * Update Mozilla CA list and CT list (Johanna Amann, Corelight)

7.1.0-dev.32 | 2024-07-23 08:51:11 -0700

  * Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight)

7.1.0-dev.30 | 2024-07-23 12:38:54 +0200

  * ldap: Add heuristic for wrap tokens (Arne Welzel, Corelight)

    Instead of dissecting the GSSAPI handshake, add another heuristic
    into MaybeEncrypted to check for the WRAP token identifier.

    After this change, the pcap on the following ticket is processed
    nicely: https://gitlab.com/wireshark/migration-test/-/issues/9398

  * ldap: Ignore ec/rrc for sealed wrap tokens (Arne Welzel, Corelight)

    It shouldn't matter for the encrypted payload that we'll
    just consume and ignore.

  * ldap: Add LDAP sample with SASL-SRP mechanism (Arne Welzel, Corelight)

    This is what @dopheide-esnet actually saw. Produced with a custom
    cyrus-sasl and openldap build :-(

  * ldap: Reintroduce encryption after SASL heuristic (Arne Welzel, Corelight)

    @dopheide-esnet provided sample captures where SASL SRP is used as
    a SASL mechanism and the follow-up LDAP messages are encrypted. It's
    not clear how to determine whether encryption will or will not happen,
    so re-add a heuristic to determine this based on the first byte of
    the first message *after* the successful bindResponse handshake. If
    that byte is 0x30, assume cleartext.

    I haven't been able to produce such pcaps, unfortunately, but the
    cleartext path is tested via the existing sasl-ntlm.pcap.

  * ldap: Fix assuming GSS-SPNEGO for all bindResponses (Arne Welzel, Corelight)

    In retrospect that's an obvious bug.

  * ldap: Implement extended request/response and StartTLS support (Arne Welzel, Corelight)

    PCAP was produced with a local OpenLDAP server configured to support StartTLS.

    This puts the Zeek calls into a separate ldap_zeek.spicy file/module
    to separate it from LDAP.

7.1.0-dev.23 | 2024-07-23 10:02:52 +0200

  * telemetry: Deprecate prometheus.zeek policy script (Arne Welzel, Corelight)

@@ -340,7 +340,6 @@ add_zeek_dynamic_plugin_build_interface_include_directories(
     ${PROJECT_SOURCE_DIR}/auxil/binpac/lib
     ${PROJECT_SOURCE_DIR}/auxil/broker/libbroker
     ${PROJECT_SOURCE_DIR}/auxil/paraglob/include
-    ${PROJECT_SOURCE_DIR}/auxil/rapidjson/include
     ${PROJECT_SOURCE_DIR}/auxil/prometheus-cpp/core/include
     ${CMAKE_BINARY_DIR}/src
     ${CMAKE_BINARY_DIR}/src/include

@@ -348,10 +347,6 @@ add_zeek_dynamic_plugin_build_interface_include_directories(
     ${CMAKE_BINARY_DIR}/auxil/broker/libbroker
     ${CMAKE_BINARY_DIR}/auxil/prometheus-cpp/core/include)

-# threading/formatters/JSON.h includes rapidjson headers and may be used
-# by external plugins, extend the include path.
-target_include_directories(zeek_dynamic_plugin_base SYSTEM
-    INTERFACE $<INSTALL_INTERFACE:include/zeek/3rdparty/rapidjson/include>)
 target_include_directories(
     zeek_dynamic_plugin_base SYSTEM
     INTERFACE $<INSTALL_INTERFACE:include/zeek/3rdparty/prometheus-cpp/include>)

@@ -1010,9 +1005,6 @@ include(BuiltInSpicyAnalyzer)
 include_directories(BEFORE ${PCAP_INCLUDE_DIR} ${BIND_INCLUDE_DIR} ${BinPAC_INCLUDE_DIR}
                     ${ZLIB_INCLUDE_DIR} ${JEMALLOC_INCLUDE_DIR})

-install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/rapidjson/include/rapidjson
-        DESTINATION include/zeek/3rdparty/rapidjson/include)
-
 install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/filesystem/include/ghc
         DESTINATION include/zeek/3rdparty/)

NEWS (18 lines changed)

@@ -9,11 +9,22 @@ Zeek 7.1.0
 Breaking Changes
 ----------------

+* The ``OpaqueVal::DoSerialize`` and ``OpaqueVal::DoUnserialize`` methods were
+  marked as deprecated in v7.0 and have now been removed as per the Zeek
+  deprecation policy. Plugins that were overriding these methods and were not
+  updated will fail to compile. Those plugins should be updated to override the
+  new ``OpaqueVal::DoSerializeData`` and ``OpaqueVal::DoUnserializeData``
+  methods.
+
 New Functionality
 -----------------

 * The LDAP analyzer now supports handling of non-sealed GSS-API WRAP tokens.

+* StartTLS support was added to the LDAP analyzer. The SSL analyzer is enabled
+  for connections where client and server negotiate to TLS through the extended
+  request/response mechanism.
+
 Changed Functionality
 ---------------------

@@ -21,6 +32,10 @@ Changed Functionality
   made more strict and predictable. Please provide input if this results in
   less visibility in your environment.

+* The MySQL analyzer has been improved to better support plugin authentication
+  mechanisms, like caching_sha2_password, as well as recognizing MySQL query
+  attributes.
+
 Removed Functionality
 ---------------------

@@ -109,7 +124,8 @@ New Functionality
   environment variable configures the addition.

 - SMB2 packets containing multiple PDUs now correctly parse all of the headers,
-  instead of just the first one and ignoring the rest.
+  instead of just the first one and ignoring the rest. This may cause increased
+  CPU load on SMB2-heavy networks.

 - The new built-in function ``lookup_connection_analyzer_id()`` retrieves the
   numeric identifier of an analyzer associated with a connection. This enables

VERSION (2 lines changed)

@@ -1 +1 @@
-7.1.0-dev.23
+7.1.0-dev.141

@@ -1 +1 @@
-Subproject commit a5c8f19fb49c60171622536fa6d369fa168f19e0
+Subproject commit 6e494ed5b3d0a121cd1e1dd18b18e40d7e937675

@@ -1 +1 @@
-Subproject commit 2fec7205d1a9cb4829b86c943d599696d53de85c
+Subproject commit 4649065e2a1dd21c81e41cd6007dce5486b77fc0

@@ -1 +1 @@
-Subproject commit 4a1b43ef07d1305a7e88a4f0866068dc49de9d06
+Subproject commit 7cddc357ff83175984e19037f1f8062a69cf2030

@@ -1 +1 @@
-Subproject commit 39c0ee1e1742bb28dff57632ee4620f905b892e7
+Subproject commit 7e1a8448083ef0013f15e67ce001836e680589a2

|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION 20240528
|
ENV DOCKERFILE_VERSION 20240807
|
||||||
|
|
||||||
RUN apt-get update && apt-get -y install \
|
RUN apt-get update && apt-get -y install \
|
||||||
bc \
|
bc \
|
||||||
|
@ -41,3 +41,25 @@ RUN apt-get update && apt-get -y install \
|
||||||
|
|
||||||
RUN pip3 install --break-system-packages junit2html
|
RUN pip3 install --break-system-packages junit2html
|
||||||
RUN gem install coveralls-lcov
|
RUN gem install coveralls-lcov
|
||||||
|
|
||||||
|
# Download a newer pre-built ccache version that recognizes -fprofile-update=atomic
|
||||||
|
# which is used when building with --coverage.
|
||||||
|
#
|
||||||
|
# This extracts the tarball into /opt/ccache-<version>-<platform> and
|
||||||
|
# symlinks the executable to /usr/local/bin/ccache.
|
||||||
|
#
|
||||||
|
# See: https://ccache.dev/download.html
|
||||||
|
ENV CCACHE_VERSION=4.10.2
|
||||||
|
ENV CCACHE_PLATFORM=linux-x86_64
|
||||||
|
ENV CCACHE_URL=https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}-${CCACHE_PLATFORM}.tar.xz
|
||||||
|
ENV CCACHE_SHA256=80cab87bd510eca796467aee8e663c398239e0df1c4800a0b5dff11dca0b4f18
|
||||||
|
RUN cd /opt \
|
||||||
|
&& if [ "$(uname -p)" != "x86_64" ]; then echo "cannot use ccache pre-built for x86_64!" >&2; exit 1 ; fi \
|
||||||
|
&& curl -L --fail --max-time 30 $CCACHE_URL -o ccache.tar.xz \
|
||||||
|
&& sha256sum ./ccache.tar.xz >&2 \
|
||||||
|
&& echo "${CCACHE_SHA256} ccache.tar.xz" | sha256sum -c - \
|
||||||
|
&& tar xvf ./ccache.tar.xz \
|
||||||
|
&& ln -s $(pwd)/ccache-${CCACHE_VERSION}-${CCACHE_PLATFORM}/ccache /usr/local/bin/ccache \
|
||||||
|
&& test "$(command -v ccache)" = "/usr/local/bin/ccache" \
|
||||||
|
&& test "$(ccache --print-version)" = "${CCACHE_VERSION}" \
|
||||||
|
&& rm ./ccache.tar.xz
|
||||||
|
|
doc (2 lines changed)

@@ -1 +1 @@
-Subproject commit f65820ff0faf2887799fe691a443b5db39eeed54
+Subproject commit f450f803d3e69cb2fd474a919b7a6c6885f1f433

@@ -40,10 +40,6 @@ export {
     ## worker nodes in a cluster. Used with broker-enabled cluster communication.
     const worker_topic = "zeek/cluster/worker" &redef;

-    ## The topic name used for exchanging messages that are relevant to
-    ## time machine nodes in a cluster. Used with broker-enabled cluster communication.
-    const time_machine_topic = "zeek/cluster/time_machine" &redef &deprecated="Remove in v7.1: Unused.";
-
     ## A set of topic names to be used for broadcasting messages that are
     ## relevant to all nodes in a cluster. Currently, there is not a common
     ## topic to broadcast to, because enabling implicit Broker forwarding would

@@ -53,9 +49,6 @@ export {
         manager_topic,
         proxy_topic,
         worker_topic,
-        @pragma push ignore-deprecations
-        time_machine_topic,
-        @pragma pop ignore-deprecations
     };

     ## The topic prefix used for exchanging messages that are relevant to

@@ -169,10 +162,6 @@ export {
         PROXY,
         ## The node type doing all the actual traffic analysis.
         WORKER,
-        ## A node acting as a traffic recorder using the
-        ## `Time Machine <https://github.com/zeek/time-machine>`_
-        ## software.
-        TIME_MACHINE &deprecated="Remove in v7.1: Unused.",
     };

     ## Record type to indicate a node in a cluster.

@@ -187,12 +176,8 @@ export {
         ## The port that this node will listen on for peer connections.
         ## A value of ``0/unknown`` means the node is not pre-configured to listen.
         p: port &default=0/unknown;
-        ## Identifier for the interface a worker is sniffing.
-        interface: string &optional &deprecated="Remove in v7.1: interface is not required and not set consistently on workers. Replace usages with packet_source() or keep a separate worker-to-interface mapping in a global table.";
         ## Name of the manager node this node uses. For workers and proxies.
         manager: string &optional;
-        ## Name of a time machine node with which this node connects.
-        time_machine: string &optional &deprecated="Remove in v7.1: Unused.";
         ## A unique identifier assigned to the node by the broker framework.
         ## This field is only set while a node is connected.
         id: string &optional;
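
Note on the removed Cluster::Node$interface field above: its deprecation text suggests using packet_source() on the worker, or a separate worker-to-interface mapping, instead. A minimal Zeek sketch of the packet_source() route, run on the worker itself; this assumes the built-in packet_source() returning a PacketSource record with live and path fields, and the handler is illustrative only, not part of this change:

    # Sketch: report the interface (or trace file) this worker reads from,
    # instead of consulting the removed Cluster::Node$interface field.
    event zeek_init()
        {
        local src = packet_source();
        if ( src$live )
            Reporter::info(fmt("worker %s sniffing from %s", Cluster::node, src$path));
        else
            Reporter::info(fmt("worker %s reading trace %s", Cluster::node, src$path));
        }
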
@@ -94,11 +94,6 @@ event zeek_init() &priority=-10
     case WORKER:
         Broker::subscribe(Cluster::worker_topic);
         break;
-    @pragma push ignore-deprecations
-    case TIME_MACHINE:
-        Broker::subscribe(Cluster::time_machine_topic);
-        break;
-    @pragma pop ignore-deprecations
     default:
         Reporter::error(fmt("Unhandled cluster node type: %s", self$node_type));
         return;

@@ -121,11 +116,6 @@ event zeek_init() &priority=-10
     case MANAGER:
         connect_peers_with_type(LOGGER);

-        @pragma push ignore-deprecations
-        if ( self?$time_machine )
-            connect_peer(TIME_MACHINE, self$time_machine);
-        @pragma pop ignore-deprecations
-
         break;
     case PROXY:
         connect_peers_with_type(LOGGER);

@@ -141,11 +131,6 @@ event zeek_init() &priority=-10
         if ( self?$manager )
             connect_peer(MANAGER, self$manager);

-        @pragma push ignore-deprecations
-        if ( self?$time_machine )
-            connect_peer(TIME_MACHINE, self$time_machine);
-        @pragma pop ignore-deprecations
-
         break;
     }
 }

@@ -43,10 +43,6 @@ function __init_cluster_nodes(): bool
         typ = rolemap[endp$role];

         cnode = [$node_type=typ, $ip=endp$host, $p=endp$p];
-        @pragma push ignore-deprecations
-        if ( endp?$interface )
-            cnode$interface = endp$interface;
-        @pragma pop ignore-deprecations
         if ( |manager_name| > 0 && cnode$node_type != Cluster::MANAGER )
             cnode$manager = manager_name;
         if ( endp?$metrics_port )

@@ -5256,12 +5256,6 @@ const dpd_ignore_ports = F &redef;
 ## connection if it misses the initial handshake.
 const likely_server_ports: set[port] &redef;

-## If true, output profiling for Time-Machine queries.
-const time_machine_profiling = F &redef &deprecated="Remove in v7.1. Unused.";
-
-## If true, warns about unused event handlers at startup.
-const check_for_unused_event_handlers = F &redef &deprecated="Remove in v7.1. This has been replaced by usage analyzer functionality.";
-
 ## Holds the filename of the trace file given with ``-w`` (empty if none).
 ##
 ## .. zeek:see:: record_all_packets

@@ -5883,6 +5877,13 @@ export {

     type MetricVector : vector of Metric;
     type HistogramMetricVector : vector of HistogramMetric;
+
+    ## Maximum amount of time for CivetWeb HTTP threads to
+    ## wait for metric callbacks to complete on the IO loop.
+    const callback_timeout: interval = 5sec &redef;
+
+    ## Number of CivetWeb threads to use.
+    const civetweb_threads: count = 2 &redef;
 }

 module GLOBAL;
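
The two tunables added to the telemetry export block above carry &redef, so deployments can adjust them from site policy. A small sketch, assuming they are exported from the Telemetry module (the surrounding export block defines the Telemetry metric types; the exact namespace is an assumption here, not stated in this diff):

    # Sketch, assuming Telemetry:: is the namespace of the new constants:
    # give metric callbacks more time and run more CivetWeb threads.
    redef Telemetry::callback_timeout = 10sec;
    redef Telemetry::civetweb_threads = 4;
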
@@ -120,4 +120,11 @@ export {
         "searching", [ LDAP::SearchDerefAlias_DEREF_FINDING_BASE ] =
         "finding", [ LDAP::SearchDerefAlias_DEREF_ALWAYS ] = "always", }
         &default="unknown";
+
+    const EXTENDED_REQUESTS = {
+        # StartTLS, https://datatracker.ietf.org/doc/html/rfc4511#section-4.14.1
+        [ "1.3.6.1.4.1.1466.20037" ] = "StartTLS",
+        # whoami, https://datatracker.ietf.org/doc/html/rfc4532#section-2
+        [ "1.3.6.1.4.1.4203.1.11.3" ] = "whoami",
+    } &default="unknown" &redef;
 }
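
Because the new EXTENDED_REQUESTS table above is marked &redef and defaults to "unknown", local policy can label additional LDAP extended operations without touching the base script. A minimal sketch; the Password Modify OID from RFC 3062 is an illustrative addition, not part of this change:

    # Sketch: map one more extended-operation OID to a friendly name.
    redef LDAP::EXTENDED_REQUESTS += {
        # Password Modify, https://datatracker.ietf.org/doc/html/rfc3062
        [ "1.3.6.1.4.1.4203.1.11.1" ] = "Password Modify",
    };
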
@@ -229,6 +229,10 @@ event LDAP::message(c: connection,
             fmt("%s: %s -> %s", message_id, m$opcode, opcode_str), "LDAP");
         }

+        m$opcode = opcode_str;
+    } else if ( ! m?$opcode ) {
+        # This can happen if we see a bind response before the bind request.
+        Reporter::conn_weird("LDAP_bind_without_opcode", c, fmt("%s: %s", message_id, opcode_str), "LDAP");
         m$opcode = opcode_str;
     }

@@ -258,6 +262,9 @@ event LDAP::message(c: connection,
     }

     m$object = object;
+
+    if ( opcode == LDAP::ProtocolOpcode_EXTENDED_REQUEST )
+        m$object += fmt(" (%s)", EXTENDED_REQUESTS[object]);
     }

     if ( argument != "" ) {

@@ -98,3 +98,44 @@ global LDAP::search_result_entry: event (
     message_id: int,
     object_name: string
 );
+
+## Event generated for each ExtendedRequest in LDAP messages.
+##
+## c: The connection.
+##
+## message_id: The messageID element.
+##
+## request_name: The name of the extended request.
+##
+## request_value: The value of the extended request (empty if missing).
+global LDAP::extended_request: event (
+    c: connection,
+    message_id: int,
+    request_name: string,
+    request_value: string
+);
+
+## Event generated for each ExtendedResponse in LDAP messages.
+##
+## c: The connection.
+##
+## message_id: The messageID element.
+##
+## result: The result code of the response.
+##
+## response_name: The name of the extended response (empty if missing).
+##
+## response_value: The value of the extended response (empty if missing).
+global LDAP::extended_response: event (
+    c: connection,
+    message_id: int,
+    result: LDAP::ResultCode,
+    response_name: string,
+    response_value: string
+);
+
+## Event generated when a plaintext LDAP connection switched to TLS.
+##
+## c: The connection.
+##
+global LDAP::starttls: event(c: connection);
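
The declarations above give the complete signatures for the new LDAP extended request/response and StartTLS events, so a policy script can hook them directly. A minimal sketch using exactly those signatures; the handlers and messages are illustrative only:

    # Sketch: surface StartTLS upgrades and extended requests.
    event LDAP::starttls(c: connection)
        {
        Reporter::info(fmt("LDAP connection %s upgraded to TLS via StartTLS", c$uid));
        }

    event LDAP::extended_request(c: connection, message_id: int, request_name: string, request_value: string)
        {
        print fmt("LDAP extended request %d on %s: %s", message_id, c$uid, request_name);
        }
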
@@ -1,9 +1,9 @@
 #
 # Do not edit this file. This file is automatically generated by gen-ct-list.pl
-# File generated at Fri Feb 23 11:37:01 2024
+# File generated at Tue Jul 23 16:04:45 2024
 # File generated from https://www.gstatic.com/ct/log_list/v3/log_list.json
-# Source file generated at: 2024-02-22T12:56:21Z
+# Source file generated at: 2024-07-23T13:06:08Z
-# Source file version: 32.9
+# Source file version: 39.1
 #

 @load base/protocols/ssl

@@ -12,21 +12,32 @@ redef ct_logs += {
["\xee\xcd\xd0\x64\xd5\xdb\x1a\xce\xc5\x5c\xb7\x9d\xb4\xcd\x13\xa2\x32\x87\x46\x7c\xbc\xec\xde\xc3\x51\x48\x59\x46\x71\x1f\xb5\x9b"] = CTInfo($description="Google 'Argon2024' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x1d\xb9\x6c\xa9\xcb\x69\x94\xc5\x5c\xe6\xb6\xa6\x03\xbb\xd2\xb8\xdc\x54\x43\x17\x28\x99\x0c\x06\x01\x50\x1d\x9d\x64\xc0\x59\x46\x2b\xdc\xc8\x03\x1d\x05\xb4\x2d\xa8\x09\xf7\x99\x41\xed\x04\xfb\xe5\x57\xba\x26\x04\xf6\x11\x52\xce\x14\x65\x3b\x2f\x76\x2b\xc0"),
["\x4e\x75\xa3\x27\x5c\x9a\x10\xc3\x38\x5b\x6c\xd4\xdf\x3f\x52\xeb\x1d\xf0\xe0\x8e\x1b\x8d\x69\xc0\xb1\xfa\x64\xb1\x62\x9a\x39\xdf"] = CTInfo($description="Google 'Argon2025h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x20\x82\xa1\xf9\x67\x68\xa8\xe4\xdb\x94\x98\xe2\xe1\x68\x87\xe4\x09\x6d\x20\x35\x33\x38\x3c\xaf\x14\xaa\xd7\x08\x18\xf0\xfd\x16\x9b\xd3\xff\x7c\x27\x82\xd4\x87\xb7\x4e\x24\x46\x3b\xfb\xae\xbe\xc8\x23\x52\x20\x2b\xaa\x44\x05\xfe\x54\xf9\xd5\xf1\x1d\x45\x9a"),
["\x12\xf1\x4e\x34\xbd\x53\x72\x4c\x84\x06\x19\xc3\x8f\x3f\x7a\x13\xf8\xe7\xb5\x62\x87\x88\x9c\x6d\x30\x05\x84\xeb\xe5\x86\x26\x3a"] = CTInfo($description="Google 'Argon2025h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xaf\xe4\xf3\x94\x2c\xdf\xa6\x27\xb5\xfe\xb2\x61\x83\x19\xc8\x21\x3a\x23\xa8\xa9\x3d\x54\xaf\xbc\x31\x9a\x1c\xd3\xc1\xe3\xb6\xc2\xf3\x0f\xc7\xb9\xca\x3b\x1d\x79\x65\x61\x22\x25\x82\x56\x4e\x98\xe8\xaa\x26\x29\x36\x1e\x28\x60\x6f\xeb\x15\x6e\xf7\x7c\xd0\xba"),
["\x0e\x57\x94\xbc\xf3\xae\xa9\x3e\x33\x1b\x2c\x99\x07\xb3\xf7\x90\xdf\x9b\xc2\x3d\x71\x32\x25\xdd\x21\xa9\x25\xac\x61\xc5\x4e\x21"] = CTInfo($description="Google 'Argon2026h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x07\xfc\x1e\xe8\x63\x8e\xff\x1c\x31\x8a\xfc\xb8\x1e\x19\x2b\x60\x50\x00\x3e\x8e\x9e\xda\x77\x37\xe3\xa5\xa8\xda\x8d\x94\xf8\x6b\xe8\x3d\x64\x8f\x27\x3f\x75\xb3\xfc\x6b\x12\xf0\x37\x06\x4f\x64\x58\x75\x14\x5d\x56\x52\xe6\x6a\x2b\x14\x4c\xec\x81\xd1\xea\x3e"),
["\xd7\x6d\x7d\x10\xd1\xa7\xf5\x77\xc2\xc7\xe9\x5f\xd7\x00\xbf\xf9\x82\xc9\x33\x5a\x65\xe1\xd0\xb3\x01\x73\x17\xc0\xc8\xc5\x69\x77"] = CTInfo($description="Google 'Argon2026h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2026h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2a\x3a\x67\x8b\xfe\xba\x0c\x86\x2b\x4a\x51\x8a\xe9\x17\xfe\x7b\xa1\x76\x73\xfd\xbc\x65\x4b\xc3\x27\xbf\x4d\xf3\x5f\xa0\xca\x29\x80\x11\x20\x32\x78\xd6\x7e\xf9\x34\x60\x8c\x75\xa0\xf5\x35\x50\x9c\xa1\xd3\x49\x4d\x13\xd5\x3b\x6a\x0e\xea\x45\x9d\x24\x13\x22"),
["\x76\xff\x88\x3f\x0a\xb6\xfb\x95\x51\xc2\x61\xcc\xf5\x87\xba\x34\xb4\xa4\xcd\xbb\x29\xdc\x68\x42\x0a\x9f\xe6\x67\x4c\x5a\x3a\x74"] = CTInfo($description="Google 'Xenon2024' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xb9\x60\xe0\x34\x1e\x35\xe4\x65\x00\x93\x4f\x90\x09\xbd\x5a\xec\x44\xdd\x8c\x0f\xce\xed\x11\x3e\x2a\x59\x46\x9a\x31\xb6\xc7\x99\xf7\xdc\xef\x3d\xcd\x8f\x86\xc2\x35\xa5\x3e\xdc\x29\xba\xbb\xf2\x54\xe2\xa8\x0c\x83\x08\x51\x06\xde\x21\x6d\x36\x50\x8e\x38\x4d"),
["\xcf\x11\x56\xee\xd5\x2e\x7c\xaf\xf3\x87\x5b\xd9\x69\x2e\x9b\xe9\x1a\x71\x67\x4a\xb0\x17\xec\xac\x01\xd2\x5b\x77\xce\xcc\x3b\x08"] = CTInfo($description="Google 'Xenon2025h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x82\xe2\xce\x90\x40\x3f\x81\x0e\xdf\xea\xe1\x20\x2b\x5e\x2e\x30\x54\x46\x81\xb9\x58\xed\xaf\xbd\xff\x36\xa7\x9e\x0b\x5f\x6a\x6b\x91\xa5\xc1\x98\xe1\xf2\xcd\xeb\x17\x20\x70\xca\x2a\x12\xe6\x54\x78\x50\xdc\xff\x6d\xfd\x1c\xa7\xb6\x3a\x1f\xf9\x26\xa9\x1b\xbd"),
["\xdd\xdc\xca\x34\x95\xd7\xe1\x16\x05\xe7\x95\x32\xfa\xc7\x9f\xf8\x3d\x1c\x50\xdf\xdb\x00\x3a\x14\x12\x76\x0a\x2c\xac\xbb\xc8\x2a"] = CTInfo($description="Google 'Xenon2025h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x6b\xe0\xaf\xed\x06\x7c\x3d\xef\xd9\x0e\xe4\x58\x4b\x04\xd8\x2a\x47\x99\x90\x89\x7a\xb9\x36\xa5\x75\xc8\x04\xb8\xcb\xe2\xaa\x2b\xb5\x68\x9d\x88\x29\xa2\xa5\xcf\xce\x2b\x9a\x15\x9b\xa0\x3e\x9d\x94\x1c\xb2\xb7\x4a\xf2\x51\xec\x40\xed\x62\x47\xa4\x03\x49\x86"),
["\x96\x97\x64\xbf\x55\x58\x97\xad\xf7\x43\x87\x68\x37\x08\x42\x77\xe9\xf0\x3a\xd5\xf6\xa4\xf3\x36\x6e\x46\xa4\x3f\x0f\xca\xa9\xc6"] = CTInfo($description="Google 'Xenon2026h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x3a\x1f\xc8\xbb\xce\xd5\x90\x47\x34\xca\xca\x01\x04\x27\x21\x1c\xe2\x29\x3d\x92\xbb\x91\x45\xc7\x5a\x3e\xa5\xd4\xf2\x12\xe6\xe8\xe6\x43\xba\xf3\x7b\xc2\x38\xaf\xfc\x23\x8a\x05\x56\xeb\x03\x0a\x30\xcc\x63\x6c\xd9\x3c\xbe\xf5\x7b\x94\xba\x94\xd3\xbf\x88\x4c"),
["\xd8\x09\x55\x3b\x94\x4f\x7a\xff\xc8\x16\x19\x6f\x94\x4f\x85\xab\xb0\xf8\xfc\x5e\x87\x55\x26\x0f\x15\xd1\x2e\x72\xbb\x45\x4b\x14"] = CTInfo($description="Google 'Xenon2026h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2026h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe5\x77\x78\x95\x71\x28\xb3\x95\xc9\xa5\xcc\x7a\x4c\xe8\x32\x03\x96\x7b\xfc\x2e\x1d\xb9\xa4\xdb\x43\xa0\xbd\x69\x72\xf9\x45\xba\x9a\xc3\xe9\x96\xd5\x70\xe7\x0d\x7e\xc9\x95\x15\x27\x8a\x72\x30\x65\x86\x43\x53\xdc\x11\x44\x18\x49\x98\x25\x68\xa7\x3c\x05\xbf"),
["\xda\xb6\xbf\x6b\x3f\xb5\xb6\x22\x9f\x9b\xc2\xbb\x5c\x6b\xe8\x70\x91\x71\x6c\xbb\x51\x84\x85\x34\xbd\xa4\x3d\x30\x48\xd7\xfb\xab"] = CTInfo($description="Cloudflare 'Nimbus2024' Log", $operator="Cloudflare", $url="https://ct.cloudflare.com/logs/nimbus2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x77\xb1\x9b\x7b\x8f\xe6\x8b\x35\xfe\x3a\x92\x29\x2d\xac\x8a\x8d\x51\x8a\x25\xfc\x93\xb6\xd7\xa0\x8b\x29\x37\x71\x1d\x33\xca\xcc\x33\xea\x28\xb9\x1f\xe2\xac\xc3\xa9\x5d\xdd\x97\xbe\xf6\x9e\x94\x25\xdd\x36\x81\xd1\xeb\x5d\x29\xc3\x2b\x44\xf1\x5b\xca\x15\x48"),
["\xcc\xfb\x0f\x6a\x85\x71\x09\x65\xfe\x95\x9b\x53\xce\xe9\xb2\x7c\x22\xe9\x85\x5c\x0d\x97\x8d\xb6\xa9\x7e\x54\xc0\xfe\x4c\x0d\xb0"] = CTInfo($description="Cloudflare 'Nimbus2025'", $operator="Cloudflare", $url="https://ct.cloudflare.com/logs/nimbus2025/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x1a\x80\x1a\x15\x19\x19\x23\x79\xb4\xfa\xa0\x79\x8e\x8d\xd5\xc1\xdc\xc2\xb5\x96\x92\x7e\x94\xe0\xc3\x7e\x14\x7c\x0a\x0d\x2d\x46\xa8\x9d\x1b\xb1\x41\x65\x0c\x5f\x98\xc4\x5a\x17\x79\x81\x5b\x4a\x14\x41\xec\xaf\xa9\x5d\x0e\xab\x12\x19\x71\xcd\x43\xef\xbb\x97"),
["\x48\xb0\xe3\x6b\xda\xa6\x47\x34\x0f\xe5\x6a\x02\xfa\x9d\x30\xeb\x1c\x52\x01\xcb\x56\xdd\x2c\x81\xd9\xbb\xbf\xab\x39\xd8\x84\x73"] = CTInfo($description="DigiCert Yeti2024 Log", $operator="DigiCert", $url="https://yeti2024.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x57\xb8\xc1\x6f\x30\xa4\x7f\x2e\xe4\xf0\xd0\xd9\x60\x62\x13\x95\xe3\x7a\xe3\x4e\x53\xc3\xb3\xb8\x73\x85\xc1\x18\x0d\x23\x0e\x58\x84\xd2\x78\xef\x9b\xb3\x1e\x2c\x1a\xde\xc1\x8f\x81\x1b\x19\x44\x58\xb7\x00\x77\x60\x20\x1a\x72\xd8\x82\xde\xae\x9e\xb1\xc6\x4b"),
["\x7d\x59\x1e\x12\xe1\x78\x2a\x7b\x1c\x61\x67\x7c\x5e\xfd\xf8\xd0\x87\x5c\x14\xa0\x4e\x95\x9e\xb9\x03\x2f\xd9\x0e\x8c\x2e\x79\xb8"] = CTInfo($description="DigiCert Yeti2025 Log", $operator="DigiCert", $url="https://yeti2025.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xdf\x95\x00\x5e\x10\xc1\x01\xf7\x37\xe3\x10\x74\xd1\xff\xb2\xca\x90\xed\x32\x99\x5f\x0c\x39\xfe\xa1\xd1\x13\x11\xac\xd1\xb3\x73\x93\x20\xc2\x13\x3c\x4c\xb5\x7a\x52\x86\x86\x3d\xe3\x95\x24\x7c\xd8\x91\x98\x48\x3b\xf0\xf0\xdf\x21\xf1\xb0\x81\x5a\x59\x25\x43"),
["\x73\xd9\x9e\x89\x1b\x4c\x96\x78\xa0\x20\x7d\x47\x9d\xe6\xb2\xc6\x1c\xd0\x51\x5e\x71\x19\x2a\x8c\x6b\x80\x10\x7a\xc1\x77\x72\xb5"] = CTInfo($description="DigiCert Nessie2024 Log", $operator="DigiCert", $url="https://nessie2024.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2d\xfc\xa2\x7b\x36\xbf\x56\x91\xe9\xfe\x3f\xe8\x3d\xfc\xc3\xa7\xe0\x61\x52\xea\x2c\xe9\x05\xa3\x9f\x27\x17\x81\x05\x70\x6b\x81\x61\x44\x8a\xf8\x3b\x10\x80\x42\xed\x03\x2f\x00\x50\x21\xfc\x41\x54\x84\xa3\x54\xd5\x2e\xb2\x7a\x16\x4b\x2a\x1f\x2b\x66\x04\x2b"),
["\xe6\xd2\x31\x63\x40\x77\x8c\xc1\x10\x41\x06\xd7\x71\xb9\xce\xc1\xd2\x40\xf6\x96\x84\x86\xfb\xba\x87\x32\x1d\xfd\x1e\x37\x8e\x50"] = CTInfo($description="DigiCert Nessie2025 Log", $operator="DigiCert", $url="https://nessie2025.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xf2\xf0\xf0\xa7\x8b\x81\x2e\x09\x39\x3b\x9f\x42\xda\x38\x44\x5f\xb4\xcc\xed\x36\xbb\xd8\x43\x7f\x16\x49\x57\x87\x04\x7f\xa5\x01\x34\xf7\xe8\x68\x3f\xb7\x78\x1f\x60\x66\x2d\x67\x9a\x75\x80\xb7\x53\xa7\x85\xd5\xbc\xab\x47\x06\x55\xdb\xb5\xdf\x88\xa1\x6f\x38"),
["\xb6\x9d\xdc\xbc\x3c\x1a\xbd\xef\x6f\x9f\xd6\x0c\x88\xb1\x06\x7b\x77\xf0\x82\x68\x8b\x2d\x78\x65\xd0\x4b\x39\xab\xe9\x27\xa5\x75"] = CTInfo($description="DigiCert 'Wyvern2024h1' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2024h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x68\xa6\x79\x14\xd1\x58\xe7\xab\xaa\x29\x69\x7f\x60\xed\x68\xe8\x10\xf6\x07\x84\xc0\xfb\x59\x04\x5a\x09\xc9\x1d\xe1\x4b\xfb\xcd\xdc\x03\xf3\xa8\x2a\x46\xb9\x84\x4d\x69\x30\xec\x23\x35\xc1\x8e\xfc\x9f\xb4\x20\x24\xd7\x15\xac\x87\xf7\x1e\xc1\x0b\x3c\x76\x1a"),
["\x0c\x2a\xef\x2c\x4a\x5b\x98\x83\xd4\xdd\xa3\x82\xfe\x50\xfb\x51\x88\xb3\xe9\x73\x33\xa1\xec\x53\xa0\x9d\xc9\xa7\x9d\x0d\x08\x20"] = CTInfo($description="DigiCert 'Wyvern2024h2' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2024h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa8\x73\x12\x9c\x54\xd0\x7a\x7d\xc5\xb5\x17\x2b\x71\x52\x89\x04\x90\xbb\x42\xf1\x9d\xf8\x1c\xde\x4c\xcf\x82\x3c\xbd\x37\x1b\x74\x4c\x3c\xc7\xa3\x13\x87\x01\x51\x13\x14\xda\xa2\x12\x98\x84\xce\x1c\xbe\xcf\x4f\x7a\xef\x15\xfa\xd0\xee\xed\xed\x07\xad\x71\x6d"),
["\x73\x20\x22\x0f\x08\x16\x8a\xf9\xf3\xc4\xa6\x8b\x0a\xb2\x6a\x9a\x4a\x00\xee\xf5\x77\x85\x8a\x08\x4d\x05\x00\xd4\xa5\x42\x44\x59"] = CTInfo($description="DigiCert 'Wyvern2025h1' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\xcb\x80\x61\x86\x1b\x1f\xb5\xab\x2b\x20\x76\x59\x83\x66\x0e\xce\xae\xb8\x6f\x3b\x88\x02\xeb\x43\xf4\x87\x90\xcb\x8b\xda\xac\x0e\x19\x50\xe0\xf9\x24\x0e\xab\x26\x93\x8c\x3f\x9e\x0d\x96\x58\x44\x9d\x3b\x8a\x80\xc5\xc8\xbe\xe1\x89\x46\x6b\x48\x4c\xd6\x09"),
["\xed\x3c\x4b\xd6\xe8\x06\xc2\xa4\xa2\x00\x57\xdb\xcb\x24\xe2\x38\x01\xdf\x51\x2f\xed\xc4\x86\xc5\x70\x0f\x20\xdd\xb7\x3e\x3f\xe0"] = CTInfo($description="DigiCert 'Wyvern2025h2' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe0\xdb\x41\xef\xe4\x04\xbd\xcb\x6b\x2e\x4c\xcc\xf1\x6c\xde\x41\x58\x7f\xfe\x94\xf6\x7a\xf6\x60\xed\x8b\x76\x72\xa3\xa2\x1c\x31\x13\x32\x35\xa1\xf2\x08\xd2\x68\xc5\x34\xa7\x56\x08\x1c\x63\xde\x95\xe2\x81\x69\x97\x8d\x1e\xa8\xb7\x66\x51\x25\x75\x4d\x78\x2e"),
["\xdb\x07\x6c\xde\x6a\x8b\x78\xec\x58\xd6\x05\x64\x96\xeb\x6a\x26\xa8\xc5\x9e\x72\x12\x93\xe8\xac\x03\x27\xdd\xde\x89\xdb\x5a\x2a"] = CTInfo($description="DigiCert 'Sphinx2024h1' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2024h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xc6\xe4\x29\x69\x98\xfe\x28\x92\x57\x12\x4d\x9e\xed\x0e\xe7\x32\xa2\xe6\x9c\x27\x78\xa4\x29\x7c\x99\xd5\xdb\xfa\x22\xc1\xdd\x5e\xa7\xf4\xd8\xea\xc8\xd7\x44\x8d\xe0\xf1\x8c\x0a\x01\x1d\xd8\x22\xa8\xd3\xeb\xc9\x22\x8e\x36\xfb\x4a\xb1\x70\x9c\x5d\xc1\xe8\x33"),
["\xdc\xc9\x5e\x6f\xa2\x99\xb9\xb0\xfd\xbd\x6c\xa6\xa3\x6e\x1d\x72\xc4\x21\x2f\xdd\x1e\x0f\x47\x55\x3a\x36\xd6\xcf\x1a\xd1\x1d\x8d"] = CTInfo($description="DigiCert 'Sphinx2024h2' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2024h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xdb\x09\x41\x84\xe7\xd1\xf1\x5b\x25\x09\x7b\xe8\xc6\x98\x51\x5e\x29\x85\xfd\x81\xde\x89\xd7\xd0\x86\xa4\xb0\xe5\x15\xec\x5d\x7b\x17\x55\x5f\xc9\x79\x8d\xe4\x22\x36\xe7\xe9\xbf\x38\x3f\xd1\xe9\xd4\x09\x84\x81\xbe\xb6\xc1\xed\x1b\x17\xea\x26\x97\xba\xe9\x9a"),
["\xde\x85\x81\xd7\x50\x24\x7c\x6b\xcd\xcb\xaf\x56\x37\xc5\xe7\x81\xc6\x4c\xe4\x6e\xd6\x17\x63\x9f\x8f\x34\xa7\x26\xc9\xe2\xbd\x37"] = CTInfo($description="DigiCert 'Sphinx2025h1' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe3\x2f\x1f\x4d\x89\x05\x75\x29\x78\xbb\x22\x3d\x07\x62\x51\x14\x70\x94\xe7\x3c\xea\xf5\xee\xae\xa6\x48\x9a\x86\x52\x4e\x9e\x5c\xe3\x95\x97\x28\xbb\x52\x4b\x2a\xfd\xc8\xc9\x89\x4e\x45\x31\x17\xd3\x8d\xf2\xe7\xce\x18\x11\x58\x98\x2c\x60\x6f\x58\x20\x36\x6e"),
["\xa4\x42\xc5\x06\x49\x60\x61\x54\x8f\x0f\xd4\xea\x9c\xfb\x7a\x2d\x26\x45\x4d\x87\xa9\x7f\x2f\xdf\x45\x59\xf6\x27\x4f\x3a\x84\x54"] = CTInfo($description="DigiCert 'Sphinx2025h2' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x41\x8c\x50\x13\x54\xb1\x19\x05\xb7\x7f\x4a\x20\x6e\xa3\x75\x63\xca\x34\xf4\xcc\x74\xea\x32\x3b\xb6\x8b\x03\x14\xa8\x52\x7f\x32\x87\x5e\x59\x9e\x0f\xab\x18\x9e\x29\x6c\xb5\x72\x77\x1a\x27\x54\x85\x5d\xc1\x7b\x24\xa8\x34\xe3\xcd\x88\xce\xd4\x50\x1b\xbe\x69"),
["\x55\x81\xd4\xc2\x16\x90\x36\x01\x4a\xea\x0b\x9b\x57\x3c\x53\xf0\xc0\xe4\x38\x78\x70\x25\x08\x17\x2f\xa3\xaa\x1d\x07\x13\xd3\x0c"] = CTInfo($description="Sectigo 'Sabre' CT log", $operator="Sectigo", $url="https://sabre.ct.comodo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xf2\x6f\xd2\x89\x0f\x3f\xc5\xf8\x87\x1e\xab\x65\xb3\xd9\xbb\x17\x23\x8c\x06\x0e\x09\x55\x96\x3d\x0a\x08\xa2\xc5\x71\xb3\xd1\xa9\x2f\x28\x3e\x83\x10\xbf\x12\xd0\x44\x66\x15\xef\x54\xe1\x98\x80\xd0\xce\x24\x6d\x3e\x67\x9a\xe9\x37\x23\xce\x52\x93\x86\xda\x80"),
["\xa2\xe2\xbf\xd6\x1e\xde\x2f\x2f\x07\xa0\xd6\x4e\x6d\x37\xa7\xdc\x65\x43\xb0\xc6\xb5\x2e\xa2\xda\xb7\x8a\xf8\x9a\x6d\xf5\x17\xd8"] = CTInfo($description="Sectigo 'Sabre2024h1'", $operator="Sectigo", $url="https://sabre2024h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2c\x01\xf6\xce\x31\xbc\xaa\x14\x61\x51\xfe\x6b\x7a\x87\xae\xa6\xd3\x9b\xc7\x87\x2d\x0a\x5a\xc8\x4f\xb5\x54\xdc\xc9\x93\xa0\x00\xee\xca\x1c\xb9\xa7\xb6\x7b\x47\x3b\xe5\x4f\xaa\x6c\x16\x1c\x70\x2e\xc8\xec\x53\x5a\x4c\x21\x4c\x7e\x27\x0b\x13\x14\x5e\xfc\x85"),
["\x19\x98\x10\x71\x09\xf0\xd6\x52\x2e\x30\x80\xd2\x9e\x3f\x64\xbb\x83\x6e\x28\xcc\xf9\x0f\x52\x8e\xee\xdf\xce\x4a\x3f\x16\xb4\xca"] = CTInfo($description="Sectigo 'Sabre2024h2'", $operator="Sectigo", $url="https://sabre2024h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x7a\x10\x4c\x8a\xe7\x22\x7b\x6d\x2a\xba\x8e\xfa\x6b\x4a\x81\xd5\x85\xae\x03\xef\xff\x4b\xfc\x4d\x53\x3d\xb7\x8c\xbb\x75\x09\xc9\xea\x16\x7e\xc1\x77\x16\xd2\xc2\x45\x74\x6d\x8d\xc4\xe1\x88\x37\xdf\xd4\xf3\x60\x65\xfc\xa0\x75\xf0\x20\x66\x8e\x4a\xcc\x19\xda"),
["\xe0\x92\xb3\xfc\x0c\x1d\xc8\xe7\x68\x36\x1f\xde\x61\xb9\x96\x4d\x0a\x52\x78\x19\x8a\x72\xd6\x72\xc4\xb0\x4d\xa5\x6d\x6f\x54\x04"] = CTInfo($description="Sectigo 'Sabre2025h1'", $operator="Sectigo", $url="https://sabre2025h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x7e\x2f\x39\xf1\xe8\x23\x8e\xb3\x32\x04\xaf\x4d\x57\xf6\xdb\xc5\x74\xa4\x7a\x6d\x3b\x07\x51\x0c\x5a\xfb\x80\x30\x05\xc6\x5a\x0c\xc4\x76\xd6\x06\xa8\x57\x4d\xfb\xdf\xe4\x82\x90\xc2\x41\xae\x70\xb3\x31\xa2\xe3\xfa\x3d\x5f\x2c\x5d\x04\xcd\xb4\x9d\x55\xab\x41"),
["\x1a\x04\xff\x49\xd0\x54\x1d\x40\xaf\xf6\xa0\xc3\xbf\xf1\xd8\xc4\x67\x2f\x4e\xec\xee\x23\x40\x68\x98\x6b\x17\x40\x2e\xdc\x89\x7d"] = CTInfo($description="Sectigo 'Sabre2025h2'", $operator="Sectigo", $url="https://sabre2025h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x85\x13\x11\x2d\x7b\xf3\x93\x81\xe4\xb9\x7c\xd9\x64\x3b\xe7\xb5\x83\x99\x66\x79\x59\x47\x6a\x42\x5e\xd6\xbd\x63\x2e\xb7\x91\x4b\xae\xbc\x56\xc4\xc5\x6e\x09\xa0\xd7\x64\x1a\xc8\xc1\xaf\x89\x8b\xf5\x58\xd8\xba\xeb\x7b\x83\x52\xe9\xf4\xe0\xa5\xcd\xcd\x92\xcc"),
["\x6f\x53\x76\xac\x31\xf0\x31\x19\xd8\x99\x00\xa4\x51\x15\xff\x77\x15\x1c\x11\xd9\x02\xc1\x00\x29\x06\x8d\xb2\x08\x9a\x37\xd9\x13"] = CTInfo($description="Sectigo 'Mammoth' CT log", $operator="Sectigo", $url="https://mammoth.ct.comodo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xef\xe4\x7d\x74\x2e\x15\x15\xb6\xe9\xbb\x23\x8b\xfb\x2c\xb5\xe1\xc7\x80\x98\x47\xfb\x40\x69\x68\xfc\x49\xad\x61\x4e\x83\x47\x3c\x1a\xb7\x8d\xdf\xff\x7b\x30\xb4\xba\xff\x2f\xcb\xa0\x14\xe3\xad\xd5\x85\x3f\x44\x59\x8c\x8c\x60\x8b\xd7\xb8\xb1\xbf\xae\x8c\x67"),
["\x29\xd0\x3a\x1b\xb6\x74\xaa\x71\x1c\xd3\x03\x5b\x65\x57\xc1\x4f\x8a\xa7\x8b\x4f\xe8\x38\x94\x49\xec\xa4\x53\xf9\x44\xbd\x24\x68"] = CTInfo($description="Sectigo 'Mammoth2024h1'", $operator="Sectigo", $url="https://mammoth2024h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa4\x59\x90\xf3\x71\x24\x24\xf7\xc3\x55\x27\x56\x9c\xa3\x59\x1e\xf7\xb7\x9f\xce\xab\x4e\x19\x66\x4d\xd0\x8a\xfa\x9d\x62\xa4\x24\xf0\x3b\x20\xe4\x1d\x14\x67\xc8\xfc\xe4\x37\xf2\x4b\x38\x54\x5a\xcf\x9f\x6b\x07\x90\xd0\x0e\x7e\x3d\x4c\x87\xb2\xe8\x3f\x07\xcc"),
["\x50\x85\x01\x58\xdc\xb6\x05\x95\xc0\x0e\x92\xa8\x11\x02\xec\xcd\xfe\x3f\x6b\x78\x58\x42\x9f\x57\x98\x35\x38\xc9\xda\x52\x50\x63"] = CTInfo($description="Sectigo 'Mammoth2024h1b'", $operator="Sectigo", $url="https://mammoth2024h1b.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa3\xd5\x07\x28\x7a\x04\x34\xae\xca\xbe\x80\x79\x4f\x3e\xf6\x41\xf4\x24\x04\xe1\xd6\x36\x5a\x1a\x09\xf2\xd1\xba\x84\x17\xae\x1e\xa1\x7c\x00\x1d\x54\x73\x90\x75\x21\xa8\xd1\xda\x5e\x10\xe1\x8c\xec\xb2\x8a\x8c\xc8\xe7\xdd\xcd\xe2\x07\xf0\x4e\x16\x02\x57\x37"),
["\xdf\xe1\x56\xeb\xaa\x05\xaf\xb5\x9c\x0f\x86\x71\x8d\xa8\xc0\x32\x4e\xae\x56\xd9\x6e\xa7\xf5\xa5\x6a\x01\xd1\xc1\x3b\xbe\x52\x5c"] = CTInfo($description="Sectigo 'Mammoth2024h2'", $operator="Sectigo", $url="https://mammoth2024h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x85\x66\x22\x24\x6e\xbe\x52\x62\x0a\xa0\xaf\xc3\x25\x1a\x36\x2e\xa7\x60\x89\xa2\x65\xbf\xa4\x5f\xbd\x85\x6a\x94\x05\x81\x35\x90\x54\x31\x95\xe7\x11\x9e\xa3\x2e\x0f\x85\xef\xa7\x88\x57\x8b\x63\x1a\x81\xc1\x41\x9d\x7d\xec\x01\x3a\xdb\xb9\xc1\x27\xf4\x65\x1e"),
@@ -39,4 +50,6 @@ redef ct_logs += {
["\x87\x4f\xb5\x0d\xc0\x29\xd9\x93\x1d\xe5\x73\xe9\xf2\x89\x9e\x8e\x45\x33\xb3\x92\xd3\x8b\x0a\x46\x25\x74\xbf\x0f\xee\xb2\xfc\x1e"] = CTInfo($description="Trust Asia Log2024-2", $operator="TrustAsia", $url="https://ct2024.trustasia.com/log2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\x64\xe2\x79\x81\x3f\x61\xd7\xec\xc6\xf8\x65\x28\x1d\xa0\xb4\x66\x33\xc3\x25\xd5\x0a\x95\x78\x9c\x8f\xfe\xa4\x2a\xd8\x8f\x7e\x72\xe0\xfe\xa8\x7f\xf8\xb1\x2d\x85\xc0\x8e\x12\x74\x0d\x2f\x8c\xab\xd7\x7f\x7a\x1e\xd9\x84\x33\x39\xe8\xfd\x89\x5f\x96\x48\x08"),
["\x28\xe2\x81\x38\xfd\x83\x21\x45\xe9\xa9\xd6\xaa\x75\x37\x6d\x83\x77\xa8\x85\x12\xb3\xc0\x7f\x72\x41\x48\x21\xdc\xbd\xe9\x8c\x66"] = CTInfo($description="TrustAsia Log2025a", $operator="TrustAsia", $url="https://ct2025-a.trustasia.com/log2025a/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x70\xe5\xb1\xa4\x09\x79\x2b\x9d\xf8\xa3\xa0\xdf\x18\xef\x95\x5d\x03\x6c\x7b\xa1\x91\xa9\xb8\x80\x7d\xec\x5c\x02\x08\xe2\x6e\x2f\x7c\x32\x70\xbd\x96\x84\x5f\xa6\x62\xe9\x65\xb5\x7c\x90\x58\xba\x22\xd5\xf9\xf5\x69\x54\xb7\xa8\x94\x4e\x32\x09\xae\x26\x11\x4d"),
["\x28\x2c\x8b\xdd\x81\x0f\xf9\x09\x12\x0a\xce\x16\xd6\xe0\xec\x20\x1b\xea\x82\xa3\xa4\xaf\x19\xd9\xef\xfb\x59\xe8\x3f\xdc\x42\x68"] = CTInfo($description="TrustAsia Log2025b", $operator="TrustAsia", $url="https://ct2025-b.trustasia.com/log2025b/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xaa\xa0\x8b\xdb\x67\x14\x5d\x97\x89\x1d\x08\x8d\x06\xd7\xc1\x94\x8e\xb0\xfa\x4c\x46\xd5\x53\x08\x78\x2b\x04\x53\x6c\xf3\xde\xb1\xd1\x53\x40\xda\x90\x57\xe6\x1a\x9e\x3c\xc7\x03\xb8\xbd\x2f\xa9\xcf\xe8\x7b\x5e\xe1\x4b\x60\xe5\x38\x43\x60\x97\xc1\x5b\x2f\x65"),
["\x74\xdb\x9d\x58\xf7\xd4\x7e\x9d\xfd\x78\x7a\x16\x2a\x99\x1c\x18\xcf\x69\x8d\xa7\xc7\x29\x91\x8c\x9a\x18\xb0\x45\x0d\xba\x44\xbc"] = CTInfo($description="TrustAsia 'log2026a'", $operator="TrustAsia", $url="https://ct2026-a.trustasia.com/log2026a/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\x4e\x7a\xc9\xa6\x07\xf9\xff\x74\xec\x98\xcb\x49\xe1\x00\x24\xb3\x59\x2e\x83\xfd\xc0\x70\x35\x33\x4c\x63\xca\x74\x83\xc0\x3c\x5b\x53\x40\x7c\x31\x1f\x35\xa4\x5f\x0f\xe4\xee\x4f\x89\x17\xe8\x5b\x2e\xc5\xac\x00\x05\xc9\x76\x37\x45\x97\x03\x15\xff\x60\x59"),
["\x25\xb7\xef\xde\xa1\x13\x01\x93\xed\x93\x07\x97\x70\xaa\x32\x2a\x26\x62\x0d\xe3\x5a\xc8\xaa\x7c\x75\x19\x7d\xe0\xb1\xa9\xe0\x65"] = CTInfo($description="TrustAsia 'log2026b'", $operator="TrustAsia", $url="https://ct2026-b.trustasia.com/log2026b/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x0f\x12\x8c\xa9\xe6\xe3\xec\x62\xee\xdf\x58\xc8\x50\xe6\x26\x70\x76\x10\xb7\x04\x39\xb3\xa7\xf8\x4c\x73\x3b\xc3\x38\x5a\x12\x00\x4c\xe0\xda\x0e\x16\x8a\x45\x32\x0a\x31\xaa\x22\xc7\x9d\x7d\x05\x53\xc7\x9e\x94\xea\x9b\x57\x46\xbf\x4f\xa4\x7e\xfb\xdf\xfa\x85"),
};
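All entries in this table share the same CTInfo shape, so a site operating its own log could append one the same way. A minimal sketch only -- the 32-byte log ID and the DER-encoded SPKI bytes below are placeholders, not a real log's values:

    redef ct_logs += {
        ["\x00\x01\x02..."] = CTInfo($description="Example in-house CT log",
                                      $operator="Example Org",
                                      $url="https://ct.example.org/log/",
                                      $maximum_merge_delay=86400,
                                      $key="\x30\x59\x30\x13..."),
    };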
File diff suppressed because one or more lines are too long
@@ -65,13 +65,11 @@ export {
    ## cluster nodes that need them and don't have them explicitly specified
    ## in cluster configurations.
    const auto_assign_broker_ports = T &redef;
-   const auto_assign_ports = T &redef &deprecated="Remove in v7.1: replaced by auto_assign_broker_ports.";

    ## The TCP start port to use for auto-assigning cluster node listening
    ## ports, if :zeek:see:`Management::Controller::auto_assign_broker_ports` is
    ## enabled (the default) and nodes don't come with those ports assigned.
    const auto_assign_broker_start_port = 2200/tcp &redef;
-   const auto_assign_start_port = 2200/tcp &redef &deprecated="Remove in v7.1: replaced by auto_assign_broker_start_port.";

    ## Whether the controller should auto-assign metrics ports for Prometheus
    ## to nodes that need them and don't have them explicitly specified in
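A site can override these auto-assignment options from its own controller configuration. A minimal sketch, assuming the defaults shown above (the alternative start port is purely illustrative):

    # Sketch only: keep Broker port auto-assignment but move the start port,
    # and leave metrics ports to be set explicitly per node.
    redef Management::Controller::auto_assign_broker_start_port = 3300/tcp;
    redef Management::Controller::auto_assign_metrics_ports = F;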
@@ -335,12 +335,6 @@ function config_assign_broker_ports(config: Management::Configuration)
    # instances.
    local start_port = Management::Controller::auto_assign_broker_start_port;

-   @pragma push ignore-deprecations
-   # Keep deprecated config setting working until 7.1:
-   if ( Management::Controller::auto_assign_start_port != 2200/tcp )
-       start_port = Management::Controller::auto_assign_start_port;
-   @pragma pop ignore-deprecations

    local p = port_to_count(start_port);

    # A set that tracks the ports we've used so far. Helpful for avoiding

@@ -613,17 +607,10 @@ function config_validate(config: Management::Configuration,
    # ports. Verify this both for Broker's ports and the metrics export
    # ones.

-   @pragma push ignore-deprecations
-   # Keep deprecated config setting working until 7.1:
-   local auto_broker_ports = Management::Controller::auto_assign_broker_ports;
-   if ( ! Management::Controller::auto_assign_ports )
-       auto_broker_ports = F;
-   @pragma pop ignore-deprecations

    local nodes: vector of string;
    local nodes_str: string;

-   if ( ! auto_broker_ports )
+   if ( ! Management::Controller::auto_assign_broker_ports )
        {
        nodes = config_nodes_lacking_broker_ports(config);
@@ -1042,17 +1029,10 @@ event Management::Controller::API::stage_configuration_request(reqid: string, co
    g_configs[STAGED] = config;
    config_copy = copy(config);

-   @pragma push ignore-deprecations
+   if ( Management::Controller::auto_assign_broker_ports )
-   # Keep deprecated config setting working until 7.1:
-   local auto_broker_ports = Management::Controller::auto_assign_broker_ports;
-   if ( ! Management::Controller::auto_assign_ports )
-       auto_broker_ports = F;

-   if ( auto_broker_ports )
        config_assign_broker_ports(config_copy);
    if ( Management::Controller::auto_assign_metrics_ports )
        config_assign_metrics_ports(config_copy);
-   @pragma pop ignore-deprecations

    g_configs[READY] = config_copy;
@@ -1,2 +0,0 @@
@deprecated "Remove in v7.1: Cluster nodes now implicitly listen on metrics port if set in cluster-layout."
@load base/frameworks/telemetry
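With this policy script gone, the equivalent setup is to give nodes a metrics port directly. A minimal sketch for a standalone node, assuming the base telemetry framework's Telemetry::metrics_port option (cluster nodes would instead get their port from the cluster layout, per the deprecation note above):

    # Sketch only: expose the Prometheus scrape endpoint.
    redef Telemetry::metrics_port = 9911/tcp;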
@@ -1,117 +0,0 @@
##! This script implements the "Zeek side" of several load balancing
##! approaches for Zeek clusters.

@deprecated "Remove in v7.1. This script has not seen extensions for the past 10 years and is not at all recommended to use for packet load balancing purposes. On Linux, AF_PACKET is recommended and works out of the box. On FreeBSD, there is Netmap with lb. Otherwise, NIC specific packet sources and approaches exist that handle the load balancing."

@pragma push ignore-deprecations

@load base/frameworks/cluster
@load base/frameworks/packet-filter

module LoadBalancing;

export {
    type Method: enum {
        ## Apply BPF filters to each worker in a way that causes them to
        ## automatically flow balance traffic between them.
        AUTO_BPF,
    };

    ## Defines the method of load balancing to use.
    const method = AUTO_BPF &redef;

    redef record Cluster::Node += {
        ## A BPF filter for load balancing traffic sniffed on a single
        ## interface across a number of processes. In normal uses, this
        ## will be assigned dynamically by the manager and installed by
        ## the workers.
        lb_filter: string &optional;
    };
}

@if ( Cluster::is_enabled() )

event zeek_init() &priority=5
    {
    if ( method != AUTO_BPF )
        return;

    local worker_ip_interface: table[addr, string] of count = table();
    local sorted_node_names: vector of string = vector();
    local node: Cluster::Node;
    local name: string;

    # Sort nodes list so that every node iterates over it in same order.
    for ( name in Cluster::nodes )
        sorted_node_names += name;

    sort(sorted_node_names, strcmp);

    for ( idx in sorted_node_names )
        {
        name = sorted_node_names[idx];
        node = Cluster::nodes[name];

        if ( node$node_type != Cluster::WORKER )
            next;

        if ( ! node?$interface )
            next;

        if ( [node$ip, node$interface] !in worker_ip_interface )
            worker_ip_interface[node$ip, node$interface] = 0;

        ++worker_ip_interface[node$ip, node$interface];
        }

    # Now that we've counted up how many processes are running per
    # interface, let's create the filters for each worker.
    local lb_proc_track: table[addr, string] of count = table();

    for ( idx in sorted_node_names )
        {
        name = sorted_node_names[idx];
        node = Cluster::nodes[name];

        if ( node$node_type != Cluster::WORKER )
            next;

        if ( ! node?$interface )
            next;

        if ( [node$ip, node$interface] !in worker_ip_interface )
            next;

        if ( [node$ip, node$interface] !in lb_proc_track )
            lb_proc_track[node$ip, node$interface] = 0;

        local this_lb_proc = lb_proc_track[node$ip, node$interface];
        local total_lb_procs = worker_ip_interface[node$ip, node$interface];
        ++lb_proc_track[node$ip, node$interface];

        if ( total_lb_procs > 1 )
            node$lb_filter = PacketFilter::sampling_filter(total_lb_procs,
                                                           this_lb_proc);
        }

    # Finally, install filter for the current node if it needs one.
    for ( idx in sorted_node_names )
        {
        name = sorted_node_names[idx];
        node = Cluster::nodes[name];

        if ( name != Cluster::node )
            next;

        if ( ! node?$lb_filter )
            next;

        restrict_filters["lb_filter"] = node$lb_filter;
        PacketFilter::install();
        }
    }

@endif

@pragma pop
@@ -1,2 +0,0 @@
##! This loads the default tuning
@load ./defaults

@@ -1,2 +0,0 @@
Sets various defaults, and prints warning messages to stdout under
certain conditions.

@@ -1 +0,0 @@
@deprecated "Remove in v7.1 The policy/tuning/defaults package is deprecated. The options set here are now the defaults for Zeek in general.";

@@ -1 +0,0 @@
@deprecated "Remove in v7.1 The policy/tuning/defaults package is deprecated. The options set here are now the defaults for Zeek in general.";

@@ -1 +0,0 @@
@deprecated "Remove in v7.1 The policy/tuning/defaults package is deprecated. The options set here are now the defaults for Zeek in general.";

@@ -1 +0,0 @@
@deprecated "Remove in v7.1 The policy/tuning/defaults package is deprecated. The options set here are now the defaults for Zeek in general.";
@@ -18,7 +18,7 @@ type ZeekTypeTag = enum {
} &cxxname="::zeek::spicy::rt::ZeekTypeTag";
declare public void register_spicy_module_begin(string name, string description) &cxxname="zeek::spicy::rt::register_spicy_module_begin";
declare public void register_protocol_analyzer(string name, hilti::Protocol protocol, vector<PortRange> ports, string parser_orig, string parser_resp, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_protocol_analyzer" &have_prototype;
declare public void register_protocol_analyzer(string name, hilti::Protocol protocol, string parser_orig, string parser_resp, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_protocol_analyzer" &have_prototype;
declare public void register_file_analyzer(string name, vector<string> mime_types, string parser, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_file_analyzer" &have_prototype;
declare public void register_packet_analyzer(string name, string parser, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_packet_analyzer" &have_prototype;
declare public void register_type(string ns, string id, BroType t) &cxxname="zeek::spicy::rt::register_type" &have_prototype;
@@ -78,7 +78,6 @@
# @load frameworks/spicy/record-spicy-batch.zeek
# @load frameworks/spicy/resource-usage.zeek
@load frameworks/software/windows-version-detection.zeek
-@load frameworks/telemetry/prometheus.zeek
@load frameworks/telemetry/log.zeek
@load integration/collective-intel/__load__.zeek
@load integration/collective-intel/main.zeek

@@ -86,7 +85,6 @@
@load misc/detect-traceroute/__load__.zeek
@load misc/detect-traceroute/main.zeek
# @load misc/dump-events.zeek
-@load misc/load-balancing.zeek
@load misc/loaded-scripts.zeek
@load misc/profiling.zeek
@load misc/stats.zeek

@@ -142,10 +140,5 @@
@load protocols/ssl/validate-ocsp.zeek
@load protocols/ssl/validate-sct.zeek
@load protocols/ssl/weak-keys.zeek
-@load tuning/__load__.zeek
-@load tuning/defaults/__load__.zeek
-@load tuning/defaults/extracted_file_limits.zeek
-@load tuning/defaults/packet-fragments.zeek
-@load tuning/defaults/warnings.zeek
@load tuning/json-logs.zeek
@load tuning/track-all-assets.zeek
@@ -636,6 +636,8 @@ install(
    # https://gitlab.kitware.com/cmake/cmake/-/issues/17122 Exclude the ones that
    # this affects explicitly.
    PATTERN "script_opt/CPP/maint" EXCLUDE
+   PATTERN "script_opt/ZAM/maint" EXCLUDE
+   PATTERN "script_opt/ZAM/OPs" EXCLUDE
    PATTERN "fuzzers/corpora" EXCLUDE)

install(
@@ -258,15 +258,6 @@ analyzer::Analyzer* Connection::FindAnalyzer(const zeek::Tag& tag) {

analyzer::Analyzer* Connection::FindAnalyzer(const char* name) { return adapter->FindChild(name); }

-void Connection::AppendAddl(const char* str) {
-    const auto& cv = GetVal();
-
-    const char* old = cv->GetFieldAs<StringVal>(6)->CheckString();
-    const char* format = *old ? "%s %s" : "%s%s";
-
-    cv->Assign(6, util::fmt(format, old, str));
-}
-
void Connection::Match(detail::Rule::PatternType type, const u_char* data, int len, bool is_orig, bool bol, bool eol,
                       bool clear_state) {
    if ( primary_PIA )

@@ -145,12 +145,6 @@ public:
     */
    const RecordValPtr& GetVal() override;

-    /**
-     * Append additional entries to the history field in the connection record.
-     */
-    [[deprecated("Remove in v7.1 - Appears unused and named rough. Use CheckHistory() or AddHistory() instead.")]] void
-    AppendAddl(const char* str);
-
    void Match(detail::Rule::PatternType type, const u_char* data, int len, bool is_orig, bool bol, bool eol,
               bool clear_state);
src/DNS_Mgr.cc
@@ -45,6 +45,7 @@ using ztd::out_ptr::out_ptr;
#include "zeek/Val.h"
#include "zeek/ZeekString.h"
#include "zeek/iosource/Manager.h"
+#include "zeek/telemetry/Manager.h"

// Number of seconds we'll wait for a reply.
constexpr int DNS_TIMEOUT = 5;
@@ -545,6 +546,55 @@ void DNS_Mgr::InitSource() {
}

void DNS_Mgr::InitPostScript() {
+    num_requests_metric =
+        telemetry_mgr->CounterInstance("zeek", "dnsmgr_requests", {}, "Total number of requests through DNS_Mgr");
+    successful_metric = telemetry_mgr->CounterInstance("zeek", "dnsmgr_successful_requests", {},
+                                                       "Total number of successful requests through DNS_Mgr");
+    failed_metric = telemetry_mgr->CounterInstance("zeek", "dnsmgr_failed_requests", {},
+                                                   "Total number of failed requests through DNS_Mgr");
+    asyncs_pending_metric = telemetry_mgr->GaugeInstance("zeek", "dnsmgr_pending_asyncs_requests", {},
+                                                         "Number of pending async requests through DNS_Mgr");
+
+    cached_hosts_metric =
+        telemetry_mgr->GaugeInstance("zeek", "dnsmgr_cache_entries", {{"type", "host"}},
+                                     "Number of cached hosts in DNS_Mgr", "", []() -> prometheus::ClientMetric {
+                                         prometheus::ClientMetric metric;
+                                         metric.gauge.value = 0;
+
+                                         if ( dns_mgr ) {
+                                             dns_mgr->UpdateCachedStats(false);
+                                             metric.gauge.value = static_cast<double>(dns_mgr->last_cached_stats.hosts);
+                                         }
+                                         return metric;
+                                     });
+
+    cached_addresses_metric =
+        telemetry_mgr->GaugeInstance("zeek", "dnsmgr_cache_entries", {{"type", "address"}},
+                                     "Number of cached addresses in DNS_Mgr", "", []() -> prometheus::ClientMetric {
+                                         prometheus::ClientMetric metric;
+                                         metric.gauge.value = 0;
+
+                                         if ( dns_mgr ) {
+                                             dns_mgr->UpdateCachedStats(false);
+                                             metric.gauge.value =
+                                                 static_cast<double>(dns_mgr->last_cached_stats.addresses);
+                                         }
+                                         return metric;
+                                     });
+
+    cached_texts_metric =
+        telemetry_mgr->GaugeInstance("zeek", "dnsmgr_cache_entries", {{"type", "text"}},
+                                     "Number of cached texts in DNS_Mgr", "", []() -> prometheus::ClientMetric {
+                                         prometheus::ClientMetric metric;
+                                         metric.gauge.value = 0;
+
+                                         if ( dns_mgr ) {
+                                             dns_mgr->UpdateCachedStats(false);
+                                             metric.gauge.value = static_cast<double>(dns_mgr->last_cached_stats.texts);
+                                         }
+                                         return metric;
+                                     });
+
    if ( ! doctest::is_running_in_test ) {
        dm_rec = id::find_type<RecordType>("dns_mapping");
@@ -1158,7 +1208,7 @@ void DNS_Mgr::IssueAsyncRequests() {
        AsyncRequest* req = asyncs_queued.front();
        asyncs_queued.pop_front();

-        ++num_requests;
+        num_requests_metric->Inc();
        req->time = util::current_time();

        if ( req->type == T_PTR )

@@ -1173,6 +1223,7 @@ void DNS_Mgr::IssueAsyncRequests() {
        dns_req->MakeRequest(channel, this);

        ++asyncs_pending;
+        asyncs_pending_metric->Inc();
    }
}

@@ -1182,11 +1233,11 @@ void DNS_Mgr::CheckAsyncHostRequest(const std::string& host, bool timeout) {

    if ( i != asyncs.end() ) {
        if ( timeout ) {
-            ++failed;
+            failed_metric->Inc();
            i->second->Timeout();
        }
        else if ( auto addrs = LookupNameInCache(host, true, false) ) {
-            ++successful;
+            successful_metric->Inc();
            i->second->Resolved(addrs);
        }
        else

@@ -1195,6 +1246,7 @@ void DNS_Mgr::CheckAsyncHostRequest(const std::string& host, bool timeout) {
        delete i->second;
        asyncs.erase(i);
        --asyncs_pending;
+        asyncs_pending_metric->Dec();
    }
}

@@ -1207,11 +1259,11 @@ void DNS_Mgr::CheckAsyncAddrRequest(const IPAddr& addr, bool timeout) {

    if ( i != asyncs.end() ) {
        if ( timeout ) {
-            ++failed;
+            failed_metric->Inc();
            i->second->Timeout();
        }
        else if ( auto name = LookupAddrInCache(addr, true, false) ) {
-            ++successful;
+            successful_metric->Inc();
            i->second->Resolved(name->CheckString());
        }
        else

@@ -1220,6 +1272,7 @@ void DNS_Mgr::CheckAsyncAddrRequest(const IPAddr& addr, bool timeout) {
        delete i->second;
        asyncs.erase(i);
        --asyncs_pending;
+        asyncs_pending_metric->Dec();
    }
}

@@ -1229,11 +1282,11 @@ void DNS_Mgr::CheckAsyncOtherRequest(const std::string& host, bool timeout, int
    auto i = asyncs.find(std::make_pair(request_type, host));
    if ( i != asyncs.end() ) {
        if ( timeout ) {
-            ++failed;
+            failed_metric->Inc();
            i->second->Timeout();
        }
        else if ( auto name = LookupOtherInCache(host, request_type, true) ) {
-            ++successful;
+            successful_metric->Inc();
            i->second->Resolved(name->CheckString());
        }
        else

@@ -1242,6 +1295,7 @@ void DNS_Mgr::CheckAsyncOtherRequest(const std::string& host, bool timeout, int
        delete i->second;
        asyncs.erase(i);
        --asyncs_pending;
+        asyncs_pending_metric->Dec();
    }
}
@@ -1293,26 +1347,35 @@ void DNS_Mgr::Process() {
    ares_process_fd(channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
}

-void DNS_Mgr::GetStats(Stats* stats) {
+void DNS_Mgr::UpdateCachedStats(bool force) {
-    // TODO: can this use the telemetry framework?
+    double now = util::current_time();
-    stats->requests = num_requests;
+    if ( force || last_cached_stats_update < now - 0.01 ) {
-    stats->successful = successful;
+        last_cached_stats.hosts = 0;
-    stats->failed = failed;
+        last_cached_stats.addresses = 0;
-    stats->pending = asyncs_pending;
+        last_cached_stats.texts = 0;
+        last_cached_stats.total = all_mappings.size();
-    stats->cached_hosts = 0;
-    stats->cached_addresses = 0;
-    stats->cached_texts = 0;
-    stats->cached_total = all_mappings.size();

        for ( const auto& [key, mapping] : all_mappings ) {
            if ( mapping->ReqType() == T_PTR )
-                stats->cached_addresses++;
+                last_cached_stats.addresses++;
            else if ( mapping->ReqType() == T_A )
-                stats->cached_hosts++;
+                last_cached_stats.hosts++;
            else
-                stats->cached_texts++;
+                last_cached_stats.texts++;
        }

+        last_cached_stats_update = now;
+    }
+}
+
+void DNS_Mgr::GetStats(Stats* stats) {
+    stats->requests = static_cast<unsigned long>(num_requests_metric->Value());
+    stats->successful = static_cast<unsigned long>(successful_metric->Value());
+    stats->failed = static_cast<unsigned long>(failed_metric->Value());
+    stats->pending = asyncs_pending;
+
+    UpdateCachedStats(true);
+    stats->cached = last_cached_stats;
}

void DNS_Mgr::AsyncRequest::Resolved(const std::string& name) {
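Since these counters now live in the telemetry framework, the same numbers should also be reachable from script land rather than only via GetStats(). A rough sketch, assuming the script-level Telemetry::collect_metrics() API and its Metric record fields ($opts$name, $value), together with the metric names registered above ("zeek" prefix, "dnsmgr" names):

    # Sketch only: print the DNS manager metrics at shutdown.
    event zeek_done()
        {
        local dns_metrics = Telemetry::collect_metrics("zeek", "dnsmgr*");
        for ( i in dns_metrics )
            print dns_metrics[i]$opts$name, dns_metrics[i]$value;
        }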
@@ -42,6 +42,13 @@ using TableValPtr = IntrusivePtr<TableVal>;
using StringValPtr = IntrusivePtr<StringVal>;
using RecordValPtr = IntrusivePtr<RecordVal>;

+namespace telemetry {
+class Gauge;
+class Counter;
+using GaugePtr = std::shared_ptr<Gauge>;
+using CounterPtr = std::shared_ptr<Counter>;
+} // namespace telemetry
+
} // namespace zeek

namespace zeek::detail {
@@ -198,15 +205,19 @@ public:
     */
    bool Save();

+    struct CachedStats {
+        unsigned long hosts;
+        unsigned long addresses;
+        unsigned long texts;
+        unsigned long total;
+    };
+
    struct Stats {
        unsigned long requests; // These count only async requests.
        unsigned long successful;
        unsigned long failed;
        unsigned long pending;
-        unsigned long cached_hosts;
+        CachedStats cached;
-        unsigned long cached_addresses;
-        unsigned long cached_texts;
-        unsigned long cached_total;
    };

    /**

@@ -285,6 +296,8 @@ protected:
    const char* Tag() override { return "DNS_Mgr"; }
    double GetNextTimeout() override;

+    void UpdateCachedStats(bool force);
+
    DNS_MgrMode mode;

    MappingMap all_mappings;

@@ -293,7 +306,6 @@ protected:
    std::string dir; // directory in which cache_name resides

    bool did_init = false;
-    int asyncs_pending = 0;

    RecordTypePtr dm_rec;

@@ -327,9 +339,19 @@ protected:
    using QueuedList = std::list<AsyncRequest*>;
    QueuedList asyncs_queued;

-    unsigned long num_requests = 0;
+    telemetry::CounterPtr num_requests_metric;
-    unsigned long successful = 0;
+    telemetry::CounterPtr successful_metric;
-    unsigned long failed = 0;
+    telemetry::CounterPtr failed_metric;
+    telemetry::GaugePtr asyncs_pending_metric;
+
+    telemetry::GaugePtr cached_hosts_metric;
+    telemetry::GaugePtr cached_addresses_metric;
+    telemetry::GaugePtr cached_texts_metric;
+
+    double last_cached_stats_update = 0;
+    CachedStats last_cached_stats;
+
+    int asyncs_pending = 0;

    std::set<int> socket_fds;
    std::set<int> write_socket_fds;
@@ -44,13 +44,6 @@ public:
    // Returns true if there is at least one local or remote handler.
    explicit operator bool() const;

-    [[deprecated("Remove in v7.1 - Unused event handlers are now found via UsageAnalyzer.")]] void SetUsed() {
-        used = true;
-    }
-    [[deprecated("Remove in v7.1 - Unused event handlers are now found via UsageAnalyzer.")]] bool Used() const {
-        return used;
-    }
-
    // Handlers marked as error handlers will not be called recursively to
    // avoid infinite loops if they trigger a similar error themselves.
    void SetErrorHandler() { error_handler = true; }
@@ -21,23 +21,12 @@ EventHandlerPtr EventRegistry::Register(std::string_view name, bool is_from_scri
         if ( ! is_from_script )
             not_only_from_script.insert(std::string(name));

-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-        // Remove in v7.1
-        h->SetUsed();
-#pragma GCC diagnostic pop
         return h;
     }

     h = new EventHandler(std::string(name));
     event_registry->Register(h, is_from_script);

-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-    // Remove in v7.1
-    h->SetUsed();
-#pragma GCC diagnostic pop
-
     return h;
 }

@@ -74,36 +63,6 @@ EventRegistry::string_list EventRegistry::Match(RE_Matcher* pattern) {
     return names;
 }

-EventRegistry::string_list EventRegistry::UnusedHandlers() {
-    string_list names;
-
-    for ( const auto& entry : handlers ) {
-        EventHandler* v = entry.second.get();
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-        if ( v->GetFunc() && ! v->Used() )
-            names.push_back(entry.first);
-#pragma GCC diagnostic pop
-    }
-
-    return names;
-}
-
-EventRegistry::string_list EventRegistry::UsedHandlers() {
-    string_list names;
-
-    for ( const auto& entry : handlers ) {
-        EventHandler* v = entry.second.get();
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-        if ( v->GetFunc() && v->Used() )
-            names.push_back(entry.first);
-#pragma GCC diagnostic pop
-    }
-
-    return names;
-}
-
 EventRegistry::string_list EventRegistry::AllHandlers() {
     string_list names;

@@ -69,8 +69,6 @@ public:
     // themselves.
     void SetErrorHandler(std::string_view name);

-    [[deprecated("Remove in v7.1 - Unused handlers are now found via UsageAnalyzer.")]] string_list UnusedHandlers();
-    [[deprecated("Remove in v7.1 - UsedHandlers() is unreliable - use AllHandlers().")]] string_list UsedHandlers();
     string_list AllHandlers();

     void PrintDebug();

src/Expr.cc | 12

@@ -414,13 +414,6 @@ NameExpr::NameExpr(IDPtr arg_id, bool const_init) : Expr(EXPR_NAME), id(std::mov
         SetType(make_intrusive<TypeType>(id->GetType()));
     else
         SetType(id->GetType());
-
-    EventHandler* h = event_registry->Lookup(id->Name());
-    if ( h )
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-        h->SetUsed();
-#pragma GCC diagnostic pop
 }

 bool NameExpr::CanDel() const {

@@ -4417,11 +4410,6 @@ EventExpr::EventExpr(const char* arg_name, ListExprPtr arg_args)
         event_registry->Register(h, true);
     }

-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-    h->SetUsed();
-#pragma GCC diagnostic pop
-
     handler = h;

     if ( args->IsError() ) {

@@ -434,7 +434,7 @@ ValPtr ScriptFunc::Invoke(zeek::Args* args, Frame* parent) const {

     // Warn if the function returns something, but we returned from
     // the function without an explicit return, or without a value.
-    else if ( GetType()->Yield() && GetType()->Yield()->Tag() != TYPE_VOID &&
+    else if ( GetType()->Yield() && GetType()->Yield()->Tag() != TYPE_VOID && ! GetType()->ExpressionlessReturnOkay() &&
              (flow != FLOW_RETURN /* we fell off the end */ || ! result /* explicit return with no result */) &&
              ! f->HasDelayed() )
         reporter->Warning("non-void function returning without a value: %s", Name());

@@ -182,8 +182,6 @@ int dpd_match_only_beginning;
 int dpd_late_match_stop;
 int dpd_ignore_ports;

-int check_for_unused_event_handlers;
-
 int record_all_packets;

 zeek_uint_t bits_per_uid;
@@ -229,7 +227,6 @@ void init_general_global_var() {
     table_incremental_step = id::find_val("table_incremental_step")->AsCount();
     packet_filter_default = id::find_val("packet_filter_default")->AsBool();
     sig_max_group_size = id::find_val("sig_max_group_size")->AsCount();
-    check_for_unused_event_handlers = id::find_val("check_for_unused_event_handlers")->AsBool();
     record_all_packets = id::find_val("record_all_packets")->AsBool();
     bits_per_uid = id::find_val("bits_per_uid")->AsCount();
 }

@@ -85,8 +85,6 @@ extern int dpd_match_only_beginning;
 extern int dpd_late_match_stop;
 extern int dpd_ignore_ports;

-extern int check_for_unused_event_handlers;
-
 extern int record_all_packets;

 extern zeek_uint_t bits_per_uid;

@@ -70,12 +70,6 @@ OpaqueValPtr OpaqueMgr::Instantiate(const std::string& id) const {
     return x != _types.end() ? (*x->second)() : nullptr;
 }

-broker::expected<broker::data> OpaqueVal::Serialize() const {
-    if ( auto res = SerializeData() )
-        return zeek::detail::BrokerDataAccess::Unbox(*res);
-    return {broker::make_error(broker::ec::serialization_failed)};
-}
-
 std::optional<BrokerData> OpaqueVal::SerializeData() const {
     auto type = OpaqueMgr::mgr()->TypeID(this);

@@ -89,8 +83,6 @@ std::optional<BrokerData> OpaqueVal::SerializeData() const {
     return std::move(builder).Build();
 }

-OpaqueValPtr OpaqueVal::Unserialize(const broker::data& data) { return UnserializeData(BrokerDataView(&data)); }
-
 OpaqueValPtr OpaqueVal::UnserializeData(BrokerDataView data) {
     if ( ! data.IsList() )
         return nullptr;
@@ -114,22 +106,9 @@ OpaqueValPtr OpaqueVal::UnserializeData(BrokerListView v) {
     return val;
 }

-broker::expected<broker::data> OpaqueVal::DoSerialize() const {
-    return {broker::make_error(broker::ec::serialization_failed)};
-}
-
-std::optional<BrokerData> OpaqueVal::DoSerializeData() const {
-    if ( auto res = DoSerialize() ) {
-        return BrokerData{std::move(*res)};
-    }
-    return std::nullopt;
-}
-
-bool OpaqueVal::DoUnserialize(const broker::data&) { return false; }
-
-bool OpaqueVal::DoUnserializeData(BrokerDataView data) {
-    return DoUnserialize(zeek::detail::BrokerDataAccess::Unbox(data));
-}
+std::optional<BrokerData> OpaqueVal::DoSerializeData() const { return std::nullopt; }
+
+bool OpaqueVal::DoUnserializeData(BrokerDataView data) { return false; }

 std::optional<BrokerData> OpaqueVal::SerializeType(const TypePtr& t) {
     if ( t->InternalType() == TYPE_INTERNAL_ERROR )

@@ -91,19 +91,6 @@ private:
     std::unordered_map<std::string, Factory*> _types;
 };

-/**
- * Legacy macro to insert into an OpaqueVal-derived class's declaration. Overrides the "old" serialization methods
- * DoSerialize and DoUnserialize.
- * @deprecated Use DECLARE_OPAQUE_VALUE_DATA instead. Remove in v7.1.
- */
-#define DECLARE_OPAQUE_VALUE(T) \
-    friend class zeek::OpaqueMgr::Register<T>; \
-    friend zeek::IntrusivePtr<T> zeek::make_intrusive<T>(); \
-    broker::expected<broker::data> DoSerialize() const override; \
-    bool DoUnserialize(const broker::data& data) override; \
-    const char* OpaqueName() const override { return #T; } \
-    static zeek::OpaqueValPtr OpaqueInstantiate() { return zeek::make_intrusive<T>(); }
-
 /**
  * Macro to insert into an OpaqueVal-derived class's declaration. Overrides the "new" serialization methods
  * DoSerializeData and DoUnserializeData.
@@ -132,28 +119,11 @@ public:
     explicit OpaqueVal(OpaqueTypePtr t);
     ~OpaqueVal() override = default;

-    /**
-     * Serializes the value into a Broker representation.
-     *
-     * @return the broker representation, or an error if serialization
-     * isn't supported or failed.
-     */
-    [[deprecated("Remove in v7.1: use SerializeData instead")]] broker::expected<broker::data> Serialize() const;
-
     /**
      * @copydoc Serialize
      */
     std::optional<BrokerData> SerializeData() const;

-    /**
-     * Reinstantiates a value from its serialized Broker representation.
-     *
-     * @param data Broker representation as returned by *Serialize()*.
-     * @return unserialized instances with reference count at +1
-     */
-    [[deprecated("Remove in v7.1: use UnserializeData instead")]] static OpaqueValPtr Unserialize(
-        const broker::data& data);
-
     /**
      * @copydoc Unserialize
      */
@@ -168,11 +138,6 @@ protected:
     friend class Val;
     friend class OpaqueMgr;

-    /**
-     * @deprecated Override DoSerializeData instead. Remove in v7.1.
-     */
-    virtual broker::expected<broker::data> DoSerialize() const;
-
     /**
      * Must be overridden to provide a serialized version of the derived
      * class' state.
@@ -182,11 +147,6 @@ protected:
      */
     virtual std::optional<BrokerData> DoSerializeData() const;

-    /**
-     * @deprecated Override DoUnserializeData instead. Remove in v7.1.
-     */
-    virtual bool DoUnserialize(const broker::data& data);
-
     /**
      * Must be overridden to recreate the derived class' state from a
      * serialization.

@@ -93,11 +93,9 @@ const IDPtr& lookup_ID(const char* name, const char* curr_module, bool no_global
                        bool check_export) {
     bool explicit_global = zeek::util::starts_with(name, "::");

-    // Ad-hoc deprecation if a name starts with "GLOBAL::". In v7.1 we could
-    // tweak {ID} to reject GLOBAL::, or switch this warning to error instead.
-    static std::string deprecated_prefix = util::fmt("%s::", GLOBAL_MODULE_NAME);
-    if ( zeek::util::starts_with(name, deprecated_prefix) )
-        reporter->Deprecation(util::fmt("Remove in v7.1: Use :: instead of %s (%s)", deprecated_prefix.c_str(), name));
+    static std::string global_prefix = util::fmt("%s::", GLOBAL_MODULE_NAME);
+    if ( zeek::util::starts_with(name, global_prefix) )
+        reporter->Error("Using GLOBAL:: as a prefix is invalid. Use :: instead (%s)", name);

     std::string fullname = make_full_var_name(curr_module, name);
     std::string ID_module = extract_module_name(fullname.c_str());

src/Stats.cc | 12

@@ -119,7 +119,7 @@ void ProfileLogger::Log() {

     // TODO: This previously output the number of connections, but now that we're storing
     // sessions as well as connections, this might need to be renamed.
-    file->Write(util::fmt("%.06f Conns: total=%" PRIu64 " current=%" PRIu64 "/%u\n", run_state::network_time,
+    file->Write(util::fmt("%.06f Conns: total=%" PRIu64 " current=%" PRIu64 "/%zu\n", run_state::network_time,
                           Connection::TotalConnections(), Connection::CurrentConnections(),
                           session_mgr->CurrentSessions()));

@@ -173,7 +173,7 @@ void ProfileLogger::Log() {
         util::fmt("%.06f DNS_Mgr: requests=%lu successful=%lu failed=%lu pending=%lu "
                   "cached_hosts=%lu cached_addrs=%lu\n",
                   run_state::network_time, dstats.requests, dstats.successful, dstats.failed, dstats.pending,
-                  dstats.cached_hosts, dstats.cached_addresses));
+                  dstats.cached.hosts, dstats.cached.addresses));

     trigger::Manager::Stats tstats;
     trigger_mgr->GetStats(&tstats);
@@ -188,7 +188,7 @@ void ProfileLogger::Log() {
                               timer_type_to_string(static_cast<TimerType>(i)), current_timers[i]));
     }

-    file->Write(util::fmt("%0.6f Threads: current=%d\n", run_state::network_time, thread_mgr->NumThreads()));
+    file->Write(util::fmt("%0.6f Threads: current=%zu\n", run_state::network_time, thread_mgr->NumThreads()));

     const threading::Manager::msg_stats_list& thread_stats = thread_mgr->GetMsgThreadStats();
     for ( threading::Manager::msg_stats_list::const_iterator i = thread_stats.begin(); i != thread_stats.end(); ++i ) {
@@ -213,14 +213,12 @@ void ProfileLogger::Log() {
                               cs.num_events_outgoing, cs.num_logs_incoming, cs.num_logs_outgoing, cs.num_ids_incoming,
                               cs.num_ids_outgoing));

-    // Script-level state.
-    const auto& globals = global_scope()->Vars();
-
     if ( expensive ) {
+        // Script-level state.
         int total_table_entries = 0;
         int total_table_rentries = 0;

-        for ( const auto& global : globals ) {
+        for ( const auto& global : global_scope()->Vars() ) {
             auto& id = global.second;

             // We don't show/count internal globals as they are always

@@ -54,12 +54,6 @@ const char* stmt_name(StmtTag t) {
         "std-function",
     };

-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-    if ( int(t) == STMT_ANY )
-        return "any";
-#pragma GCC diagnostic pop
-
     return stmt_names[int(t)];
 }

@@ -6,7 +6,6 @@ namespace zeek::detail {

 // These are in a separate file to break circular dependences
 enum StmtTag {
-    STMT_ANY [[deprecated("Remove in v7.1 - Unused and plugins should use STMT_EXTERN.")]] = -1,
     STMT_ALARM, // Does no longer exist but kept to create enums consistent.
     STMT_PRINT,
     STMT_EVENT,

src/Timer.cc | 30

@@ -9,7 +9,7 @@
 #include "zeek/RunState.h"
 #include "zeek/broker/Manager.h"
 #include "zeek/iosource/Manager.h"
-#include "zeek/iosource/PktSrc.h"
+#include "zeek/telemetry/Manager.h"
 #include "zeek/util.h"

 namespace zeek::detail {

@@ -97,6 +97,34 @@ void TimerMgr::InitPostScript() {
     iosource_mgr->Register(this, true);

     dispatch_all_expired = zeek::detail::max_timer_expires == 0;
+
+    cumulative_num_metric = telemetry_mgr->CounterInstance("zeek", "timers", {}, "Cumulative number of timers", "",
+                                                           []() -> prometheus::ClientMetric {
+                                                               prometheus::ClientMetric metric;
+                                                               metric.counter.value =
+                                                                   static_cast<double>(timer_mgr->CumulativeNum());
+                                                               return metric;
+                                                           });
+
+    lag_time_metric = telemetry_mgr->GaugeInstance("zeek", "timers_lag_time", {},
+                                                   "Lag between current network time and last expired timer", "seconds",
+                                                   []() -> prometheus::ClientMetric {
+                                                       prometheus::ClientMetric metric;
+                                                       metric.gauge.value =
+                                                           run_state::network_time - timer_mgr->last_timestamp;
+                                                       return metric;
+                                                   });
+
+    std::shared_ptr<telemetry::GaugeFamily> family =
+        telemetry_mgr->GaugeFamily("zeek", "timers_pending", {"type"}, "Number of timers for a certain type");
+    for ( int i = 0; i < NUM_TIMER_TYPES; i++ ) {
+        current_timer_metrics[i] = family->GetOrAdd({{"type", timer_type_to_string(static_cast<TimerType>(i))}},
+                                                    [i]() -> prometheus::ClientMetric {
+                                                        prometheus::ClientMetric metric;
+                                                        metric.gauge.value = TimerMgr::CurrentTimers()[i];
+                                                        return metric;
+                                                    });
+    }
 }

 void TimerMgr::Add(Timer* timer) {

src/Timer.h | 17

@@ -10,7 +10,14 @@

 namespace zeek {
 class ODesc;
-}
+
+namespace telemetry {
+class Gauge;
+class Counter;
+using GaugePtr = std::shared_ptr<Gauge>;
+using CounterPtr = std::shared_ptr<Counter>;
+} // namespace telemetry
+} // namespace zeek

 namespace zeek::detail {

@@ -153,10 +160,12 @@ private:
     // for the max_timer_expires=0 case.
     bool dispatch_all_expired = false;

-    size_t peak_size = 0;
-    size_t cumulative_num = 0;
-
     static unsigned int current_timers[NUM_TIMER_TYPES];

+    telemetry::CounterPtr cumulative_num_metric;
+    telemetry::GaugePtr lag_time_metric;
+    telemetry::GaugePtr current_timer_metrics[NUM_TIMER_TYPES];
+
     std::unique_ptr<PriorityQueue> q;
 };

@@ -13,6 +13,7 @@
 #include "zeek/Traverse.h"
 #include "zeek/Val.h"
 #include "zeek/iosource/Manager.h"
+#include "zeek/telemetry/Manager.h"

 using namespace zeek::detail;
 using namespace zeek::detail::trigger;

@@ -88,12 +89,6 @@ protected:
     double time;
 };

-Trigger::Trigger(std::shared_ptr<WhenInfo> wi, double timeout, const IDSet& _globals, std::vector<ValPtr> _local_aggrs,
-                 Frame* f, const Location* loc)
-    : Trigger(std::move(wi), _globals, std::move(_local_aggrs), timeout, f, loc) {
-    Unref(this);
-}
-
 Trigger::Trigger(std::shared_ptr<WhenInfo> wi, const IDSet& _globals, std::vector<ValPtr> _local_aggrs, double timeout,
                  Frame* f, const Location* loc) {
     timeout_value = timeout;

@@ -437,7 +432,19 @@ Manager::Manager() : iosource::IOSource() { pending = new TriggerList(); }

 Manager::~Manager() { delete pending; }

-void Manager::InitPostScript() { iosource_mgr->Register(this, true); }
+void Manager::InitPostScript() {
+    trigger_count = telemetry_mgr->CounterInstance("zeek", "triggers", {}, "Total number of triggers scheduled");
+    trigger_pending =
+        telemetry_mgr->GaugeInstance("zeek", "pending_triggers", {}, "Pending number of triggers", "",
+                                     []() -> prometheus::ClientMetric {
+                                         prometheus::ClientMetric metric;
+                                         metric.gauge.value =
+                                             trigger_mgr ? static_cast<double>(trigger_mgr->pending->size()) : 0.0;
+                                         return metric;
+                                     });
+
+    iosource_mgr->Register(this, true);
+}

 double Manager::GetNextTimeout() { return pending->empty() ? -1 : run_state::network_time + 0.100; }

@@ -468,13 +475,13 @@ void Manager::Queue(Trigger* trigger) {
     if ( std::find(pending->begin(), pending->end(), trigger) == pending->end() ) {
         Ref(trigger);
         pending->push_back(trigger);
-        total_triggers++;
+        trigger_count->Inc();
         iosource_mgr->Wakeup(Tag());
     }
 }

 void Manager::GetStats(Stats* stats) {
-    stats->total = total_triggers;
+    stats->total = static_cast<unsigned long>(trigger_count->Value());
     stats->pending = pending->size();
 }

@@ -18,6 +18,13 @@ class Val;

 using ValPtr = IntrusivePtr<Val>;

+namespace telemetry {
+class Gauge;
+class Counter;
+using GaugePtr = std::shared_ptr<Gauge>;
+using CounterPtr = std::shared_ptr<Counter>;
+} // namespace telemetry
+
 namespace detail {

 class Frame;

@@ -39,13 +46,6 @@ class TriggerTraversalCallback;

 class Trigger final : public Obj, public notifier::detail::Receiver {
 public:
-    // This first constructor can return an invalid pointer, so
-    // its value must not be used further.
-    [[deprecated(
-        "Remove in v7.1. Use second Trigger constructor via "
-        "make_intrusive<...>.")]] Trigger(std::shared_ptr<WhenInfo> wi, double timeout, const IDSet& globals,
-                                          std::vector<ValPtr> local_aggrs, Frame* f, const Location* loc);
-
     // Use this constructor via make_intrusive<...>. The usual pattern is
     // to then discard what's returned, i.e. "(void)make_intrusive<...>" -
     // however, a valid pointer will be returned that can be used for

@@ -187,7 +187,8 @@ public:
 private:
     using TriggerList = std::list<Trigger*>;
     TriggerList* pending;
-    unsigned long total_triggers = 0;
+    telemetry::CounterPtr trigger_count;
+    telemetry::GaugePtr trigger_pending;
 };

 } // namespace trigger

@@ -599,12 +599,9 @@ TypePtr SetType::ShallowClone() { return make_intrusive<SetType>(indices, elemen

 SetType::~SetType() = default;

-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
 FuncType::Capture::Capture(detail::IDPtr _id, bool _deep_copy) : id(std::move(_id)), deep_copy(_deep_copy) {
     is_managed = id ? ZVal::IsManagedType(id->GetType()) : false;
 }
-#pragma GCC diagnostic pop

 FuncType::FuncType(RecordTypePtr arg_args, TypePtr arg_yield, FunctionFlavor arg_flavor)
     : Type(TYPE_FUNC), args(std::move(arg_args)), arg_types(make_intrusive<TypeList>()), yield(std::move(arg_yield)) {

src/Type.h | 10

@@ -513,8 +513,6 @@ public:
 public:
     Capture(detail::IDPtr _id, bool _deep_copy);

-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
     Capture(const Capture&) = default;
     Capture(Capture&&) = default;
     Capture& operator=(const Capture&) = default;
@@ -527,11 +525,11 @@ public:

     // For script optimization:
     void SetID(detail::IDPtr new_id) { id = std::move(new_id); }
-#pragma GCC diagnostic pop

-    [[deprecated("Remove in v7.1. Use non-default constructor and associated accessors.")]] detail::IDPtr id;
-    [[deprecated("Remove in v7.1. Use non-default constructor and associated accessors.")]] bool deep_copy;
-    [[deprecated("Remove in v7.1. Use non-default constructor and associated accessors.")]] bool is_managed;
+private:
+    detail::IDPtr id;
+    bool deep_copy;
+    bool is_managed;
 };

 using CaptureList = std::vector<Capture>;

@@ -1,5 +1,5 @@
 spicy_add_analyzer(
     NAME LDAP
     PACKAGE_NAME spicy-ldap
-    SOURCES ldap.spicy ldap.evt asn1.spicy
-    MODULES LDAP ASN1)
+    SOURCES ldap.spicy ldap.evt asn1.spicy ldap_zeek.spicy
+    MODULES LDAP ASN1 LDAP_Zeek)

@@ -41,3 +41,18 @@ on LDAP::SearchRequest -> event LDAP::search_request($conn,
 on LDAP::SearchResultEntry -> event LDAP::search_result_entry($conn,
                                                               message.messageID,
                                                               self.objectName);
+
+on LDAP::ExtendedRequest -> event LDAP::extended_request($conn,
+                                                         message.messageID,
+                                                         self.requestName,
+                                                         self.requestValue);
+
+on LDAP::ExtendedResponse -> event LDAP::extended_response($conn,
+                                                           message.messageID,
+                                                           message.result_.code,
+                                                           self.responseName,
+                                                           self.responseValue);
+
+# Once switched into MessageMode::TLS, we won't parse messages anymore,
+# so this is raised just once.
+on LDAP::Message if (ctx.messageMode == LDAP::MessageMode::TLS) -> event LDAP::starttls($conn);

@@ -130,29 +130,104 @@ public type Result = unit {
 const GSSAPI_MECH_MS_KRB5 = "1.2.840.48018.1.2.2";

 # Supported SASL stripping modes.
-type SaslStripping = enum {
+type MessageMode = enum {
     MS_KRB5 = 1, # Payload starts with a 4 byte length followed by a wrap token that may or may not be sealed.
+    TLS = 2, # Client/server used StartTLS, forward to SSL analyzer.
+    MAYBE_ENCRYPTED = 3, # Use a heuristic to determine encrypted traffic.
+    CLEARTEXT = 4, # Assume cleartext.
+    ENCRYPTED = 5, # Assume encrypted.
 };

 type Ctx = struct {
-    saslStripping: SaslStripping; # Which mode of SASL stripping to use.
+    messageMode: MessageMode; # Message dispatching mode
+    saslMechanism: string; # The SASL mechanism selected by the client.
+    startTlsRequested: bool; # Did the client use the StartTLS extended request?
 };

 #-----------------------------------------------------------------------------
 public type Messages = unit {
     %context = Ctx;
-    : SASLStrip(self.context())[];
+    : MessageDispatch(self.context())[];
 };

 #-----------------------------------------------------------------------------
-public type SASLStrip = unit(ctx: Ctx&) {
-    switch( ctx.saslStripping ) {
-        SaslStripping::Undef -> : Message(ctx);
-        SaslStripping::MS_KRB5 -> : SaslMsKrb5Stripper(ctx);
+public type MessageDispatch = unit(ctx: Ctx&) {
+    switch( ctx.messageMode ) {
+        MessageMode::Undef -> : Message(ctx);
+        MessageMode::MS_KRB5 -> : SaslMsKrb5Stripper(ctx);
+        MessageMode::TLS -> : TlsForward; # never returns
+        MessageMode::MAYBE_ENCRYPTED -> : MaybeEncrypted(ctx);
+        MessageMode::CLEARTEXT -> : Message(ctx);
+        MessageMode::ENCRYPTED -> : EncryptedMessage;
     };
 };

+#-----------------------------------------------------------------------------
+type MaybeEncrypted = unit(ctx: Ctx&) {
+    # A plaintext LDAP message always starts with at least 3 bytes and the first
+    # byte is 0x30 for the sequence. A SASL encrypted message starts with a 4 byte
+    # length field. The heuristic here is that if the first byte is a 0x30,
+    # assume it's unencrypted LDAP. This should be pretty good, if it was an
+    # encrypted/SASL wrapped message, it would have a size between 0x30000000 and
+    # 0x30FFFFFF, meaning at least a size of ~768MB, which seems unlikely.
+    var start: iterator<stream>;
+    var saslLen: uint64;
+    var mech: bytes;
+
+    on %init {
+        self.start = self.input();
+        # Don't have starts_with() on string, work around that.
+        # https://github.com/zeek/spicy/issues/1807
+        self.mech = ctx.saslMechanism.encode(spicy::Charset::UTF8);
+    }
+
+    first: uint8 {
+        if ( $$ == 0x30 ) {
+            ctx.messageMode = MessageMode::CLEARTEXT;
+        } else {
+            ctx.messageMode = MessageMode::ENCRYPTED;
+        }
+    }
+
+    # As a further heuristic, if encrypted mode was decided and the client
+    # requested GSSAPI or GSS-SPNEGO (or we just didn't see it) peak a bit
+    # into the SASL payload and check if it starts with a 0504 (WRAP_TOKEN).
+    # If so, switch into KRB mode assuming that's what is being used and
+    # have a chance seeing some more plaintext LDAP in non-sealed tokens.
+    rem: uint8[3] if ( ctx.messageMode == MessageMode::ENCRYPTED && (|self.mech| == 0 || self.mech.starts_with(b"GSS")) ) {
+        self.saslLen = (uint64(self.first) << 24) + (uint64($$[0]) << 16) + (uint64($$[1]) << 8) + uint64($$[2]);
+    }
+
+    : uint16 if ( self.saslLen >= 2 ) {
+        if ( $$ == 0x0504 ) {
+            ctx.messageMode = MessageMode::MS_KRB5;
+        }
+    }
+
+    # Rewind the input.
+    : void {
+        # Prevent MessageDispatch from recursing endlessly.
+        assert ctx.messageMode != MessageMode::MAYBE_ENCRYPTED;
+        self.set_input(self.start);
+    }
+
+    # One recursion to parse with the new ctx.messageMode setting.
+    : MessageDispatch(ctx);
+};
+
+#-----------------------------------------------------------------------------
+type EncryptedMessage = unit {
+    len: uint32;
+    : skip bytes &size=self.len;
+};
+
+#-----------------------------------------------------------------------------
+type TlsForward = unit {
+    # Just consume everything. This is hooked in ldap_zeek.spicy
+    chunk: bytes &chunked &eod;
+};
+
 type KrbWrapToken = unit {
     # https://datatracker.ietf.org/doc/html/rfc4121#section-4.2.6.2

@@ -174,6 +249,9 @@ type KrbWrapToken = unit {
         } else if ( self.rrc == 0 ) {
             self.trailer_ec = self.ec;
         } else {
+            if ( ! self.ctx_flags.sealed )
+                # If it's sealed, we'll consume until &eod anyhow
+                # and ec/rrc shouldn't apply, otherwise, bail.
             throw "Unhandled rc %s and ec %s" % (self.ec, self.rrc);
         }
     }

@@ -223,6 +301,7 @@ public type Message = unit(ctx: Ctx&) {
     var arg: string = "";
     var seqHeaderLen: uint64;
     var msgLen: uint64;
+    var opLen: uint64;

     seqHeader: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Universal && $$.tag.type_ == ASN1::ASN1Type::Sequence) {
         self.msgLen = $$.len.len;

@@ -241,10 +320,11 @@ public type Message = unit(ctx: Ctx&) {

     protocolOp: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Application) {
         self.opcode = cast<ProtocolOpcode>(cast<uint8>($$.tag.type_));
+        self.opLen = $$.len.len;
     }

     switch ( self.opcode ) {
-        ProtocolOpcode::BIND_REQUEST -> BIND_REQUEST: BindRequest(self);
+        ProtocolOpcode::BIND_REQUEST -> BIND_REQUEST: BindRequest(self, ctx);
         ProtocolOpcode::BIND_RESPONSE -> BIND_RESPONSE: BindResponse(self, ctx);
         ProtocolOpcode::UNBIND_REQUEST -> UNBIND_REQUEST: UnbindRequest(self);
         ProtocolOpcode::SEARCH_REQUEST -> SEARCH_REQUEST: SearchRequest(self);

@@ -263,12 +343,12 @@ public type Message = unit(ctx: Ctx&) {
         # just commenting this out, it will stop processing LDAP Messages in this connection
         ProtocolOpcode::ADD_REQUEST -> ADD_REQUEST: NotImplemented(self);
         ProtocolOpcode::COMPARE_REQUEST -> COMPARE_REQUEST: NotImplemented(self);
-        ProtocolOpcode::EXTENDED_REQUEST -> EXTENDED_REQUEST: NotImplemented(self);
-        ProtocolOpcode::EXTENDED_RESPONSE -> EXTENDED_RESPONSE: NotImplemented(self);
+        ProtocolOpcode::EXTENDED_REQUEST -> EXTENDED_REQUEST: ExtendedRequest(self, ctx);
+        ProtocolOpcode::EXTENDED_RESPONSE -> EXTENDED_RESPONSE: ExtendedResponse(self, ctx);
         ProtocolOpcode::INTERMEDIATE_RESPONSE -> INTERMEDIATE_RESPONSE: NotImplemented(self);
         ProtocolOpcode::MOD_DN_REQUEST -> MOD_DN_REQUEST: NotImplemented(self);
         ProtocolOpcode::SEARCH_RESULT_REFERENCE -> SEARCH_RESULT_REFERENCE: NotImplemented(self);
-    } &size=self.protocolOp.len.len;
+    } &size=self.opLen;

     # Ensure some invariants hold after parsing the command.
     : void &requires=(self.offset() >= self.seqHeaderLen);

@@ -295,26 +375,29 @@ type GSS_SPNEGO_negTokenInit = unit {
     : skip bytes &eod;
 };

-# Peak into GSS-SPNEGO payload and ensure it is indeed GSS-SPNEGO.
-type GSS_SPNEGO = unit {
+# Peak into GSS-SPNEGO payload and ensure it is indeed GSS-SPNEGO,
+# or GSS-SPNEGO with a NTMLSSP payload that starts with NTLMSSP.
+type GSS_SPNEGO_Init = unit {
     # This is the optional octet string in SaslCredentials.
     credentialsHeader: ASN1::ASN1Header &requires=($$.tag.type_ == ASN1::ASN1Type::OctetString);

     # Now we either have the initial message as specified in RFC2743 or
-    # a continuation from RFC4178
+    # a continuation from RFC4178, or a "NTMLSSP" signature.
     #
-    # 60 -> APPLICATION [0] https://datatracker.ietf.org/doc/html/rfc2743#page-81)
+    # 60 -> APPLICATION [0] https://datatracker.ietf.org/doc/html/rfc2743#page-81
     # a1 -> CHOICE [1] https://www.rfc-editor.org/rfc/rfc4178#section-4.2
+    # "NTMLSSP" https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-nlmp/907f519d-6217-45b1-b421-dca10fc8af0d
     #
-    gssapiHeader: ASN1::ASN1Header &requires=(
-        $$.tag.class == ASN1::ASN1Class::Application && $$.tag.type_ == ASN1::ASN1Type(0)
-        || $$.tag.class == ASN1::ASN1Class::ContextSpecific && $$.tag.type_ == ASN1::ASN1Type(1)
-    );
+    switch {
+        -> spnegoInitByte: uint8(0x60);
+        -> spnegoChoiceByte: uint8(0xa1);
+        -> ntlmSignature: skip b"NTLMSSP"; # Unsupported, should forward to child analyzer!
+    };

-    switch ( self.gssapiHeader.tag.type_ ) {
-        ASN1::ASN1Type(0) -> initial: GSS_SPNEGO_negTokenInit;
-        * -> : skip bytes &eod;
-    } &size=self.gssapiHeader.len.len;
+    spnegoLen: skip ASN1::LengthType if (self?.spnegoInitByte || self?.spnegoChoiceByte);
+
+    # Peak into the SPNEGO_negTokenInit
+    spnegoInitial: skip GSS_SPNEGO_negTokenInit if (self?.spnegoInitByte);
 };

 type SaslCredentials = unit() {

@@ -322,12 +405,22 @@ type SaslCredentials = unit() {

     # Peak into GSS-SPNEGO payload if we have any.
     switch ( self.mechanism ) {
-        "GSS-SPNEGO" -> gss_spnego: GSS_SPNEGO;
+        "GSS-SPNEGO" -> gss_spnego: GSS_SPNEGO_Init;
         * -> : skip bytes &eod;
     };
 };

-type NegTokenResp = unit {
+type GSS_SPNEGO_Subsequent = unit {
+    switch {
+        -> spnegoChoiceByte: uint8(0xa1);
+        -> ntmlSignature: skip b"NTLMSSP"; # Unsupported, should forward to NTLM!
+    };
+
+    spnegoChoiceLen: skip ASN1::LengthType if (self?.spnegoChoiceByte);
+    negTokenResp: GSS_SPNEGO_negTokenResp if (self?.spnegoChoiceByte);
+};
+
+type GSS_SPNEGO_negTokenResp = unit {
     var accepted: bool;
     var supportedMech: ASN1::ASN1Message;

@@ -355,34 +448,13 @@ type NegTokenResp = unit {
     } &parse-from=self.supportedMech.application_data;
 };

-type ServerSaslCreds = unit {
-    serverSaslCreds: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific && $$.tag.type_ == ASN1::ASN1Type(7));
-
-    # The PCAP missing_ldap_logs.pcapng has a1 81 b6 here for the GSS-SPNEGO response.
-    #
-    # This is context-specific ID 1, constructed, and a length of 182 as
-    # specified by in 4.2 of RFC4178.
-    #
-    # https://www.rfc-editor.org/rfc/rfc4178#section-4.2
-    #
-    # TODO: This is only valid for a GSS-SPNEGO negTokenResp.
-    # If you want to support something else, remove the requires
-    # and add more to the switch below.
-    choice: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific);
-
-    switch ( self.choice.tag.type_ ) {
-        ASN1::ASN1Type(1) -> negTokenResp: NegTokenResp;
-        # ...
-    } &size=self.choice.len.len;
-};
-
 # TODO(fox-ds): A helper unit for requests for which no handling has been implemented.
 # Eventually all uses of this unit should be replaced with actual parsers so this unit can be removed.
 type NotImplemented = unit(inout message: Message) {
     : skip bytes &eod;
 };

-type BindRequest = unit(inout message: Message) {
+type BindRequest = unit(inout message: Message, ctx: Ctx&) {
     version: ASN1::ASN1Message(True) &convert=$$.body.num_value;
     name: ASN1::ASN1Message(True) &convert=$$.body.str_value {
         message.obj = self.name;

@@ -406,12 +478,32 @@ type BindRequest = unit(inout message: Message) {
     saslCreds: SaslCredentials() &parse-from=self.authData if ((self.authType == BindAuthType::BIND_AUTH_SASL) &&
                                                                (|self.authData| > 0)) {
         message.arg = self.saslCreds.mechanism;
+        ctx.saslMechanism = self.saslCreds.mechanism;
     }
 } &requires=(self?.authType && (self.authType != BindAuthType::Undef));

+type ServerSaslCreds = unit {
+    serverSaslCreds: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific && $$.tag.type_ == ASN1::ASN1Type(7));
+    payload: bytes &size=self.serverSaslCreds.len.len;
+};
+
 type BindResponse = unit(inout message: Message, ctx: Ctx&) {
     : Result {
         message.result_ = $$;
+
+        # The SASL authentication was successful. We do not actually
+        # know if the following messages are encrypted or not. This may be
+        # mechanism and parameter specific. For example SCRAM-SHA512 or NTLM
+        # will continue to be cleartext, while SRP or GSS-API would be encrypted.
+        #
+        # Switch messageMode into trial mode which is explored via MessageDispatch
+        # and the MaybeEncrypted unit.
+        #
+        # Note, messageMode may be changed to something more specific like
+        # MS_KRB5 below.
+        if ( |ctx.saslMechanism| > 0 && $$.code == ResultCode::SUCCESS ) {
+            ctx.messageMode = MessageMode::MAYBE_ENCRYPTED;
+        }
     }

     # Try to parse serverSaslCreds if there's any input remaining. This

@@ -421,14 +513,18 @@ type BindResponse = unit(inout message: Message, ctx: Ctx&) {
     # if the serverSaslCreds field exists or not. But, not sure we can
     # check if there's any bytes left at this point outside of passing
     # in the length and playing with offset().
-    serverSaslCreds: ServerSaslCreds[] &eod {
-        if ( |self.serverSaslCreds| > 0 ) {
-            if ( self.serverSaslCreds[0]?.negTokenResp ) {
-                local token = self.serverSaslCreds[0].negTokenResp;
+    serverSaslCreds: ServerSaslCreds[] &eod;
+
+    # If the client requested GSS-SPNEGO, try to parse the server's response
+    # to switch message mode.
+    gss_spnego: GSS_SPNEGO_Subsequent &parse-from=self.serverSaslCreds[0].payload
+        if (ctx.saslMechanism == "GSS-SPNEGO" && |self.serverSaslCreds| > 0) {
+
+        if ( $$?.negTokenResp ) {
+            local token = $$.negTokenResp;
             if ( token.accepted && token?.supportedMechOid ) {
                 if ( token.supportedMechOid == GSSAPI_MECH_MS_KRB5 ) {
-                    ctx.saslStripping = SaslStripping::MS_KRB5;
-                    }
+                    ctx.messageMode = MessageMode::MS_KRB5;
                 }
             }
         }

@@ -980,16 +1076,61 @@ type AbandonRequest = unit(inout message: Message) {
 #-----------------------------------------------------------------------------
 # Extended Operation
 # https://tools.ietf.org/html/rfc4511#section-4.12
-
-# TODO: implement ExtendedRequest
-# type ExtendedRequest = unit(inout message: Message) {
-#
-# };
-
-# TODO: implement ExtendedResponse
-# type ExtendedResponse = unit(inout message: Message) {
-#
-# };
+type ExtendedRequest = unit(inout message: Message, ctx: Ctx&) {
+    var requestValue: bytes;
+    header: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific);
+    requestName: bytes &size=self.header.len.len &convert=$$.decode(spicy::Charset::ASCII) {
+        message.obj = $$;
+    }
+
+    # If there's more byte to parse, it's the requestValue.
+    : ASN1::ASN1Message(False)
+        &requires=($$.head.tag.class == ASN1::ASN1Class::ContextSpecific)
+        if ( message.opLen > self.offset() ) {
+
+        self.requestValue = $$.application_data;
+    }
+
+    on %done {
+        # Did the client request StartTLS?
+        #
+        # https://datatracker.ietf.org/doc/html/rfc4511#section-4.14.1
+        if ( self.requestName == "1.3.6.1.4.1.1466.20037" )
+            ctx.startTlsRequested = True;
+    }
+};
+
+#-----------------------------------------------------------------------------
+type ExtendedResponseEntry = unit(inout r: ExtendedResponse) {
+    : ASN1::ASN1Message(False) &requires=($$.head.tag.class == ASN1::ASN1Class::ContextSpecific) {
+        if ( $$.head.tag.type_ == ASN1::ASN1Type(10) )
+            r.responseName = $$.application_data;
+        else if ( $$.head.tag.type_ == ASN1::ASN1Type(11) )
+            r.responseValue = $$.application_data;
+        else
+            throw "Unhandled extended response tag %s" % $$.head.tag;
+    }
+};
+
+#-----------------------------------------------------------------------------
+type ExtendedResponse = unit(inout message: Message, ctx: Ctx&) {
+    var responseName: bytes;
+    var responseValue: bytes;
+    : Result {
+        message.result_ = $$;
+    }
+
+    # Try to parse two ASN1 entries if there are bytes left in the unit.
+    # Both are optional and identified by context specific tagging.
+    : ExtendedResponseEntry(self) if ( message.opLen > self.offset() );
+    : ExtendedResponseEntry(self) if ( message.opLen > self.offset() );
+
+    on %done {
+        # Client had requested StartTLS and it was successful? Switch to SSL.
+        if ( ctx.startTlsRequested && message.result_.code == ResultCode::SUCCESS )
+            ctx.messageMode = MessageMode::TLS;
+    }
+};

 #-----------------------------------------------------------------------------
 # IntermediateResponse Message

src/analyzer/protocol/ldap/ldap_zeek.spicy | 12 (new file)

@@ -0,0 +1,12 @@
+module LDAP_Zeek;
+
+import LDAP;
+import zeek;
+
+on LDAP::TlsForward::%init {
+    zeek::protocol_begin("SSL");
+}
+
+on LDAP::TlsForward::chunk {
+    zeek::protocol_data_in(zeek::is_orig(), self.chunk);
+}

@@ -195,7 +195,6 @@ event modbus_write_multiple_registers_response%(c: connection, headers: ModbusHe
 ##
 ## refs: A vector of reference records.
 event modbus_read_file_record_request%(c: connection, headers: ModbusHeaders, byte_count: count, refs: ModbusFileRecordRequests%);
-event modbus_read_file_record_request%(c: connection, headers: ModbusHeaders%) &deprecated="Remove in v7.1. Use the version that takes a byte_count and vector of references";

 ## Generated for a Modbus read file record response.
 ##
@@ -207,7 +206,6 @@ event modbus_read_file_record_request%(c: connection, headers: ModbusHeaders%) &
 ##
 ## refs: A vector of reference records.
 event modbus_read_file_record_response%(c: connection, headers: ModbusHeaders, byte_count: count, refs: ModbusFileRecordResponses%);
-event modbus_read_file_record_response%(c: connection, headers: ModbusHeaders%) &deprecated="Remove in v7.1. Use the version that takes a byte_count and vector of references";

 ## Generated for a Modbus write file record request.
 ##
@@ -219,7 +217,6 @@ event modbus_read_file_record_response%(c: connection, headers: ModbusHeaders%)
 ##
 ## refs: A vector of reference records.
 event modbus_write_file_record_request%(c: connection, headers: ModbusHeaders, byte_count: count, refs: ModbusFileReferences%);
-event modbus_write_file_record_request%(c: connection, headers: ModbusHeaders%) &deprecated="Remove in v7.1. Use the version that takes a byte_count and vector of references";

 ## Generated for a Modbus write file record response.
 ##
@@ -231,7 +228,6 @@ event modbus_write_file_record_request%(c: connection, headers: ModbusHeaders%)
 ##
 ## refs: A vector of reference records.
 event modbus_write_file_record_response%(c: connection, headers: ModbusHeaders, byte_count: count, refs: ModbusFileReferences%);
-event modbus_write_file_record_response%(c: connection, headers: ModbusHeaders%) &deprecated="Remove in v7.1. Use the version that takes a byte_count and vector of references";

 ## Generated for a Modbus mask write register request.
 ##
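For reference, a minimal Zeek-script handler for the retained (non-deprecated) request event could look like the sketch below; the handler body and the printed text are illustrative only, while the event signature comes from the declaration above.

event modbus_read_file_record_request(c: connection, headers: ModbusHeaders, byte_count: count, refs: ModbusFileRecordRequests)
	{
	# Report which endpoint asked for how many bytes of file records.
	print fmt("%s requested %d bytes of Modbus file records", c$id$orig_h, byte_count);
	}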
@@ -84,6 +84,57 @@ event mysql_server_version%(c: connection, ver: string%);
 ##
 ## username: The username supplied by the client
 ##
-## .. zeek:see:: mysql_command_request mysql_error mysql_ok mysql_server_version
+## .. zeek:see:: mysql_command_request mysql_error mysql_ok mysql_server_version mysql_ssl_request
 event mysql_handshake%(c: connection, username: string%);

+## Generated for a short client handshake response packet with the CLIENT_SSL
+## flag set. Usually the client will initiate a TLS handshake afterwards.
+#
+## See the MySQL `documentation <http://dev.mysql.com/doc/internals/en/client-server-protocol.html>`__
+## for more information about the MySQL protocol.
+##
+## c: The connection.
+##
+## .. zeek:see:: mysql_handshake
+event mysql_ssl_request%(c: connection%);
+
+## Generated for information about plugin authentication within handshake packets.
+##
+## c: The connection.
+##
+## is_orig: True if this is from the client, false if from the server.
+##
+## name: Name of the authentication plugin.
+##
+## data: The initial auth data. From the server, it is the concatenation of
+##       auth_plugin_data_part_1 and auth_plugin_data_part_2 in the handshake.
+##       For the client it is the auth_response in the handshake response.
+##
+## .. zeek:see:: mysql_handshake mysql_auth_switch_request mysql_auth_more_data
+event mysql_auth_plugin%(c: connection, is_orig: bool, name: string, data: string%);
+
+## Generated for a server packet with an auth switch request.
+##
+## c: The connection.
+##
+## name: The plugin name.
+##
+## data: Initial authentication data for the plugin.
+##
+## .. zeek:see:: mysql_handshake mysql_auth_more_data
+event mysql_auth_switch_request%(c: connection, name: string, data: string%);
+
+## Generated for opaque authentication data exchanged between client and server
+## after the client's handshake packet, but before the server replied with
+## an OK_Packet
+##
+## Data is specific to the plugin auth mechanism used by client and server.
+##
+## c: The connection.
+##
+## is_orig: True if this is from the client, false if from the server.
+##
+## data: More authentication data.
+##
+## .. zeek:see:: mysql_handshake mysql_auth_switch_request
+event mysql_auth_more_data%(c: connection, is_orig: bool, data: string%);
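A hedged sketch of how a site policy might consume the new MySQL events; only the event signatures come from the declarations above, the handler bodies and the plugin-name check are hypothetical.

event mysql_ssl_request(c: connection)
	{
	print fmt("MySQL client %s requested TLS", c$id$orig_h);
	}

event mysql_auth_plugin(c: connection, is_orig: bool, name: string, data: string)
	{
	# Example policy: note clients still negotiating the legacy password plugin.
	if ( is_orig && name == "mysql_native_password" )
		print fmt("%s uses auth plugin %s (%d bytes of auth data)", c$id$orig_h, name, |data|);
	}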
@@ -14,6 +14,28 @@ refine flow MySQL_Flow += {
 		                                        connection()->zeek_analyzer()->Conn(),
 		                                        zeek::make_intrusive<zeek::StringVal>(c_str(${msg.handshake9.server_version})));
 		}
+
+	if ( mysql_auth_plugin )
+		{
+		if ( ${msg.version} == 10 && (${msg.handshake10.capability_flags_2} << 16) & CLIENT_PLUGIN_AUTH )
+			{
+			auto auth_plugin = zeek::make_intrusive<zeek::StringVal>(c_str(${msg.handshake10.auth_plugin}));
+			auto data_part_1 = ${msg.handshake10.auth_plugin_data_part_1};
+			auto data_part_2 = ${msg.handshake10.auth_plugin_data_part_2};
+			std::vector<zeek::data_chunk_t> data_parts = {
+				zeek::data_chunk_t{data_part_1.length(), reinterpret_cast<const char*>(data_part_1.begin())},
+				zeek::data_chunk_t{data_part_2.length(), reinterpret_cast<const char*>(data_part_2.begin())},
+			};
+			auto data = zeek::make_intrusive<zeek::StringVal>(zeek::concatenate(data_parts));
+
+			zeek::BifEvent::enqueue_mysql_auth_plugin(connection()->zeek_analyzer(),
+			                                          connection()->zeek_analyzer()->Conn(),
+			                                          false /*is_orig*/,
+			                                          std::move(auth_plugin),
+			                                          std::move(data));
+			}
+		}
+
 	return true;
 	%}
@@ -23,23 +45,42 @@ refine flow MySQL_Flow += {
 	connection()->zeek_analyzer()->AnalyzerConfirmation();

 	// If the client requested SSL and didn't provide credentials, switch to SSL
-	if ( ${msg.version} == 10 && ( ${msg.v10_response.cap_flags} & CLIENT_SSL ) && ${msg.v10_response.credentials}->empty() )
+	if ( ${msg.version} == 10 && ( ${msg.v10_response.cap_flags} & CLIENT_SSL ))
 		{
 		connection()->zeek_analyzer()->StartTLS();
+
+		if ( mysql_ssl_request )
+			zeek::BifEvent::enqueue_mysql_ssl_request(connection()->zeek_analyzer(),
+			                                          connection()->zeek_analyzer()->Conn());
 		return true;
 		}

 	if ( mysql_handshake )
 		{
-		if ( ${msg.version} == 10 && ${msg.v10_response.credentials}->size() > 0 )
+		if ( ${msg.version} == 10 )
 			zeek::BifEvent::enqueue_mysql_handshake(connection()->zeek_analyzer(),
 			                                        connection()->zeek_analyzer()->Conn(),
-			                                        zeek::make_intrusive<zeek::StringVal>(c_str(${msg.v10_response.credentials[0].username})));
+			                                        zeek::make_intrusive<zeek::StringVal>(c_str(${msg.v10_response.plain.credentials.username})));
 		if ( ${msg.version} == 9 )
 			zeek::BifEvent::enqueue_mysql_handshake(connection()->zeek_analyzer(),
 			                                        connection()->zeek_analyzer()->Conn(),
 			                                        zeek::make_intrusive<zeek::StringVal>(c_str(${msg.v9_response.username})));
 		}
+
+	if ( mysql_auth_plugin )
+		{
+		if ( ${msg.version} == 10 && ${msg.v10_response.plain.cap_flags} & CLIENT_PLUGIN_AUTH )
+			{
+			auto auth_plugin = zeek::make_intrusive<zeek::StringVal>(c_str(${msg.v10_response.plain.auth_plugin}));
+			auto data = to_stringval(${msg.v10_response.plain.credentials.password.val});
+			zeek::BifEvent::enqueue_mysql_auth_plugin(connection()->zeek_analyzer(),
+			                                          connection()->zeek_analyzer()->Conn(),
+			                                          true /*is_orig*/,
+			                                          std::move(auth_plugin),
+			                                          std::move(data));
+			}
+		}
+
 	return true;
 	%}
@@ -83,8 +124,8 @@ refine flow MySQL_Flow += {

 	function proc_resultset(msg: Resultset): bool
 		%{
-		if ( ${msg.is_eof} )
-			return true; // Raised through proc_eof_packet()
+		if ( ${msg.is_eof_or_ok} )
+			return true; // Raised through proc_eof_packet() or proc_ok_packet()

 		if ( ! mysql_result_row )
 			return true;
@@ -112,6 +153,24 @@ refine flow MySQL_Flow += {
 		return true;
 		%}

+	function proc_auth_switch_request(msg: AuthSwitchRequest): bool
+		%{
+		zeek::BifEvent::enqueue_mysql_auth_switch_request(connection()->zeek_analyzer(),
+		                                                  connection()->zeek_analyzer()->Conn(),
+		                                                  zeek::make_intrusive<zeek::StringVal>(c_str(${msg.name})),
+		                                                  to_stringval(${msg.data}));
+		return true;
+		%}
+
+	function proc_auth_more_data(msg: AuthMoreData): bool
+		%{
+		zeek::BifEvent::enqueue_mysql_auth_more_data(connection()->zeek_analyzer(),
+		                                             connection()->zeek_analyzer()->Conn(),
+		                                             ${is_orig},
+		                                             to_stringval(${msg.data}));
+		return true;
+		%}
+
 };

 refine typeattr Initial_Handshake_Packet += &let {
@@ -141,3 +200,11 @@ refine typeattr EOF_Packet += &let {
 refine typeattr Resultset += &let {
 	proc = $context.flow.proc_resultset(this);
 };
+
+refine typeattr AuthSwitchRequest += &let {
+	proc = $context.flow.proc_auth_switch_request(this);
+};
+
+refine typeattr AuthMoreData += &let {
+	proc = $context.flow.proc_auth_more_data(this);
+};
@@ -140,6 +140,11 @@ enum state {
 	COMMAND_PHASE    = 1,
 };

+enum ConnectionExpected {
+	EXPECT_HANDSHAKE,
+	EXPECT_AUTH_DATA,
+};
+
 enum Expected {
 	NO_EXPECTATION,
 	EXPECT_STATUS,
@@ -158,12 +163,133 @@ enum EOFType {
 };

 enum Client_Capabilities {
+	CLIENT_CONNECT_WITH_DB            = 0x00000008,
 	CLIENT_SSL                        = 0x00000800,
+	CLIENT_PLUGIN_AUTH                = 0x00080000,
+	CLIENT_CONNECT_ATTRS              = 0x00100000,
 	# Expects an OK (instead of EOF) after the resultset rows of a Text Resultset.
 	CLIENT_DEPRECATE_EOF              = 0x01000000,
+	CLIENT_ZSTD_COMPRESSION_ALGORITHM = 0x04000000,
+	CLIENT_QUERY_ATTRIBUTES           = 0x08000000,
+};
+
+# Binary Protocol Resultset encoding.
+#
+# https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_binary_resultset.html
+#
+# Values taken from here: https://dev.mysql.com/doc/dev/mysql-server/latest/namespaceclassic__protocol_1_1field__type.html
+enum field_types {
+	TYPE_DECIMAL    = 0x00,
+	TYPE_TINY       = 0x01,
+	TYPE_SHORT      = 0x02,
+	TYPE_LONG       = 0x03,
+	TYPE_FLOAT      = 0x04,
+	TYPE_DOUBLE     = 0x05,
+	TYPE_NULL       = 0x06,
+	TYPE_TIMESTAMP  = 0x07,
+	TYPE_LONGLONG   = 0x08,
+	TYPE_INT24      = 0x09,
+	TYPE_DATE       = 0x0a,
+	TYPE_TIME       = 0x0b,
+	TYPE_DATETIME   = 0x0c,
+	TYPE_YEAR       = 0x0d,
+	TYPE_VARCHAR    = 0x0f,
+	TYPE_BIT        = 0x10,
+	TYPE_TIMESTAMP2 = 0x11,
+	TYPE_JSON       = 0xf5,
+	TYPE_NEWDECIMAL = 0xf6,
+	TYPE_ENUM       = 0xf7,
+	TYPE_SET        = 0xf8,
+	TYPE_TINYBLOB   = 0xf9,
+	TYPE_MEDIUMBLOB = 0xfa,
+	TYPE_LONGBLOB   = 0xfb,
+	TYPE_BLOB       = 0xfc,
+	TYPE_VARSTRING  = 0xfd,
+	TYPE_STRING     = 0xfe,
+	TYPE_GEOMETRY   = 0xff,
+};
+
+type Date = record {
+	year : int16;
+	month: int8;
+	day  : int8;
+};
+
+type Time = record {
+	hour  : int8;
+	minute: int8;
+	second: int8;
+};
+
+type BinaryDate = record {
+	len: uint8 &enforce(len == 0 || len == 4 || len == 7 || len == 11);
+	have_date: case ( len > 0 ) of {
+		true  -> date  : Date;
+		false -> none_1: empty;
+	};
+	have_time: case ( len > 4 ) of {
+		true  -> time  : Time;
+		false -> none_2: empty;
+	};
+	have_micros: case ( len > 7 ) of {
+		true  -> micros: int32;
+		false -> none_3: empty;
+	};
+};
+
+type DurationTime = record {
+	is_negative: int8 &enforce(is_negative == 0 || is_negative == 1);
+	days       : int32;
+	time       : Time;
+};
+
+type BinaryTime = record {
+	len: uint8 &enforce(len == 0 || len == 8 || len == 12);
+	have_time: case ( len > 0 ) of {
+		true  -> time  : DurationTime;
+		false -> none_1: empty;
+	};
+	have_micros: case ( len > 8 ) of {
+		true  -> micros: int32;
+		false -> none_2: empty;
+	};
+};
+
+type BinaryValue(type: uint16) = record {
+	value: case ( type ) of {
+		TYPE_DECIMAL    -> decimal_val   : LengthEncodedInteger;
+		TYPE_TINY       -> tiny_val      : int8;
+		TYPE_SHORT      -> short_val     : int16;
+		TYPE_LONG       -> long_val      : int32;
+		TYPE_FLOAT      -> float_val     : bytestring &length=4;
+		TYPE_DOUBLE     -> double_val    : bytestring &length=8;
+		TYPE_NULL       -> null_val      : empty; # in null_bitmap
+		TYPE_TIMESTAMP  -> timestamp_val : BinaryDate;
+		TYPE_LONGLONG   -> longlong_val  : int64;
+		TYPE_INT24      -> int24_val     : int32;
+		TYPE_DATE       -> date_val      : BinaryDate;
+		TYPE_TIME       -> time_val      : BinaryTime;
+		TYPE_DATETIME   -> datetime_val  : BinaryDate;
+		TYPE_YEAR       -> year_val      : int16;
+		TYPE_VARCHAR    -> varchar_val   : LengthEncodedString;
+		TYPE_BIT        -> bit_val       : LengthEncodedString;
+		TYPE_TIMESTAMP2 -> timestamp2_val: BinaryDate;
+		TYPE_JSON       -> json_val      : LengthEncodedString;
+		TYPE_NEWDECIMAL -> newdecimal_val: LengthEncodedString;
+		TYPE_ENUM       -> enum_val      : LengthEncodedString;
+		TYPE_SET        -> set_val       : LengthEncodedString;
+		TYPE_TINYBLOB   -> tinyblob_val  : LengthEncodedString;
+		TYPE_MEDIUMBLOB -> mediumblob_val: LengthEncodedString;
+		TYPE_LONGBLOB   -> longblob_val  : LengthEncodedString;
+		TYPE_BLOB       -> blob_val      : LengthEncodedString;
+		TYPE_VARSTRING  -> varstring_val : LengthEncodedString;
+		TYPE_STRING     -> string_val    : LengthEncodedString;
+		TYPE_GEOMETRY   -> geometry_val  : LengthEncodedString;
+	};
 };

 type NUL_String = RE/[^\0]*\0/;
+type EmptyOrNUL_String = RE/([^\0]*\0)?/;

 # MySQL PDU
@@ -193,7 +319,7 @@ type Server_Message(seq_id: uint8, pkt_len: uint32) = case is_initial of {
 };

 type Client_Message(state: int) = case state of {
-	CONNECTION_PHASE -> connection_phase: Handshake_Response_Packet;
+	CONNECTION_PHASE -> connection_phase: Connection_Phase_Packets;
 	COMMAND_PHASE    -> command_phase   : Command_Request_Packet;
 };
@@ -219,8 +345,24 @@ type Handshake_v10 = record {
 	character_set          : uint8;
 	status_flags           : uint16;
 	capability_flags_2     : uint16;
-	auth_plugin_data_len   : uint8;
-	auth_plugin_name       : NUL_String;
+	auth_plugin_data_len   : uint8 &enforce( auth_plugin_data_len==0 || auth_plugin_data_len >= 21);
+	reserved               : padding[10];
+	auth_plugin_data_part_2: bytestring &length=auth_plugin_data_part_2_len;
+	have_plugin            : case ( ( capability_flags_2 << 16 ) & CLIENT_PLUGIN_AUTH ) of {
+		CLIENT_PLUGIN_AUTH -> auth_plugin: NUL_String;
+		0x0                -> none       : empty;
+	};
+} &let {
+	# The length of auth_plugin_data_part_2 is at least 13 bytes,
+	# or auth_plugin_data_len - 8 if that is larger, check for
+	# auth_plugin_data_len > 21 (8 + 13) to prevent underflow for
+	# when subtracting 8.
+	#
+	# https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_connection_phase_packets_protocol_handshake_v10.html
+	auth_plugin_data_part_2_len = auth_plugin_data_len > 21 ? auth_plugin_data_len - 8 : 13;
+	update_auth_plugin: bool = $context.connection.set_auth_plugin(auth_plugin)
+		&if( ( capability_flags_2 << 16 ) & CLIENT_PLUGIN_AUTH );
+	server_query_attrs: bool = $context.connection.set_server_query_attrs(( capability_flags_2 << 16 ) & CLIENT_QUERY_ATTRIBUTES);
 };

 type Handshake_v9 = record {
@@ -240,7 +382,45 @@ type Handshake_Response_Packet = case $context.connection.get_version() of {

 type Handshake_Credentials_v10 = record {
 	username : NUL_String;
-	password : bytestring &restofdata;
+	password : LengthEncodedString;
+};
+
+type Connection_Attribute = record {
+	name : LengthEncodedString;
+	value: LengthEncodedString;
+};
+
+type Handshake_Connection_Attributes = record {
+	length: uint8;
+	attrs : Connection_Attribute[] &until($input.length() == 0);
+} &length = length+1;
+
+type Handshake_Plain_v10(cap_flags: uint32) = record {
+	credentials: Handshake_Credentials_v10;
+	have_db    : case ( cap_flags & CLIENT_CONNECT_WITH_DB ) of {
+		CLIENT_CONNECT_WITH_DB -> database: NUL_String;
+		0x0                    -> none_1  : empty;
+	};
+	have_plugin: case ( cap_flags & CLIENT_PLUGIN_AUTH ) of {
+		CLIENT_PLUGIN_AUTH -> auth_plugin: EmptyOrNUL_String;
+		0x0                -> none_2     : empty;
+	};
+	have_attrs : case ( cap_flags & CLIENT_CONNECT_ATTRS ) of {
+		CLIENT_CONNECT_ATTRS -> conn_attrs: Handshake_Connection_Attributes;
+		0x0                  -> none_3    : empty;
+	};
+	have_zstd  : case ( cap_flags & CLIENT_ZSTD_COMPRESSION_ALGORITHM ) of {
+		CLIENT_ZSTD_COMPRESSION_ALGORITHM -> zstd_compression_level: uint8;
+		0x0                               -> none_4                : empty;
+	};
+} &let {
+	update_auth_plugin: bool = $context.connection.set_auth_plugin(auth_plugin)
+		&if( cap_flags & CLIENT_PLUGIN_AUTH );
+
+	# Switch client state into expecting more auth data. If the server responds
+	# with an OK_Packet before, will switch into COMMAND_PHASE.
+	update_conn_expectation: bool = $context.connection.set_next_conn_expected(EXPECT_AUTH_DATA)
+		&if( cap_flags & CLIENT_PLUGIN_AUTH );
 };

 type Handshake_Response_Packet_v10 = record {
@@ -248,9 +428,13 @@ type Handshake_Response_Packet_v10 = record {
 	max_pkt_size: uint32;
 	char_set    : uint8;
 	pad         : padding[23];
-	credentials : Handshake_Credentials_v10[] &until($input.length() == 0);
+	use_ssl     : case ( cap_flags & CLIENT_SSL ) of {
+		CLIENT_SSL -> none : empty;
+		default    -> plain: Handshake_Plain_v10(cap_flags);
+	};
 } &let {
 	deprecate_eof: bool = $context.connection.set_deprecate_eof(cap_flags & CLIENT_DEPRECATE_EOF);
+	client_query_attrs: bool = $context.connection.set_client_query_attrs(cap_flags & CLIENT_QUERY_ATTRIBUTES);
 };

 type Handshake_Response_Packet_v9 = record {
@@ -258,17 +442,71 @@ type Handshake_Response_Packet_v9 = record {
 	max_pkt_size : uint24le;
 	username     : NUL_String;
 	auth_response: NUL_String;
-	have_db      : case ( cap_flags & 0x8 ) of {
-		0x8 -> database: NUL_String;
+	have_db      : case ( cap_flags & CLIENT_CONNECT_WITH_DB ) of {
+		CLIENT_CONNECT_WITH_DB -> database: NUL_String;
 		0x0 -> none    : empty;
 	};
 	password     : bytestring &restofdata;
 };

+# Connection Phase
+
+type Connection_Phase_Packets = case $context.connection.get_conn_expectation() of {
+	EXPECT_HANDSHAKE -> handshake_resp: Handshake_Response_Packet;
+	EXPECT_AUTH_DATA -> auth_data     : AuthMoreData(true);
+};
+
+# Query attribute handling for COM_QUERY
+#
+type AttributeTypeAndName = record {
+	type         : uint8;
+	unsigned_flag: uint8;
+	name         : LengthEncodedString;
+};
+
+type AttributeValue(is_null: bool, type: uint8) = record {
+	null: case is_null of {
+		false -> val     : BinaryValue(type);
+		true  -> null_val: empty;
+	};
+} &let {
+	# Move parsing the next query attribute.
+	done = $context.connection.next_query_attr();
+};
+
+type Attributes(count: int) = record {
+	null_bitmap         : bytestring &length=(count + 7) / 8;
+	send_types_to_server: uint8 &enforce(send_types_to_server == 1);
+	names               : AttributeTypeAndName[count];
+	values              : AttributeValue(
+		# Check if null_bitmap contains this attribute index. This
+		# will pass true if the attribute value is NULL and parsing
+		# skipped in AttributeValue above.
+		(null_bitmap[$context.connection.query_attr_idx() / 8] >> ($context.connection.query_attr_idx() % 8)) & 0x01,
+		names[$context.connection.query_attr_idx()].type
+	)[] &until($context.connection.query_attr_idx() >= count);
+};
+
+type Query_Attributes = record {
+	count    : LengthEncodedInteger;
+	set_count: LengthEncodedInteger;
+	have_attr: case ( attr_count > 0 ) of {
+		true  -> attrs: Attributes(attr_count);
+		false -> none : empty;
+	} &requires(new_query_attrs);
+} &let {
+	attr_count: int = to_int()(count);
+	new_query_attrs = $context.connection.new_query_attrs();
+};
+
 # Command Request

 type Command_Request_Packet = record {
 	command: uint8;
+	attrs  : case ( command == COM_QUERY && $context.connection.get_client_query_attrs() && $context.connection.get_server_query_attrs() ) of {
+		true  -> query_attrs: Query_Attributes;
+		false -> none       : empty;
+	};
 	arg    : bytestring &restofdata;
 } &let {
 	update_expectation: bool = $context.connection.set_next_expected_from_command(command);
@@ -292,6 +530,10 @@ type Command_Response_Status = record {
 	pkt_type: uint8;
 	response: case pkt_type of {
 		0x00    -> data_ok: OK_Packet;
+		# When still in the CONNECTION_PHASE, the server can reply
+		# with AuthMoreData which is 0x01 stuffed opaque payload.
+		# https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_connection_phase_packets_protocol_auth_more_data.html
+		0x01    -> auth_more_data: AuthMoreData(false);
 		0xfe    -> data_eof: EOF_Packet(EOF_END);
 		0xff    -> data_err: ERR_Packet;
 		default -> unknown : empty;
@@ -326,22 +568,22 @@ type ColumnDefinition = record {
 };

 # Only used to indicate the end of a result, no intermediate eofs here.
-type EOFOrOK = case $context.connection.get_deprecate_eof() of {
+# MySQL spec says "You must check whether the packet length is less than 9
+# to make sure that it is a EOF_Packet packet" so the value of 13 here
+# comes from that 9, plus a 4-byte header.
+type EOFOrOK(pkt_len: uint32) = case ( $context.connection.get_deprecate_eof() || pkt_len > 13 ) of {
 	false -> eof: EOF_Packet(EOF_END);
 	true  -> ok : OK_Packet;
 };

 type ColumnDefinitionOrEOF(pkt_len: uint32) = record {
 	marker    : uint8;
-	def_or_eof: case is_eof of {
-		true  -> eof: EOFOrOK;
+	def_or_eof: case is_eof_or_ok of {
+		true  -> eof: EOFOrOK(pkt_len);
 		false -> def: ColumnDefinition41(marker);
-	} &requires(is_eof);
+	} &requires(is_eof_or_ok);
 } &let {
-	# MySQL spec says "You must check whether the packet length is less than 9
-	# to make sure that it is a EOF_Packet packet" so the value of 13 here
-	# comes from that 9, plus a 4-byte header.
-	is_eof: bool = (marker == 0xfe && pkt_len < 13);
+	is_eof_or_ok: bool = (marker == 0xfe);
 };
@@ -350,22 +592,19 @@ type EOFIfLegacyThenResultset(pkt_len: uint32) = case $context.connection.get_de
 	true -> resultset: Resultset(pkt_len);
 } &let {
 	update_result_seen: bool = $context.connection.set_results_seen(0);
-	update_expectation: bool = $context.connection.set_next_expected(EXPECT_RESULTSET);
+	update_expectation: bool = $context.connection.set_next_expected(EXPECT_RESULTSET) &if( ! $context.connection.get_deprecate_eof() );
 };

 type Resultset(pkt_len: uint32) = record {
 	marker    : uint8;
-	row_or_eof: case is_eof of {
-		true  -> eof: EOFOrOK;
+	row_or_eof: case is_eof_or_ok of {
+		true  -> eof: EOFOrOK(pkt_len);
 		false -> row: ResultsetRow(marker);
-	} &requires(is_eof);
+	} &requires(is_eof_or_ok);
 } &let {
-	# MySQL spec says "You must check whether the packet length is less than 9
-	# to make sure that it is a EOF_Packet packet" so the value of 13 here
-	# comes from that 9, plus a 4-byte header.
-	is_eof            : bool = (marker == 0xfe && pkt_len < 13);
+	is_eof_or_ok      : bool = (marker == 0xfe);
 	update_result_seen: bool = $context.connection.inc_results_seen();
-	update_expectation: bool = $context.connection.set_next_expected(is_eof ? NO_EXPECTATION : EXPECT_RESULTSET);
+	update_expectation: bool = $context.connection.set_next_expected(is_eof_or_ok ? NO_EXPECTATION : EXPECT_RESULTSET);
 };

 type ResultsetRow(first_byte: uint8) = record {
@@ -389,10 +628,20 @@ type ColumnDefinition41(first_byte: uint8) = record {
 	filler : padding[2];
 };

+# Opaque auth data exchanged during the connection phase between client and server.
+type AuthMoreData(is_orig: bool) = record {
+	data : bytestring &restofdata;
+};
+
 type AuthSwitchRequest = record {
-	status: uint8;
+	status: uint8 &enforce(status==254);
 	name  : NUL_String;
 	data  : bytestring &restofdata;
+} &let {
+	update_auth_plugin     : bool = $context.connection.set_auth_plugin(name);
+	update_conn_expectation: bool = $context.connection.set_next_conn_expected(EXPECT_AUTH_DATA);
+	# After an AuthSwitchRequest, server replies with OK_Packet, ERR_Packet or AuthMoreData.
+	update_expectation: bool = $context.connection.set_next_expected(EXPECT_STATUS);
 };

 type ColumnDefinition320 = record {
@@ -440,10 +689,15 @@ refine connection MySQL_Conn += {
 	uint8 previous_seq_id_;
 	int state_;
 	Expected expected_;
+	ConnectionExpected conn_expected_;
 	uint32 col_count_;
 	uint32 remaining_cols_;
 	uint32 results_seen_;
 	bool deprecate_eof_;
+	bool server_query_attrs_;
+	bool client_query_attrs_;
+	std::string auth_plugin_;
+	int query_attr_idx_;
 	%}

 	%init{
@@ -451,10 +705,14 @@ refine connection MySQL_Conn += {
 	previous_seq_id_ = 0;
 	state_ = CONNECTION_PHASE;
 	expected_ = EXPECT_STATUS;
+	conn_expected_ = EXPECT_HANDSHAKE;
 	col_count_ = 0;
 	remaining_cols_ = 0;
 	results_seen_ = 0;
 	deprecate_eof_ = false;
+	server_query_attrs_ = false;
+	client_query_attrs_ = false;
+	query_attr_idx_ = 0;
 	%}

 	function get_version(): uint8
@@ -487,6 +745,10 @@ refine connection MySQL_Conn += {
 	function update_state(s: state): bool
 		%{
 		state_ = s;
+
+		if ( s == COMMAND_PHASE )
+			conn_expected_ = EXPECT_HANDSHAKE; // Reset connection phase expectation
+
 		return true;
 		%}
@@ -501,6 +763,41 @@ refine connection MySQL_Conn += {
 		return true;
 		%}

+	function get_server_query_attrs(): bool
+		%{
+		return server_query_attrs_;
+		%}
+
+	function set_server_query_attrs(q: bool): bool
+		%{
+		server_query_attrs_ = q;
+		return true;
+		%}
+
+	function get_client_query_attrs(): bool
+		%{
+		return client_query_attrs_;
+		%}
+
+	function set_client_query_attrs(q: bool): bool
+		%{
+		client_query_attrs_ = q;
+		return true;
+		%}
+
+	function set_auth_plugin(a: bytestring): bool
+		%{
+		// binpac::std_str() includes trailing \0 from parsing.
+		auto new_auth_plugin = std::string(binpac::c_str(a));
+		if ( ! auth_plugin_.empty() && new_auth_plugin != auth_plugin_ )
+			{
+			expected_ = EXPECT_AUTH_SWITCH;
+			}
+
+		auth_plugin_ = std::move(new_auth_plugin);
+		return true;
+		%}
+
 	function get_expectation(): Expected
 		%{
 		return expected_;
@@ -512,6 +809,17 @@ refine connection MySQL_Conn += {
 		return true;
 		%}

+	function get_conn_expectation(): ConnectionExpected
+		%{
+		return conn_expected_;
+		%}
+
+	function set_next_conn_expected(c: ConnectionExpected): bool
+		%{
+		conn_expected_ = c;
+		return true;
+		%}
+
 	function set_next_expected_from_command(cmd: uint8): bool
 		%{
 		switch ( cmd ) {
@@ -662,4 +970,21 @@ refine connection MySQL_Conn += {
 		++results_seen_;
 		return true;
 		%}
+
+	function query_attr_idx(): int
+		%{
+		return query_attr_idx_;
+		%}
+
+	function new_query_attrs(): bool
+		%{
+		query_attr_idx_ = 0;
+		return true;
+		%}
+
+	function next_query_attr(): bool
+		%{
+		query_attr_idx_++;
+		return true;
+		%}
 };
@@ -413,7 +413,7 @@ type SMB2_error_response(header: SMB2_Header) = record {
 	byte_count : uint32;
 	# This is implemented incorrectly and is disabled for now.
 	#error_data  : SMB2_error_data(header, byte_count);
-	stuff      : bytestring &restofdata &transient;
+	stuff      : bytestring &length=byte_count &transient;
 } &byteorder = littleendian;

 type SMB2_logoff_request(header: SMB2_Header) = record {
@@ -340,6 +340,43 @@ void Manager::InitPostScript() {
     bstate->subscriber.add_topic(broker::topic::store_events(), true);

     InitializeBrokerStoreForwarding();
+
+    num_peers_metric =
+        telemetry_mgr->GaugeInstance("zeek", "broker_peers", {}, "Current number of peers connected via broker", "",
+                                     []() -> prometheus::ClientMetric {
+                                         prometheus::ClientMetric metric;
+                                         metric.gauge.value = static_cast<double>(broker_mgr->peer_count);
+                                         return metric;
+                                     });
+
+    num_stores_metric =
+        telemetry_mgr->GaugeInstance("zeek", "broker_stores", {}, "Current number of stores connected via broker", "",
+                                     []() -> prometheus::ClientMetric {
+                                         prometheus::ClientMetric metric;
+                                         metric.gauge.value = static_cast<double>(broker_mgr->data_stores.size());
+                                         return metric;
+                                     });
+
+    num_pending_queries_metric =
+        telemetry_mgr->GaugeInstance("zeek", "broker_pending_queries", {}, "Current number of pending broker queries",
+                                     "", []() -> prometheus::ClientMetric {
+                                         prometheus::ClientMetric metric;
+                                         metric.gauge.value = static_cast<double>(broker_mgr->pending_queries.size());
+                                         return metric;
+                                     });
+
+    num_events_incoming_metric = telemetry_mgr->CounterInstance("zeek", "broker_incoming_events", {},
+                                                                "Total number of incoming events via broker");
+    num_events_outgoing_metric = telemetry_mgr->CounterInstance("zeek", "broker_outgoing_events", {},
+                                                                "Total number of outgoing events via broker");
+    num_logs_incoming_metric =
+        telemetry_mgr->CounterInstance("zeek", "broker_incoming_logs", {}, "Total number of incoming logs via broker");
+    num_logs_outgoing_metric =
+        telemetry_mgr->CounterInstance("zeek", "broker_outgoing_logs", {}, "Total number of outgoing logs via broker");
+    num_ids_incoming_metric =
+        telemetry_mgr->CounterInstance("zeek", "broker_incoming_ids", {}, "Total number of incoming ids via broker");
+    num_ids_outgoing_metric =
+        telemetry_mgr->CounterInstance("zeek", "broker_outgoing_ids", {}, "Total number of outgoing ids via broker");
 }

 void Manager::InitializeBrokerStoreForwarding() {
@@ -528,7 +565,7 @@ bool Manager::PublishEvent(string topic, std::string name, broker::vector args,
     DBG_LOG(DBG_BROKER, "Publishing event: %s", RenderEvent(topic, name, args).c_str());
     broker::zeek::Event ev(std::move(name), std::move(args), broker::to_timestamp(ts));
     bstate->endpoint.publish(std::move(topic), ev.move_data());
-    ++statistics.num_events_outgoing;
+    num_events_outgoing_metric->Inc();
     return true;
 }
@@ -588,7 +625,7 @@ bool Manager::PublishIdentifier(std::string topic, std::string id) {
     broker::zeek::IdentifierUpdate msg(std::move(id), std::move(data.value_));
     DBG_LOG(DBG_BROKER, "Publishing id-update: %s", RenderMessage(topic, msg.as_data()).c_str());
     bstate->endpoint.publish(std::move(topic), msg.move_data());
-    ++statistics.num_ids_outgoing;
+    num_ids_outgoing_metric->Inc();
    return true;
 }
@@ -715,8 +752,10 @@ bool Manager::PublishLogWrite(EnumVal* stream, EnumVal* writer, string path, int
     ++lb.message_count;
     lb.msgs[topic].add(std::move(msg));

-    if ( lb.message_count >= log_batch_size )
-        statistics.num_logs_outgoing += lb.Flush(bstate->endpoint, log_batch_size);
+    if ( lb.message_count >= log_batch_size ) {
+        auto outgoing_logs = static_cast<double>(lb.Flush(bstate->endpoint, log_batch_size));
+        num_logs_outgoing_metric->Inc(outgoing_logs);
+    }

     return true;
 }
@@ -746,7 +785,8 @@ size_t Manager::FlushLogBuffers() {
     for ( auto& lb : log_buffers )
         rval += lb.Flush(bstate->endpoint, log_batch_size);

-    statistics.num_logs_outgoing += rval;
+    num_logs_outgoing_metric->Inc(rval);

     return rval;
 }
@@ -1141,7 +1181,7 @@ void Manager::ProcessMessage(std::string_view topic, broker::zeek::Event& ev) {
         ts = run_state::network_time;

     DBG_LOG(DBG_BROKER, "Process event: %s (%.6f) %s", c_str_safe(name).c_str(), ts, RenderMessage(args).c_str());
-    ++statistics.num_events_incoming;
+    num_events_incoming_metric->Inc();
     auto handler = event_registry->Lookup(name);

     if ( ! handler )
@@ -1286,7 +1326,7 @@ bool Manager::ProcessMessage(std::string_view, broker::zeek::LogWrite& lw) {
         return false;
     }

-    ++statistics.num_logs_incoming;
+    num_logs_incoming_metric->Inc();
     auto&& stream_id_name = lw.stream_id().name;

     // Get stream ID.
@@ -1352,7 +1392,7 @@ bool Manager::ProcessMessage(std::string_view, broker::zeek::IdentifierUpdate& i
         return false;
     }

-    ++statistics.num_ids_incoming;
+    num_ids_incoming_metric->Inc();
     auto id_name = c_str_safe(iu.id_name());
     auto id_value = convert_if_broker_variant_or_move(iu.id_value());
     const auto& id = zeek::detail::global_scope()->Find(id_name);
@@ -1706,7 +1746,12 @@ const Stats& Manager::GetStatistics() {
     statistics.num_stores = data_stores.size();
     statistics.num_pending_queries = pending_queries.size();

-    // The other attributes are set as activity happens.
+    statistics.num_events_incoming = static_cast<size_t>(num_events_incoming_metric->Value());
+    statistics.num_events_outgoing = static_cast<size_t>(num_events_outgoing_metric->Value());
+    statistics.num_logs_incoming = static_cast<size_t>(num_logs_incoming_metric->Value());
+    statistics.num_logs_outgoing = static_cast<size_t>(num_logs_outgoing_metric->Value());
+    statistics.num_ids_incoming = static_cast<size_t>(num_ids_incoming_metric->Value());
+    statistics.num_ids_outgoing = static_cast<size_t>(num_ids_outgoing_metric->Value());

     return statistics;
 }
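Since the broker counters are now regular telemetry instruments, they should also be reachable from script land. A minimal sketch, assuming the base telemetry framework's Telemetry::collect_metrics API (the prefix/name pair and the zeek_done timing here are assumptions, not part of this change):

event zeek_done()
	{
	# Hypothetical check: dump whatever the telemetry manager reports for the peers gauge.
	local ms = Telemetry::collect_metrics("zeek", "broker_peers");
	for ( i in ms )
		print ms[i];
	}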
@@ -27,8 +27,11 @@ using VectorTypePtr = IntrusivePtr<VectorType>;
 using TableValPtr = IntrusivePtr<TableVal>;

 namespace telemetry {
-class Manager;
-}
+class Gauge;
+class Counter;
+using GaugePtr = std::shared_ptr<Gauge>;
+using CounterPtr = std::shared_ptr<Counter>;
+} // namespace telemetry

 namespace detail {
 class Frame;
@@ -451,6 +454,16 @@ private:
     std::string zeek_table_db_directory;

     static int script_scope;
+
+    telemetry::GaugePtr num_peers_metric;
+    telemetry::GaugePtr num_stores_metric;
+    telemetry::GaugePtr num_pending_queries_metric;
+    telemetry::CounterPtr num_events_incoming_metric;
+    telemetry::CounterPtr num_events_outgoing_metric;
+    telemetry::CounterPtr num_logs_incoming_metric;
+    telemetry::CounterPtr num_logs_outgoing_metric;
+    telemetry::CounterPtr num_ids_incoming_metric;
+    telemetry::CounterPtr num_ids_outgoing_metric;
 };

 } // namespace Broker
@@ -161,8 +161,9 @@ RecordValPtr X509::ParseCertificate(X509Val* cert_val, file_analysis::File* f) {
 #if ( OPENSSL_VERSION_NUMBER < 0x10100000L )
     i2a_ASN1_OBJECT(bio, ssl_cert->sig_alg->algorithm);
 #else
-    const X509_ALGOR* sigalg = X509_get0_tbs_sigalg(ssl_cert);
-    i2a_ASN1_OBJECT(bio, sigalg->algorithm);
+    const ASN1_OBJECT* alg;
+    X509_ALGOR_get0(&alg, NULL, NULL, X509_get0_tbs_sigalg(ssl_cert));
+    i2a_ASN1_OBJECT(bio, alg);
 #endif
     len = BIO_gets(bio, buf, sizeof(buf));
     pX509Cert->Assign(13, make_intrusive<StringVal>(len, buf));
@@ -107,6 +107,8 @@ add_generic_analyzer_fuzz_target(dhcp udp)
 add_generic_analyzer_fuzz_target(dnp3_tcp)
 add_generic_analyzer_fuzz_target(dtls udp)
 add_generic_analyzer_fuzz_target(irc)
+add_generic_analyzer_fuzz_target(ldap_udp udp)
+add_generic_analyzer_fuzz_target(ldap_tcp tcp)
 add_generic_analyzer_fuzz_target(modbus)
 add_generic_analyzer_fuzz_target(mqtt)
 add_generic_analyzer_fuzz_target(mysql)

BIN  src/fuzzers/corpora/ldap_tcp-corpus.zip (new file; binary file not shown)
BIN  src/fuzzers/corpora/ldap_udp-corpus.zip (new file; binary file not shown)
@@ -18,12 +18,6 @@ BloomFilter::BloomFilter(const detail::Hasher* arg_hasher) { hasher = arg_hasher

 BloomFilter::~BloomFilter() { delete hasher; }

-broker::expected<broker::data> BloomFilter::Serialize() const {
-    if ( auto res = SerializeData() )
-        return zeek::detail::BrokerDataAccess::Unbox(*res);
-    return {broker::make_error(broker::ec::serialization_failed)};
-}
-
 std::optional<BrokerData> BloomFilter::SerializeData() const {
     auto h = hasher->Serialize();
@@ -43,10 +37,6 @@ std::optional<BrokerData> BloomFilter::SerializeData() const {
     return std::move(builder).Build();
 }

-std::unique_ptr<BloomFilter> BloomFilter::Unserialize(const broker::data& data) {
-    return UnserializeData(BrokerDataView{&data});
-}
-
 std::unique_ptr<BloomFilter> BloomFilter::UnserializeData(BrokerDataView data) {
     if ( ! data.IsList() )
         return nullptr;
@@ -105,9 +105,6 @@ public:
      */
     virtual std::string InternalState() const = 0;

-    [[deprecated("Remove in v7.1: use SerializeData instead")]] broker::expected<broker::data> Serialize() const;
-    [[deprecated("Remove in v7.1: use UnserializeData instead")]] static std::unique_ptr<BloomFilter> Unserialize(
-        const broker::data& data);
     std::optional<BrokerData> SerializeData() const;
     static std::unique_ptr<BloomFilter> UnserializeData(BrokerDataView data);
@@ -196,22 +196,7 @@ rule_attr:
 			if ( is_event($2) )
 				current_rule->AddAction(new zeek::detail::RuleActionEvent(nullptr, $2));
 			else
-				{
-				const char *msg = id_to_str($2);
-
-				if ( ! zeek::util::streq(msg, "<error>") )
-					zeek::reporter->Deprecation(zeek::util::fmt("Remove in v7.1: Using an identifier for msg is deprecated (%s:%d)",
-					                                            current_rule_file, rules_line_number+1));
-
-				current_rule->AddAction(new zeek::detail::RuleActionEvent(msg));
-				}
-			}
-
-	|	TOK_EVENT TOK_IDENT TOK_IDENT
-			{
-			// Maybe remove in v7.1: Once we do not support msg as identifier,
-			// this extra messaging isn't all that useful anymore, but it
-			// beats a syntax error.
-			rules_error("custom event and identifier for msg unsupported");
-			zeek::detail::rule_matcher->SetParseError();
+				rules_error("identifier is not an event", $2);
 			}

 	|	TOK_EVENT TOK_IDENT TOK_STRING
@@ -131,13 +131,8 @@ void activate_bodies__CPP(const char* fn, const char* module, bool exported, Typ
         events.insert(cs.events.begin(), cs.events.end());
     }

-    for ( const auto& e : events ) {
-        auto eh = event_registry->Register(e);
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-        eh->SetUsed();
-#pragma GCC diagnostic pop
-    }
+    for ( const auto& e : events )
+        event_registry->Register(e);
 }

 IDPtr lookup_global__CPP(const char* g, const TypePtr& t, bool exported) {
@@ -191,13 +186,8 @@ FuncValPtr lookup_func__CPP(string name, int num_bodies, vector<p_hash_type> has
         // This might register the same event more than once,
         // if it's used in multiple bodies, but that's okay as
         // the semantics for Register explicitly allow it.
-        for ( auto& e : f.events ) {
-            auto eh = event_registry->Register(e);
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-            eh->SetUsed();
-#pragma GCC diagnostic pop
-        }
+        for ( auto& e : f.events )
+            event_registry->Register(e);
     }

     auto sf = make_intrusive<ScriptFunc>(std::move(name), std::move(ft), std::move(bodies), std::move(priorities));
@@ -1238,12 +1238,30 @@ ExprPtr EqExpr::Reduce(Reducer* c, StmtPtr& red_stmt) {
     if ( IsHasElementsTest() )
         return BuildHasElementsTest()->Reduce(c, red_stmt);

-    if ( GetType()->Tag() == TYPE_BOOL && same_singletons(op1, op2) ) {
+    if ( GetType()->Tag() == TYPE_BOOL ) {
+        if ( same_singletons(op1, op2) ) {
             bool t = Tag() == EXPR_EQ;
             auto res = with_location_of(make_intrusive<ConstExpr>(val_mgr->Bool(t)), this);
             return res->Reduce(c, red_stmt);
         }
+
+        if ( op1->GetType()->Tag() == TYPE_BOOL ) {
+            if ( op1->Tag() == EXPR_CONST )
+                std::swap(op1, op2);
+
+            if ( op2->Tag() == EXPR_CONST ) {
+                bool t = Tag() == EXPR_EQ;
+                if ( op2->AsConstExpr()->Value()->IsZero() )
+                    t = ! t;
+                if ( t )
+                    return op1->Reduce(c, red_stmt);
+
+                auto res = with_location_of(make_intrusive<NotExpr>(op1), this);
+                return res->Reduce(c, red_stmt);
+            }
+        }
+    }

     return BinaryExpr::Reduce(c, red_stmt);
 }
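In script terms the new branch folds comparisons of a boolean against a constant before further reduction. A sketch of the intended effect on optimized script code (the variable and print are hypothetical, not part of the change):

event zeek_init()
	{
	local is_local = F;

	# With the new reduction, "is_local == F" is rewritten to "! is_local"
	# (and "is_local == T" would become plain "is_local") before code generation.
	if ( is_local == F )
		print "host is remote";
	}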
@@ -417,13 +417,8 @@ static void use_CPP() {
             f.SetBody(b);
         }

-        for ( auto& e : s->second.events ) {
-            auto h = event_registry->Register(e);
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-            h->SetUsed();
-#pragma GCC diagnostic pop
-        }
+        for ( auto& e : s->second.events )
+            event_registry->Register(e);

         auto finish = s->second.finish_init_func;
         if ( finish )
@@ -245,6 +245,12 @@ StmtPtr IfStmt::DoReduce(Reducer* c) {
         red_e_stmt = cond_red_stmt;
     }

+    // Check again for negation given above reductions/replacements.
+    if ( e->Tag() == EXPR_NOT ) {
+        std::swap(s1, s2);
+        e = e->GetOp1();
+    }
+
     StmtPtr sl;

     if ( e->IsConst() ) {
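The effect, expressed as a script-level sketch (the condition and branches are hypothetical): a negated test has its branches swapped so later passes see the positive condition.

event zeek_init()
	{
	local ok = F;

	# The reducer now turns this into: if ( ok ) print "fallback"; else print "done";
	# by swapping the branches and dropping the negation.
	if ( ! ok )
		print "done";
	else
		print "fallback";
	}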
@@ -46,9 +46,9 @@ public:

     ProtocolMap::iterator InitCounters(const std::string& protocol) {
         auto active_family =
-            telemetry_mgr->GaugeFamily("zeek", "active-sessions", {"protocol"}, "Active Zeek Sessions");
+            telemetry_mgr->GaugeFamily("zeek", "active_sessions", {"protocol"}, "Active Zeek Sessions");
         auto total_family =
-            telemetry_mgr->CounterFamily("zeek", "total-sessions", {"protocol"}, "Total number of sessions");
+            telemetry_mgr->CounterFamily("zeek", "total_sessions", {"protocol"}, "Total number of sessions");

         auto [it, inserted] = entries.insert({protocol, Protocol{active_family, total_family, protocol}});
@@ -75,7 +75,17 @@ private:

 } // namespace detail

-Manager::Manager() { stats = new detail::ProtocolStats(); }
+Manager::Manager() {
+    stats = new detail::ProtocolStats();
+    ended_sessions_metric_family = telemetry_mgr->CounterFamily("zeek", "ended_sessions", {"reason"},
+                                                                "Number of sessions ended for specific reasons");
+    ended_by_inactivity_metric =
+        ended_sessions_metric_family->GetOrAdd({{"reason", "inactivity"}}, []() -> prometheus::ClientMetric {
+            prometheus::ClientMetric metric;
+            metric.counter.value = static_cast<double>(zeek::detail::killed_by_inactivity);
+            return metric;
+        });
+}

 Manager::~Manager() {
     Clear();
@@ -13,6 +13,13 @@

 namespace zeek {

+namespace telemetry {
+class CounterFamily;
+using CounterFamilyPtr = std::shared_ptr<CounterFamily>;
+class Counter;
+using CounterPtr = std::shared_ptr<Counter>;
+} // namespace telemetry
+
 namespace detail {
 class PacketFilter;
 }
@@ -82,7 +89,7 @@ public:
     void Weird(const char* name, const Packet* pkt, const char* addl = "", const char* source = "");
     void Weird(const char* name, const IP_Hdr* ip, const char* addl = "");

-    unsigned int CurrentSessions() { return session_map.size(); }
+    size_t CurrentSessions() { return session_map.size(); }

 private:
     using SessionMap = std::unordered_map<detail::Key, Session*, detail::KeyHash>;
@@ -96,6 +103,8 @@ private:

     SessionMap session_map;
     detail::ProtocolStats* stats;
+    telemetry::CounterFamilyPtr ended_sessions_metric_family;
+    telemetry::CounterPtr ended_by_inactivity_metric;
 };

 } // namespace session
@@ -61,7 +61,6 @@ void Manager::registerSpicyModuleEnd() {
 }

 void Manager::registerProtocolAnalyzer(const std::string& name, hilti::rt::Protocol proto,
-const hilti::rt::Vector<::zeek::spicy::rt::PortRange>& ports,
 const std::string& parser_orig, const std::string& parser_resp,
 const std::string& replaces, const std::string& linker_scope) {
 SPICY_DEBUG(hilti::rt::fmt("Have Spicy protocol analyzer %s", name));

@@ -74,7 +73,6 @@ void Manager::registerProtocolAnalyzer(const std::string& name, hilti::rt::Proto
 info.name_zeek = hilti::rt::replace(name, "::", "_");
 info.name_zeekygen = hilti::rt::fmt("<Spicy-%s>", name);
 info.protocol = proto;
-info.ports = ports;
 info.linker_scope = linker_scope;

 // We may have that analyzer already iff it was previously pre-registered

@@ -701,25 +699,6 @@ void Manager::InitPostScript() {
 if ( ! tag )
 reporter->InternalError("cannot get analyzer tag for '%s'", p.name_analyzer.c_str());

-for ( const auto& ports : p.ports ) {
-const auto proto = ports.begin.protocol();
-
-// Port ranges are closed intervals.
-for ( auto port = ports.begin.port(); port <= ports.end.port(); ++port ) {
-const auto port_ = hilti::rt::Port(port, proto);
-SPICY_DEBUG(hilti::rt::fmt(" Scheduling analyzer for port %s", port_));
-analyzer_mgr->RegisterAnalyzerForPort(tag, transport_protocol(port_), port);
-
-// Don't double register in case of single-port ranges.
-if ( ports.begin.port() == ports.end.port() )
-break;
-
-// Explicitly prevent overflow.
-if ( port == std::numeric_limits<decltype(port)>::max() )
-break;
-}
-}
-
 if ( p.parser_resp ) {
 for ( auto port : p.parser_resp->ports ) {
 if ( port.direction != ::spicy::rt::Direction::Both &&
@@ -85,7 +85,6 @@ public:
 *
 * @param name name of the analyzer as defined in its EVT file
 * @param proto analyzer's transport-layer protocol
-* @param prts well-known ports for the analyzer; it'll be activated automatically for these
 * @param parser_orig name of the Spicy parser for the originator side; must match the name that
 * Spicy registers the unit's parser with
 * @param parser_resp name of the Spicy parser for the originator side; must match the name that

@@ -95,10 +94,9 @@ public:
 * @param linker_scope scope of current HLTO file, which will restrict visibility of the
 * registration
 */
-void registerProtocolAnalyzer(const std::string& name, hilti::rt::Protocol proto,
-const hilti::rt::Vector<::zeek::spicy::rt::PortRange>& ports,
-const std::string& parser_orig, const std::string& parser_resp,
-const std::string& replaces, const std::string& linker_scope);
+void registerProtocolAnalyzer(const std::string& name, hilti::rt::Protocol proto, const std::string& parser_orig,
+const std::string& parser_resp, const std::string& replaces,
+const std::string& linker_scope);

 /**
 * Runtime method to register a file analyzer with its Zeek-side

@@ -343,7 +341,6 @@ private:
 std::string name_parser_resp;
 std::string name_replaces;
 hilti::rt::Protocol protocol = hilti::rt::Protocol::Undef;
-hilti::rt::Vector<::zeek::spicy::rt::PortRange> ports;
 std::string linker_scope;

 // Computed and available once the analyzer has been registered.

@@ -357,7 +354,7 @@ private:
 bool operator==(const ProtocolAnalyzerInfo& other) const {
 return name_analyzer == other.name_analyzer && name_parser_orig == other.name_parser_orig &&
 name_parser_resp == other.name_parser_resp && name_replaces == other.name_replaces &&
-protocol == other.protocol && ports == other.ports && linker_scope == other.linker_scope;
+protocol == other.protocol && linker_scope == other.linker_scope;
 }

 bool operator!=(const ProtocolAnalyzerInfo& other) const { return ! (*this == other); }
@@ -26,12 +26,11 @@ void rt::register_spicy_module_begin(const std::string& name, const std::string&

 void rt::register_spicy_module_end() { spicy_mgr->registerSpicyModuleEnd(); }

-void rt::register_protocol_analyzer(const std::string& name, hilti::rt::Protocol proto,
-const hilti::rt::Vector<::zeek::spicy::rt::PortRange>& ports,
-const std::string& parser_orig, const std::string& parser_resp,
-const std::string& replaces, const std::string& linker_scope) {
+void rt::register_protocol_analyzer(const std::string& name, hilti::rt::Protocol proto, const std::string& parser_orig,
+const std::string& parser_resp, const std::string& replaces,
+const std::string& linker_scope) {
 auto _ = hilti::rt::profiler::start("zeek/rt/register_protocol_analyzer");
-spicy_mgr->registerProtocolAnalyzer(name, proto, ports, parser_orig, parser_resp, replaces, linker_scope);
+spicy_mgr->registerProtocolAnalyzer(name, proto, parser_orig, parser_resp, replaces, linker_scope);
 }

 void rt::register_file_analyzer(const std::string& name, const hilti::rt::Vector<std::string>& mime_types,

@@ -80,7 +80,7 @@ public:
 : ParameterMismatch(_fmt(have, want)) {}

 private:
-std::string _fmt(const std::string_view& have, const TypePtr& want) {
+static std::string _fmt(const std::string_view& have, const TypePtr& want) {
 ODesc d;
 want->Describe(&d);
 return hilti::rt::fmt("cannot convert Spicy value of type '%s' to Zeek value of type '%s'", have,

@@ -106,10 +106,9 @@ void register_spicy_module_begin(const std::string& id, const std::string& descr
 * Registers a Spicy protocol analyzer with its EVT meta information with the
 * plugin's runtime.
 */
-void register_protocol_analyzer(const std::string& id, hilti::rt::Protocol proto,
-const hilti::rt::Vector<::zeek::spicy::rt::PortRange>& ports,
-const std::string& parser_orig, const std::string& parser_resp,
-const std::string& replaces, const std::string& linker_scope);
+void register_protocol_analyzer(const std::string& id, hilti::rt::Protocol proto, const std::string& parser_orig,
+const std::string& parser_resp, const std::string& replaces,
+const std::string& linker_scope);

 /**
 * Registers a Spicy file analyzer with its EVT meta information with the
@@ -260,79 +260,6 @@ static std::string extract_expr(const std::string& chunk, size_t* i) {
 return expr;
 }

-static hilti::rt::Port extract_port(const std::string& chunk, size_t* i) {
-eat_spaces(chunk, i);

-std::string s;
-size_t j = *i;

-while ( j < chunk.size() && isdigit(chunk[j]) )
-++j;

-if ( *i == j )
-throw ParseError("cannot parse port specification");

-hilti::rt::Protocol proto;
-uint64_t port = std::numeric_limits<uint64_t>::max();

-s = chunk.substr(*i, j - *i);
-hilti::util::atoi_n(s.begin(), s.end(), 10, &port);

-if ( port > 65535 )
-throw ParseError("port outside of valid range");

-*i = j;

-if ( chunk[*i] != '/' )
-throw ParseError("cannot parse port specification");

-(*i)++;

-if ( looking_at(chunk, *i, "tcp") ) {
-proto = hilti::rt::Protocol::TCP;
-eat_token(chunk, i, "tcp");
-}

-else if ( looking_at(chunk, *i, "udp") ) {
-proto = hilti::rt::Protocol::UDP;
-eat_token(chunk, i, "udp");
-}

-else if ( looking_at(chunk, *i, "icmp") ) {
-proto = hilti::rt::Protocol::ICMP;
-eat_token(chunk, i, "icmp");
-}

-else
-throw ParseError("cannot parse port specification");

-return {static_cast<uint16_t>(port), proto};
-}

-static ::zeek::spicy::rt::PortRange extract_port_range(const std::string& chunk, size_t* i) {
-auto start = extract_port(chunk, i);
-auto end = std::optional<hilti::rt::Port>();

-if ( looking_at(chunk, *i, "-") ) {
-eat_token(chunk, i, "-");
-end = extract_port(chunk, i);
-}

-if ( end ) {
-if ( start.protocol() != end->protocol() )
-throw ParseError("start and end of port range must have same protocol");

-if ( start.port() > end->port() )
-throw ParseError("start of port range cannot be after its end");
-}

-if ( ! end )
-// EVT port ranges are a closed.
-end = hilti::rt::Port(start.port(), start.protocol());

-return {start, *end};
-}

 void GlueCompiler::init(Driver* driver, int zeek_version) {
 _driver = driver;
 _zeek_version = zeek_version;

@@ -704,25 +631,11 @@ glue::ProtocolAnalyzer GlueCompiler::parseProtocolAnalyzer(const std::string& ch
 }
 }

-else if ( looking_at(chunk, i, "ports") ) {
-eat_token(chunk, &i, "ports");
-eat_token(chunk, &i, "{");

-while ( true ) {
-a.ports.push_back(extract_port_range(chunk, &i));

-if ( looking_at(chunk, i, "}") ) {
-eat_token(chunk, &i, "}");
-break;
-}

-eat_token(chunk, &i, ",");
-}
-}

-else if ( looking_at(chunk, i, "port") ) {
-eat_token(chunk, &i, "port");
-a.ports.push_back(extract_port_range(chunk, &i));
+else if ( looking_at(chunk, i, "ports") || looking_at(chunk, i, "port") ) {
+throw ParseError(hilti::rt::fmt(
+"Analyzer %s is using the removed 'port' or 'ports' keyword to register "
+"well-known ports. Use Analyzer::register_for_ports() in the accompanying Zeek script instead.",
+a.name));
 }

 else if ( looking_at(chunk, i, "replaces") ) {

@@ -739,14 +652,6 @@ glue::ProtocolAnalyzer GlueCompiler::parseProtocolAnalyzer(const std::string& ch
 eat_token(chunk, &i, ",");
 }

-if ( ! a.ports.empty() )
-hilti::logger().warning(
-hilti::rt::
-fmt("Remove in v7.1: Analyzer %s is using the deprecated 'port' or 'ports' keyword to register "
-"well-known ports. Use Analyzer::register_for_ports() in the accompanying Zeek script instead.",
-a.name),
-a.location);

 return a;
 }

@@ -1034,13 +939,6 @@ bool GlueCompiler::compile() {

 preinit_body.addCall("zeek_rt::register_protocol_analyzer",
 {builder()->stringMutable(a.name.str()), builder()->id(protocol),
-builder()->vector(
-hilti::util::transform(a.ports,
-[this](const auto& p) -> hilti::Expression* {
-return builder()->call("zeek_rt::make_port_range",
-{builder()->port(p.begin),
-builder()->port(p.end)});
-})),
 builder()->stringMutable(a.unit_name_orig.str()),
 builder()->stringMutable(a.unit_name_resp.str()), builder()->stringMutable(a.replaces),
 builder()->scope()});
@@ -45,7 +45,6 @@ struct ProtocolAnalyzer {
 hilti::Location location; /**< Location where the analyzer was defined. */
 hilti::ID name; /**< Name of the analyzer. */
 hilti::rt::Protocol protocol = hilti::rt::Protocol::Undef; /**< The transport layer the analyzer uses. */
-std::vector<::zeek::spicy::rt::PortRange> ports; /**< The ports associated with the analyzer. */
 hilti::ID unit_name_orig; /**< The fully-qualified name of the unit type to parse the originator
 side. */
 hilti::ID unit_name_resp; /**< The fully-qualified name of the unit type to parse the originator
@@ -83,7 +83,7 @@ function get_conn_stats%(%): ConnStats

 r->Assign(n++, Connection::TotalConnections());
 r->Assign(n++, Connection::CurrentConnections());
-r->Assign(n++, session_mgr->CurrentSessions());
+r->Assign(n++, static_cast<uint64_t>(session_mgr->CurrentSessions()));

 session::Stats s;
 if ( session_mgr )

@@ -252,10 +252,10 @@ function get_dns_stats%(%): DNSStats
 r->Assign(n++, static_cast<uint64_t>(dstats.successful));
 r->Assign(n++, static_cast<uint64_t>(dstats.failed));
 r->Assign(n++, static_cast<uint64_t>(dstats.pending));
-r->Assign(n++, static_cast<uint64_t>(dstats.cached_hosts));
-r->Assign(n++, static_cast<uint64_t>(dstats.cached_addresses));
-r->Assign(n++, static_cast<uint64_t>(dstats.cached_texts));
-r->Assign(n++, static_cast<uint64_t>(dstats.cached_total));
+r->Assign(n++, static_cast<uint64_t>(dstats.cached.hosts));
+r->Assign(n++, static_cast<uint64_t>(dstats.cached.addresses));
+r->Assign(n++, static_cast<uint64_t>(dstats.cached.texts));
+r->Assign(n++, static_cast<uint64_t>(dstats.cached.total));

 return std::move(r);
 %}

@@ -337,7 +337,7 @@ function get_thread_stats%(%): ThreadStats
 auto r = zeek::make_intrusive<zeek::RecordVal>(ThreadStats);
 int n = 0;

-r->Assign(n++, zeek::thread_mgr->NumThreads());
+r->Assign(n++, static_cast<uint64_t>(zeek::thread_mgr->NumThreads()));

 return std::move(r);
 %}
@@ -9,6 +9,7 @@ zeek_add_subdir_library(
 ProcessStats.cc
 Utils.cc
 BIFS
+consts.bif
 telemetry.bif)

 # We don't need to include the civetweb headers across the whole project, only
@@ -3,7 +3,7 @@
 using namespace zeek::telemetry;

 Counter::Counter(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept
-: handle(family->Add(labels)), labels(labels) {
+: family(family), handle(family->Add(labels)), labels(labels) {
 if ( callback ) {
 handle.AddCollectCallback(std::move(callback));
 has_callback = true;

@@ -56,6 +56,7 @@ public:
 bool CompareLabels(const prometheus::Labels& lbls) const { return labels == lbls; }

 private:
+FamilyType* family = nullptr;
 Handle& handle;
 prometheus::Labels labels;
 bool has_callback = false;

@@ -15,7 +15,7 @@ double Gauge::Value() const noexcept {

 Gauge::Gauge(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept
-: handle(family->Add(labels)), labels(labels) {
+: family(family), handle(family->Add(labels)), labels(labels) {
 if ( callback ) {
 handle.AddCollectCallback(std::move(callback));
 has_callback = true;

@@ -74,6 +74,7 @@ public:
 bool CompareLabels(const prometheus::Labels& lbls) const { return labels == lbls; }

 private:
+FamilyType* family = nullptr;
 Handle& handle;
 prometheus::Labels labels;
 bool has_callback = false;
@@ -6,6 +6,7 @@

 // CivetServer is from the civetweb submodule in prometheus-cpp
 #include <CivetServer.h>
+#include <prometheus/collectable.h>
 #include <prometheus/exposer.h>
 #include <prometheus/registry.h>
 #include <rapidjson/document.h>

@@ -16,19 +17,32 @@

 #include "zeek/3rdparty/doctest.h"
 #include "zeek/ID.h"
+#include "zeek/RunState.h"
 #include "zeek/ZeekString.h"
 #include "zeek/broker/Manager.h"
+#include "zeek/iosource/Manager.h"
 #include "zeek/telemetry/ProcessStats.h"
 #include "zeek/telemetry/Timer.h"
+#include "zeek/telemetry/consts.bif.h"
 #include "zeek/telemetry/telemetry.bif.h"
 #include "zeek/threading/formatters/detail/json.h"

 namespace zeek::telemetry {

-Manager::Manager() { prometheus_registry = std::make_shared<prometheus::Registry>(); }
+/**
+* Prometheus Collectable interface used to insert Zeek callback processing
+* before the Prometheus registry's collection of metric data.
+*/
+class ZeekCollectable : public prometheus::Collectable {
+public:
+std::vector<prometheus::MetricFamily> Collect() const override {
+telemetry_mgr->WaitForPrometheusCallbacks();
+return {};
+}
+};
+
+Manager::Manager() : IOSource(true) { prometheus_registry = std::make_shared<prometheus::Registry>(); }

-// This can't be defined as =default because of the use of unique_ptr with a forward-declared type
-// in Manager.h
 Manager::~Manager() {}

 void Manager::InitPostScript() {
@@ -75,7 +89,9 @@ void Manager::InitPostScript() {

 if ( ! getenv("ZEEKCTL_CHECK_CONFIG") ) {
 try {
-prometheus_exposer = std::make_unique<prometheus::Exposer>(prometheus_url, 2, callbacks);
+prometheus_exposer =
+std::make_unique<prometheus::Exposer>(prometheus_url, BifConst::Telemetry::civetweb_threads,
+callbacks);

 // CivetWeb stores a copy of the callbacks, so we're safe to delete the pointer here
 delete callbacks;

@@ -84,19 +100,26 @@ void Manager::InitPostScript() {
 prometheus_url.c_str());
 }

+// This has to be inserted before the registry below. The exposer
+// processes the collectors in order of insertion. We want to make
+// sure that the callbacks get called and the values in the metrics
+// are updated before prometheus-cpp scrapes them.
+zeek_collectable = std::make_shared<ZeekCollectable>();
+prometheus_exposer->RegisterCollectable(zeek_collectable);

 prometheus_exposer->RegisterCollectable(prometheus_registry);
 }
 }

 #ifdef HAVE_PROCESS_STAT_METRICS
-static auto get_stats = [this]() -> const detail::process_stats* {
+static auto get_stats = []() -> const detail::process_stats* {
 double now = util::current_time();
-if ( this->process_stats_last_updated < now - 0.01 ) {
-this->current_process_stats = detail::get_process_stats();
-this->process_stats_last_updated = now;
+if ( telemetry_mgr->process_stats_last_updated < now - 0.01 ) {
+telemetry_mgr->current_process_stats = detail::get_process_stats();
+telemetry_mgr->process_stats_last_updated = now;
 }

-return &this->current_process_stats;
+return &telemetry_mgr->current_process_stats;
 };
 rss_gauge = GaugeInstance("process", "resident_memory", {}, "Resident memory size", "bytes",
 []() -> prometheus::ClientMetric {

@@ -114,11 +137,19 @@ void Manager::InitPostScript() {
 return metric;
 });

-cpu_gauge = GaugeInstance("process", "cpu", {}, "Total user and system CPU time spent", "seconds",
+cpu_user_counter = CounterInstance("process", "cpu_user", {}, "Total user CPU time spent", "seconds",
 []() -> prometheus::ClientMetric {
 auto* s = get_stats();
 prometheus::ClientMetric metric;
-metric.gauge.value = s->cpu;
+metric.gauge.value = s->cpu_user;
+return metric;
+});
+
+cpu_system_counter = CounterInstance("process", "cpu_system", {}, "Total system CPU time spent", "seconds",
+[]() -> prometheus::ClientMetric {
+auto* s = get_stats();
+prometheus::ClientMetric metric;
+metric.gauge.value = s->cpu_system;
 return metric;
 });
@@ -130,6 +161,23 @@ void Manager::InitPostScript() {
 return metric;
 });
 #endif

+if ( ! iosource_mgr->RegisterFd(collector_flare.FD(), this) ) {
+reporter->FatalError("Failed to register telemetry collector descriptor");
+}
+}
+
+void Manager::Terminate() {
+// Notify the collector condition so that it doesn't hang waiting for
+// a collector request to complete.
+collector_cv.notify_all();
+
+// Shut down the exposer first of all so we stop getting requests for
+// data. This keeps us from getting a request on another thread while
+// we're shutting down.
+prometheus_exposer.reset();
+
+iosource_mgr->UnregisterFd(collector_flare.FD(), this);
 }

 // -- collect metric stuff -----------------------------------------------------

@@ -545,6 +593,39 @@ HistogramPtr Manager::HistogramInstance(std::string_view prefix, std::string_vie
 return HistogramInstance(prefix, name, lbls, bounds_span, helptext, unit);
 }

+void Manager::ProcessFd(int fd, int flags) {
+std::unique_lock<std::mutex> lk(collector_cv_mtx);
+
+collector_flare.Extinguish();
+
+prometheus_registry->UpdateViaCallbacks();
+collector_response_idx = collector_request_idx;
+
+lk.unlock();
+collector_cv.notify_all();
+}
+
+void Manager::WaitForPrometheusCallbacks() {
+std::unique_lock<std::mutex> lk(collector_cv_mtx);
+
+++collector_request_idx;
+uint64_t expected_idx = collector_request_idx;
+collector_flare.Fire();
+
+// It should *not* take 5 seconds to go through all of the callbacks, but
+// set this to have a timeout anyways just to avoid a deadlock.
+bool res = collector_cv.wait_for(lk,
+std::chrono::microseconds(
+static_cast<long>(BifConst::Telemetry::callback_timeout * 1000000)),
+[expected_idx]() {
+return telemetry_mgr->collector_response_idx >= expected_idx ||
+zeek::run_state::terminating;
+});
+
+if ( ! res )
+fprintf(stderr, "Timeout waiting for prometheus callbacks\n");
+}
+
 } // namespace zeek::telemetry

 // -- unit tests ---------------------------------------------------------------
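
The two methods added above form a cross-thread handshake: the CivetWeb scrape thread calls WaitForPrometheusCallbacks(), which bumps a request counter, fires the flare, and blocks on the condition variable; the main loop sees the flare's file descriptor become ready, runs the registry's collect callbacks in ProcessFd(), records the response counter, and notifies the waiter. A condensed sketch of that handshake with generic stand-in names (the flare is omitted here; a plain condition variable carries the signal):

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

// Stand-ins for the Manager members shown above.
static std::mutex cv_mtx;
static std::condition_variable cv;
static uint64_t request_idx = 0;  // bumped by the scrape thread
static uint64_t response_idx = 0; // bumped by the main thread once callbacks ran

// Scrape-thread side, as in WaitForPrometheusCallbacks(): ask the main thread
// to refresh callback-backed metrics and wait (with a timeout) until it has.
void wait_for_callbacks() {
    std::unique_lock<std::mutex> lk(cv_mtx);
    uint64_t expected = ++request_idx;
    // The real code fires a Flare here so the main loop's IO source wakes up.
    cv.wait_for(lk, std::chrono::seconds(5), [&] { return response_idx >= expected; });
}

// Main-loop side, as in ProcessFd(): run the callbacks, then release the waiter.
void process_request() {
    std::unique_lock<std::mutex> lk(cv_mtx);
    // prometheus_registry->UpdateViaCallbacks() runs here in the real code.
    response_idx = request_idx;
    lk.unlock();
    cv.notify_all();
}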
@@ -552,18 +633,6 @@ HistogramPtr Manager::HistogramInstance(std::string_view prefix, std::string_vie
 using namespace std::literals;
 using namespace zeek::telemetry;

-namespace {

-template<class T>
-auto toVector(zeek::Span<T> xs) {
-std::vector<std::remove_const_t<T>> result;
-for ( auto&& x : xs )
-result.emplace_back(x);
-return result;
-}

-} // namespace

 SCENARIO("telemetry managers provide access to counter families") {
 GIVEN("a telemetry manager") {
 Manager mgr;
@@ -9,8 +9,10 @@
 #include <string_view>
 #include <vector>

+#include "zeek/Flare.h"
 #include "zeek/IntrusivePtr.h"
 #include "zeek/Span.h"
+#include "zeek/iosource/IOSource.h"
 #include "zeek/telemetry/Counter.h"
 #include "zeek/telemetry/Gauge.h"
 #include "zeek/telemetry/Histogram.h"

@@ -29,15 +31,16 @@ class Registry;

 namespace zeek::telemetry {

+class ZeekCollectable;

 /**
 * Manages a collection of metric families.
 */
-class Manager final {
+class Manager final : public iosource::IOSource {
 public:
 Manager();

 Manager(const Manager&) = delete;

 Manager& operator=(const Manager&) = delete;

 ~Manager();

@@ -50,6 +53,8 @@ public:
 */
 void InitPostScript();

+void Terminate();

 /**
 * @return A VectorVal containing all counter and gauge metrics and their values matching prefix and name.
 * @param prefix The prefix pattern to use for filtering. Supports globbing.

@@ -88,8 +93,8 @@ public:
 * @param labels Values for all label dimensions of the metric.
 * @param helptext Short explanation of the metric.
 * @param unit Unit of measurement.
-* @param callback Passing a callback method will enable asynchronous mode. The callback method will be called by
-* the metrics subsystem whenever data is requested.
+* @param callback Passing a callback method will enable asynchronous mode. The callback method will be called
+* by the metrics subsystem whenever data is requested.
 */
 CounterPtr CounterInstance(std::string_view prefix, std::string_view name, Span<const LabelView> labels,
 std::string_view helptext, std::string_view unit = "",

@@ -124,8 +129,8 @@ public:
 * @param labels Values for all label dimensions of the metric.
 * @param helptext Short explanation of the metric.
 * @param unit Unit of measurement.
-* @param callback Passing a callback method will enable asynchronous mode. The callback method will be called by
-* the metrics subsystem whenever data is requested.
+* @param callback Passing a callback method will enable asynchronous mode. The callback method will be called
+* by the metrics subsystem whenever data is requested.
 */
 GaugePtr GaugeInstance(std::string_view prefix, std::string_view name, Span<const LabelView> labels,
 std::string_view helptext, std::string_view unit = "",

@@ -212,6 +217,12 @@ public:
 */
 std::shared_ptr<prometheus::Registry> GetRegistry() const { return prometheus_registry; }

+// IOSource interface
+double GetNextTimeout() override { return -1.0; }
+void Process() override {}
+const char* Tag() override { return "Telemetry::Manager"; }
+void ProcessFd(int fd, int flags) override;

 protected:
 template<class F>
 static auto WithLabelNames(Span<const LabelView> xs, F continuation) {

@@ -231,6 +242,15 @@ protected:
 }
 }

+friend class ZeekCollectable;
+
+/**
+* Fires the flare for prometheus-cpp callback handling and waits for it to complete.
+* This can be called from other threads to ensure the callback handling happens on
+* the main thread.
+*/
+void WaitForPrometheusCallbacks();

 private:
 RecordValPtr GetMetricOptsRecord(const prometheus::MetricFamily& metric_family);
 void BuildClusterJson();

@@ -243,13 +263,22 @@ private:

 GaugePtr rss_gauge;
 GaugePtr vms_gauge;
-GaugePtr cpu_gauge;
+CounterPtr cpu_user_counter;
+CounterPtr cpu_system_counter;
 GaugePtr fds_gauge;

 std::shared_ptr<prometheus::Registry> prometheus_registry;
 std::unique_ptr<prometheus::Exposer> prometheus_exposer;

 std::string cluster_json;

+std::shared_ptr<ZeekCollectable> zeek_collectable;
+zeek::detail::Flare collector_flare;
+std::condition_variable collector_cv;
+std::mutex collector_cv_mtx;
+// Only modified under collector_cv_mtx!
+uint64_t collector_request_idx = 0;
+uint64_t collector_response_idx = 0;
 };

 } // namespace zeek::telemetry
@@ -34,10 +34,10 @@ process_stats get_process_stats() {
 if ( task_info(mach_task_self(), TASK_THREAD_TIMES_INFO, reinterpret_cast<task_info_t>(&info), &count) ==
 KERN_SUCCESS ) {
 // Round to milliseconds.
-result.cpu += info.user_time.seconds;
-result.cpu += ceil(info.user_time.microseconds / 1000.0) / 1000.0;
-result.cpu += info.system_time.seconds;
-result.cpu += ceil(info.system_time.microseconds / 1000.0) / 1000.0;
+result.cpu_user += info.user_time.seconds;
+result.cpu_user += ceil(info.user_time.microseconds / 1000.0) / 1000.0;
+result.cpu_system += info.system_time.seconds;
+result.cpu_system += ceil(info.system_time.microseconds / 1000.0) / 1000.0;
 }
 }
 // Fetch open file handles.

@@ -154,7 +154,8 @@ process_stats get_process_stats() {

 result.rss = rss_pages * page_size;
 result.vms = vmsize_bytes;
-result.cpu = static_cast<double>(utime_ticks + stime_ticks) / ticks_per_second;
+result.cpu_user = static_cast<double>(utime_ticks) / ticks_per_second;
+result.cpu_system = static_cast<double>(stime_ticks) / ticks_per_second;

 result.fds = count_entries_in_directory("/proc/self/fd");
 }

@@ -187,7 +188,10 @@ process_stats get_process_stats() {
 if ( kp ) {
 result.vms = kp->ki_size;
 result.rss = kp->ki_rssize * getpagesize();
-result.cpu = static_cast<double>(kp->ki_runtime) / 1000000.0;
+result.cpu_user = static_cast<double>(kp->ki_rusage.ru_utime.tv_sec) +
+(static_cast<double>(kp->ki_rusage.ru_utime.tv_usec) / 1e6);
+result.cpu_system = static_cast<double>(kp->ki_rusage.ru_stime.tv_sec) +
+(static_cast<double>(kp->ki_rusage.ru_stime.tv_usec) / 1e6);

 struct procstat* procstat = procstat_open_sysctl();
 struct filestat_list* files = procstat_getfiles(procstat, kp, 0);
@@ -9,7 +9,8 @@ namespace zeek::telemetry::detail {
 struct process_stats {
 int64_t rss = 0;
 int64_t vms = 0;
-double cpu = 0.0;
+double cpu_user = 0.0;
+double cpu_system = 0.0;
 int64_t fds = 0;
 };

src/telemetry/consts.bif (new file)
@@ -0,0 +1,2 @@
+const Telemetry::callback_timeout: interval;
+const Telemetry::civetweb_threads: count;
@@ -2,12 +2,14 @@

 #include <sys/socket.h>
 #include <unistd.h>
+#include <cstdint>
+#include <limits>

 #include "zeek/Event.h"
 #include "zeek/IPAddr.h"
 #include "zeek/NetVar.h"
 #include "zeek/RunState.h"
-#include "zeek/iosource/Manager.h"
+#include "zeek/telemetry/Manager.h"

 namespace zeek::threading {
 namespace detail {
@@ -22,6 +24,8 @@ void HeartbeatTimer::Dispatch(double t, bool is_expire) {

 } // namespace detail

+static std::vector<uint64_t> pending_bucket_brackets = {1, 10, 100, 1000, 10000, std::numeric_limits<uint64_t>::max()};

 Manager::Manager() {
 DBG_LOG(DBG_THREADING, "Creating thread manager ...");

@@ -36,6 +40,112 @@ Manager::~Manager() {
 Terminate();
 }

+void Manager::InitPostScript() {
+static auto get_message_thread_stats = []() -> const BucketedMessages* {
+if ( ! thread_mgr->terminating ) {
+double now = util::current_time();
+if ( thread_mgr->bucketed_messages_last_updated < now - 1 ) {
+thread_mgr->current_bucketed_messages.pending_in_total = 0;
+thread_mgr->current_bucketed_messages.pending_out_total = 0;
+for ( auto& m : thread_mgr->current_bucketed_messages.pending_in )
+m.second = 0;
+for ( auto& m : thread_mgr->current_bucketed_messages.pending_out )
+m.second = 0;

+MsgThread::Stats thread_stats;
+for ( const auto& t : thread_mgr->msg_threads ) {
+t->GetStats(&thread_stats);

+thread_mgr->current_bucketed_messages.pending_in_total += thread_stats.pending_in;
+thread_mgr->current_bucketed_messages.pending_out_total += thread_stats.pending_out;

+for ( auto upper_limit : pending_bucket_brackets ) {
+if ( thread_stats.pending_in <= upper_limit )
+thread_mgr->current_bucketed_messages.pending_in[upper_limit]++;

+if ( thread_stats.pending_out <= upper_limit )
+thread_mgr->current_bucketed_messages.pending_out[upper_limit]++;
+}
+}

+thread_mgr->bucketed_messages_last_updated = now;
+}
+}

+return &thread_mgr->current_bucketed_messages;
+};

+num_threads_metric =
+telemetry_mgr->GaugeInstance("zeek", "msgthread_active_threads", {}, "Number of active threads", "",
+[]() -> prometheus::ClientMetric {
+prometheus::ClientMetric metric;
+metric.gauge.value =
+thread_mgr ? static_cast<double>(thread_mgr->all_threads.size()) : 0.0;
+return metric;
+});

+total_threads_metric = telemetry_mgr->CounterInstance("zeek", "msgthread_threads", {}, "Total number of threads");
+total_messages_in_metric =
+telemetry_mgr->CounterInstance("zeek", "msgthread_in_messages", {}, "Number of inbound messages received", "");

+total_messages_out_metric =
+telemetry_mgr->CounterInstance("zeek", "msgthread_out_messages", {}, "Number of outbound messages sent", "");

+pending_messages_in_metric =
+telemetry_mgr->GaugeInstance("zeek", "msgthread_pending_in_messages", {}, "Pending number of inbound messages",
+"", []() -> prometheus::ClientMetric {
+auto* s = get_message_thread_stats();
+prometheus::ClientMetric metric;
+metric.gauge.value = static_cast<double>(s->pending_in_total);
+return metric;
+});
+pending_messages_out_metric =
+telemetry_mgr->GaugeInstance("zeek", "msgthread_pending_out_messages", {},
+"Pending number of outbound messages", "", []() -> prometheus::ClientMetric {
+auto* s = get_message_thread_stats();
+prometheus::ClientMetric metric;
+metric.gauge.value = static_cast<double>(s->pending_out_total);
+return metric;
+});

+pending_message_in_buckets_fam =
+telemetry_mgr->GaugeFamily("zeek", "msgthread_pending_messages_in_buckets", {"le"},
+"Number of threads with pending inbound messages split into buckets");
+pending_message_out_buckets_fam =
+telemetry_mgr->GaugeFamily("zeek", "msgthread_pending_messages_out_buckets", {"le"},
+"Number of threads with pending outbound messages split into buckets");

+for ( auto upper_limit : pending_bucket_brackets ) {
+std::string upper_limit_str;
+if ( upper_limit == std::numeric_limits<uint64_t>::max() )
+upper_limit_str = "inf";
+else
+upper_limit_str = std::to_string(upper_limit);

+current_bucketed_messages.pending_in[upper_limit] = 0;
+current_bucketed_messages.pending_out[upper_limit] = 0;

+pending_message_in_buckets[upper_limit] =
+pending_message_in_buckets_fam->GetOrAdd({{"le", upper_limit_str}},
+[upper_limit]() -> prometheus::ClientMetric {
+auto* s = get_message_thread_stats();
+prometheus::ClientMetric metric;
+metric.gauge.value =
+static_cast<double>(s->pending_in.at(upper_limit));
+return metric;
+});
+pending_message_out_buckets[upper_limit] =
+pending_message_out_buckets_fam->GetOrAdd({{"le", upper_limit_str}},
+[upper_limit]() -> prometheus::ClientMetric {
+auto* s = get_message_thread_stats();
+prometheus::ClientMetric metric;
+metric.gauge.value =
+static_cast<double>(s->pending_out.at(upper_limit));
+return metric;
+});
+}
+}

 void Manager::Terminate() {
 DBG_LOG(DBG_THREADING, "Terminating thread manager ...");
 terminating = true;
|
||||||
|
|
||||||
if ( ! heartbeat_timer_running )
|
if ( ! heartbeat_timer_running )
|
||||||
StartHeartbeatTimer();
|
StartHeartbeatTimer();
|
||||||
|
|
||||||
|
total_threads_metric->Inc();
|
||||||
}
|
}
|
||||||
|
|
||||||
void Manager::AddMsgThread(MsgThread* thread) {
|
void Manager::AddMsgThread(MsgThread* thread) {
|
||||||
|
@ -133,6 +245,10 @@ void Manager::StartHeartbeatTimer() {
|
||||||
new detail::HeartbeatTimer(run_state::network_time + BifConst::Threading::heartbeat_interval));
|
new detail::HeartbeatTimer(run_state::network_time + BifConst::Threading::heartbeat_interval));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Manager::MessageIn() { total_messages_in_metric->Inc(); }
|
||||||
|
|
||||||
|
void Manager::MessageOut() { total_messages_out_metric->Inc(); }
|
||||||
|
|
||||||
// Raise everything in here as warnings so it is passed to scriptland without
|
// Raise everything in here as warnings so it is passed to scriptland without
|
||||||
// looking "fatal". In addition to these warnings, ReaderBackend will queue
|
// looking "fatal". In addition to these warnings, ReaderBackend will queue
|
||||||
// one reporter message.
|
// one reporter message.
|
||||||
|
|
|
@@ -1,12 +1,23 @@
 #pragma once

 #include <list>
+#include <map>
 #include <utility>

 #include "zeek/Timer.h"
 #include "zeek/threading/MsgThread.h"

 namespace zeek {

+namespace telemetry {
+class Gauge;
+using GaugePtr = std::shared_ptr<Gauge>;
+class GaugeFamily;
+using GaugeFamilyPtr = std::shared_ptr<GaugeFamily>;
+class Counter;
+using CounterPtr = std::shared_ptr<Counter>;
+} // namespace telemetry

 namespace threading {
 namespace detail {

@@ -46,6 +57,12 @@ public:
 */
 ~Manager();

+/**
+* Performs initialization that can only happen after script parsing has
+* completed.
+*/
+void InitPostScript();

 /**
 * Terminates the manager's processor. The method signals all threads
 * to terminates and wait for them to do so. It then joins them and

@@ -77,7 +94,7 @@ public:
 * threads that are not yet joined, including any potentially in
 * Terminating() state.
 */
-int NumThreads() const { return all_threads.size(); }
+size_t NumThreads() const { return all_threads.size(); }

 /**
 * Signals a specific threads to terminate immediately.

@@ -136,6 +153,16 @@ protected:
 */
 void StartHeartbeatTimer();

+/**
+* Called by MsgThread::SendIn() to update metrics.
+*/
+void MessageIn();
+
+/**
+* Called by MsgThread::SendOut() to update metrics.
+*/
+void MessageOut();

 private:
 using all_thread_list = std::list<BasicThread*>;
 all_thread_list all_threads;
@@ -151,6 +178,27 @@ private:
 msg_stats_list stats;

 bool heartbeat_timer_running = false;
+telemetry::GaugePtr num_threads_metric;
+telemetry::CounterPtr total_threads_metric;
+telemetry::CounterPtr total_messages_in_metric;
+telemetry::CounterPtr total_messages_out_metric;
+telemetry::GaugePtr pending_messages_in_metric;
+telemetry::GaugePtr pending_messages_out_metric;
+
+telemetry::GaugeFamilyPtr pending_message_in_buckets_fam;
+telemetry::GaugeFamilyPtr pending_message_out_buckets_fam;
+std::map<uint64_t, telemetry::GaugePtr> pending_message_in_buckets;
+std::map<uint64_t, telemetry::GaugePtr> pending_message_out_buckets;
+
+struct BucketedMessages {
+uint64_t pending_in_total;
+uint64_t pending_out_total;
+std::map<uint64_t, uint64_t> pending_in;
+std::map<uint64_t, uint64_t> pending_out;
+};
+
+BucketedMessages current_bucketed_messages;
+double bucketed_messages_last_updated = 0.0;
 };

 } // namespace threading
|
@ -9,6 +9,7 @@
|
||||||
#include "zeek/Obj.h"
|
#include "zeek/Obj.h"
|
||||||
#include "zeek/RunState.h"
|
#include "zeek/RunState.h"
|
||||||
#include "zeek/iosource/Manager.h"
|
#include "zeek/iosource/Manager.h"
|
||||||
|
#include "zeek/telemetry/Manager.h"
|
||||||
#include "zeek/threading/Manager.h"
|
#include "zeek/threading/Manager.h"
|
||||||
|
|
||||||
// Set by Zeek's main signal handler.
|
// Set by Zeek's main signal handler.
|
||||||
|
@ -387,6 +388,8 @@ void MsgThread::SendIn(BasicInputMessage* msg, bool force) {
|
||||||
|
|
||||||
queue_in.Put(msg);
|
queue_in.Put(msg);
|
||||||
++cnt_sent_in;
|
++cnt_sent_in;
|
||||||
|
|
||||||
|
zeek::thread_mgr->MessageIn();
|
||||||
}
|
}
|
||||||
|
|
||||||
void MsgThread::SendOut(BasicOutputMessage* msg, bool force) {
|
void MsgThread::SendOut(BasicOutputMessage* msg, bool force) {
|
||||||
|
@ -399,6 +402,8 @@ void MsgThread::SendOut(BasicOutputMessage* msg, bool force) {
|
||||||
|
|
||||||
++cnt_sent_out;
|
++cnt_sent_out;
|
||||||
|
|
||||||
|
zeek::thread_mgr->MessageOut();
|
||||||
|
|
||||||
if ( io_source )
|
if ( io_source )
|
||||||
io_source->Fire();
|
io_source->Fire();
|
||||||
}
|
}
|
||||||
|
|