mirror of
https://github.com/zeek/zeek.git
synced 2025-10-02 14:48:21 +00:00
Compare commits
98 commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
7c8a7680ba | ||
![]() |
26b50908e1 | ||
![]() |
c2f2388f18 | ||
![]() |
d745d746bc | ||
![]() |
5fbb6b4599 | ||
![]() |
7c463b5f92 | ||
![]() |
e7f694bcbb | ||
![]() |
f54416eae4 | ||
![]() |
68bfe8d1c0 | ||
![]() |
cf97ed6ac1 | ||
![]() |
35cd891d6e | ||
![]() |
f300ddb9fe | ||
![]() |
fa5a7c4a5b | ||
![]() |
56b596a3e3 | ||
![]() |
91067b32cc | ||
![]() |
43ab74b70f | ||
![]() |
887d92e26c | ||
![]() |
b1fec3284e | ||
![]() |
5ce0f2edb6 | ||
![]() |
d5c3cdf33a | ||
![]() |
7ed52733d2 | ||
![]() |
056b70bd2d | ||
![]() |
f697670668 | ||
![]() |
826d5e6fb7 | ||
![]() |
1c3be97fe9 | ||
![]() |
107c0da15d | ||
![]() |
e3845060dc | ||
![]() |
34ef830b9c | ||
![]() |
3ebe867193 | ||
![]() |
300b7a11ac | ||
![]() |
f5fefd17df | ||
![]() |
3281aa6284 | ||
![]() |
bcfd47c28d | ||
![]() |
10d5ca5948 | ||
![]() |
f693f22192 | ||
![]() |
c86f9267ff | ||
![]() |
dfbeb3e71f | ||
![]() |
fabb4023c9 | ||
![]() |
9eb3ada8c8 | ||
![]() |
7a73f81792 | ||
![]() |
ea44c30272 | ||
![]() |
c988bd2e4d | ||
![]() |
5579494d48 | ||
![]() |
121170a5de | ||
![]() |
0e4f2a2bab | ||
![]() |
270429bfea | ||
![]() |
815001f2aa | ||
![]() |
88c37d0be8 | ||
![]() |
40db8463df | ||
![]() |
fb51e3a88f | ||
![]() |
5a0e2bf771 | ||
![]() |
95e7c5a63e | ||
![]() |
024304bddf | ||
![]() |
2cc6c735d3 | ||
![]() |
3bf8bfaac6 | ||
![]() |
89b9f9a456 | ||
![]() |
8de8fb8fae | ||
![]() |
595cdf8b55 | ||
![]() |
74b832fa39 | ||
![]() |
15be682f63 | ||
![]() |
8f9c5f79c6 | ||
![]() |
382b4b5473 | ||
![]() |
6f65b88f1b | ||
![]() |
cfe47f40a4 | ||
![]() |
0fd6672dde | ||
![]() |
e7ab18b343 | ||
![]() |
8a92b150a5 | ||
![]() |
dd4597865a | ||
![]() |
056bbe04ea | ||
![]() |
f6b8864584 | ||
![]() |
d1f6e91988 | ||
![]() |
6bbaef3e09 | ||
![]() |
55d36fc2cd | ||
![]() |
f8fbeca504 | ||
![]() |
72ff343f17 | ||
![]() |
b76096a9ee | ||
![]() |
b9e4669632 | ||
![]() |
5974613cae | ||
![]() |
3a44bda957 | ||
![]() |
51262d02c7 | ||
![]() |
b46aeefbab | ||
![]() |
a4b746e5e8 | ||
![]() |
746ae4d2cc | ||
![]() |
a65a339aa8 | ||
![]() |
8014c4b8c3 | ||
![]() |
d9dc121e9a | ||
![]() |
5a56ff92d2 | ||
![]() |
b13dfa3b16 | ||
![]() |
d17a1f9822 | ||
![]() |
5cdddd92d5 | ||
![]() |
b8d11f4688 | ||
![]() |
91b23a6e2e | ||
![]() |
a8c56c1f25 | ||
![]() |
5f6df68463 | ||
![]() |
ac95484382 | ||
![]() |
962b03a431 | ||
![]() |
92a685df50 | ||
![]() |
1bf439cd58 |
210 changed files with 3196 additions and 631 deletions
43
.cirrus.yml
43
.cirrus.yml
|
@ -10,7 +10,7 @@ btest_jobs: &BTEST_JOBS 4
|
|||
btest_retries: &BTEST_RETRIES 2
|
||||
memory: &MEMORY 16GB
|
||||
|
||||
config: &CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror
|
||||
config: &CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror -D FETCHCONTENT_FULLY_DISCONNECTED:BOOL=ON
|
||||
no_spicy_config: &NO_SPICY_CONFIG --build-type=release --disable-broker-tests --disable-spicy --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror
|
||||
static_config: &STATIC_CONFIG --build-type=release --disable-broker-tests --enable-static-broker --enable-static-binpac --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror
|
||||
binary_config: &BINARY_CONFIG --prefix=$CIRRUS_WORKING_DIR/install --libdir=$CIRRUS_WORKING_DIR/install/lib --binary-package --enable-static-broker --enable-static-binpac --disable-broker-tests --build-type=Release --ccache --enable-werror
|
||||
|
@ -35,8 +35,7 @@ macos_environment: &MACOS_ENVIRONMENT
|
|||
|
||||
freebsd_resources_template: &FREEBSD_RESOURCES_TEMPLATE
|
||||
cpu: 8
|
||||
# Not allowed to request less than 8GB for an 8 CPU FreeBSD VM.
|
||||
memory: 8GB
|
||||
memory: *MEMORY
|
||||
# For greediness, see https://medium.com/cirruslabs/introducing-greedy-container-instances-29aad06dc2b4
|
||||
greedy: true
|
||||
|
||||
|
@ -67,6 +66,12 @@ skip_task_on_pr: &SKIP_TASK_ON_PR
|
|||
skip: >
|
||||
($CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ '.*fullci.*')
|
||||
|
||||
zam_skip_task_on_pr: &ZAM_SKIP_TASK_ON_PR
|
||||
# Skip this task on PRs if it does not have the fullci or zamci label,
|
||||
# it continues to run for direct pushes to master/release.
|
||||
skip: >
|
||||
($CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ '.*fullci.*' && $CIRRUS_PR_LABELS !=~ '.*zamci.*')
|
||||
|
||||
benchmark_only_if_template: &BENCHMARK_ONLY_IF_TEMPLATE
|
||||
# only_if condition for cron-triggered benchmarking tests.
|
||||
# These currently do not run for release/.*
|
||||
|
@ -389,6 +394,21 @@ asan_sanitizer_task:
|
|||
# Use absolute paths for coverage files.
|
||||
CCACHE_BASEDIR:
|
||||
|
||||
# ASAN task executing btests with zam alternative.
|
||||
asan_sanitizer_zam_task:
|
||||
container:
|
||||
dockerfile: ci/ubuntu-24.04/Dockerfile
|
||||
<< : *RESOURCES_TEMPLATE
|
||||
|
||||
<< : *CI_TEMPLATE
|
||||
env:
|
||||
ZEEK_CI_CONFIGURE_FLAGS: *ASAN_SANITIZER_CONFIG
|
||||
ASAN_OPTIONS: detect_leaks=1:detect_odr_violation=0
|
||||
ZEEK_CI_SKIP_UNIT_TESTS: 1
|
||||
ZEEK_CI_SKIP_EXTERNAL_BTESTS: 1
|
||||
ZEEK_CI_BTEST_EXTRA_ARGS: -a zam
|
||||
<< : *ZAM_SKIP_TASK_ON_PR
|
||||
|
||||
ubsan_sanitizer_task:
|
||||
container:
|
||||
# Just uses a recent/common distro to run undefined behavior checks.
|
||||
|
@ -406,6 +426,23 @@ ubsan_sanitizer_task:
|
|||
ZEEK_TAILORED_UB_CHECKS: 1
|
||||
UBSAN_OPTIONS: print_stacktrace=1
|
||||
|
||||
ubsan_sanitizer_zam_task:
|
||||
container:
|
||||
dockerfile: ci/ubuntu-24.04/Dockerfile
|
||||
<< : *RESOURCES_TEMPLATE
|
||||
|
||||
<< : *CI_TEMPLATE
|
||||
env:
|
||||
CC: clang-18
|
||||
CXX: clang++-18
|
||||
ZEEK_CI_CONFIGURE_FLAGS: *UBSAN_SANITIZER_CONFIG
|
||||
ZEEK_TAILORED_UB_CHECKS: 1
|
||||
UBSAN_OPTIONS: print_stacktrace=1
|
||||
ZEEK_CI_SKIP_UNIT_TESTS: 1
|
||||
ZEEK_CI_SKIP_EXTERNAL_BTESTS: 1
|
||||
ZEEK_CI_BTEST_EXTRA_ARGS: -a zam
|
||||
<< : *ZAM_SKIP_TASK_ON_PR
|
||||
|
||||
tsan_sanitizer_task:
|
||||
container:
|
||||
# Just uses a recent/common distro to run memory error/leak checks.
|
||||
|
|
2
.github/workflows/generate-docs.yml
vendored
2
.github/workflows/generate-docs.yml
vendored
|
@ -17,7 +17,7 @@ jobs:
|
|||
permissions:
|
||||
contents: write # for Git to git push
|
||||
if: github.repository == 'zeek/zeek'
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
# We only perform a push if the action was triggered via a schedule
|
||||
|
|
|
@ -30,6 +30,7 @@ extend-ignore-re = [
|
|||
"\"BaR\"",
|
||||
"\"xFoObar\"",
|
||||
"\"FoO\"",
|
||||
"Steve Smoot",
|
||||
]
|
||||
|
||||
extend-ignore-identifiers-re = [
|
||||
|
|
561
CHANGES
561
CHANGES
|
@ -1,3 +1,564 @@
|
|||
7.0.5 | 2024-12-16 11:12:33 -0700
|
||||
|
||||
* Update CHANGES, VERSION, and NEWS for 7.0.5 release (Tim Wojtulewicz, Corelight)
|
||||
|
||||
7.0.4-10 | 2024-12-16 10:21:46 -0700
|
||||
|
||||
* QUIC/decrypt_crypto: Actually check if decryption was successful (Arne Welzel, Corelight)
|
||||
|
||||
...and bail if it wasn't.
|
||||
|
||||
PCAP was produced using OSS-Fuzz input from issue 383379789.
|
||||
|
||||
* QUIC/decrypt_crypto: Limit payload_length to 10k (Arne Welzel, Corelight)
|
||||
|
||||
Given we dynamically allocate memory for decryption, employ a limit
|
||||
that is unlikely to be hit, but allows for large payloads produced
|
||||
by the fuzzer or jumbo frames.
|
||||
|
||||
* QUIC/decrypt_crypto: Fix decrypting into too small stack buffer (Arne Welzel, Corelight)
|
||||
|
||||
A QUIC initial packet larger than 1500 bytes could lead to crashes
|
||||
due to the usage of a fixed size stack buffer for decryption.
|
||||
|
||||
Allocate the necessary memory dynamically on the heap instead.
|
||||
|
||||
7.0.4-5 | 2024-12-13 12:25:43 -0700
|
||||
|
||||
* fix for memory management associated with ZAM table iteration (Vern Paxson, Corelight)
|
||||
|
||||
(cherry picked from commit 805e9db58840671c866c85461ad88198eeeec967)
|
||||
|
||||
7.0.4-4 | 2024-12-12 13:12:30 -0700
|
||||
|
||||
* Fix ZAM's implementation of Analyzer::name() BiF (Christian Kreibich, Corelight)
|
||||
|
||||
(cherry picked from commit e100a8e698d1dba0fc339eae800d13b298e55d46)
|
||||
|
||||
7.0.4-3 | 2024-12-12 13:04:44 -0700
|
||||
|
||||
* GH-4052: More robust memory management for ZAM execution - fixes #4052 (Vern Paxson, Corelight)
|
||||
|
||||
(cherry picked from commit c3b30b187e44de593d0ec685dc313e6aa10ff5e5)
|
||||
|
||||
7.0.4-2 | 2024-12-12 12:44:36 -0700
|
||||
|
||||
* Bump zeekjs to v0.14.0 (Arne Welzel, Corelight)
|
||||
79b0c21 version: 0.14.0
|
||||
b75b384 docker: Use Fedora 41 packages
|
||||
d65cbc8 Minor header cleanup
|
||||
4fd7e8b scripts: Remove __preload__.zeek and types.zeek
|
||||
93a8113 CreateEnvironment: Unset kOwnsProcessState and kOwnsInspector
|
||||
3b1f5d3 Instance::Init(): Replace parameters with Options struct
|
||||
|
||||
(cherry picked from commit aac640ebffdd58b1e25e42eee538b7c8669b1dd4)
|
||||
|
||||
* Fix documentation for `have_spicy` [skip CI] (Benjamin Bannier, Corelight)
|
||||
|
||||
The continued line was incorrectly marked up (as a plain comment instead
|
||||
of a Zeekygen comment) which caused only half of the docstring to be
|
||||
rendered by Zeekygen.
|
||||
|
||||
(cherry picked from commit 4a96d34af6fe68bad6afe12dffff6bd795e6b4b1)
|
||||
|
||||
7.0.4 | 2024-11-19 12:31:05 -0700
|
||||
|
||||
* Update CHANGES, VERSION, and NEWS for 7.0.4 release (Tim Wojtulewicz, Corelight)
|
||||
|
||||
7.0.3-27 | 2024-11-19 11:19:00 -0700
|
||||
|
||||
* Bump zeekjs to 0.13.2 (Arne Welzel, Corelight)
|
||||
|
||||
(cherry picked from commit 6e916efe8d9470cdca7b4f4933c44b8c1cab66b0)
|
||||
|
||||
7.0.3-26 | 2024-11-19 10:56:06 -0700
|
||||
|
||||
* Merge remote-tracking branch 'origin/topic/timw/speed-up-zam-ci-testing' (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* origin/topic/timw/speed-up-zam-ci-testing:
|
||||
CI: Use test.sh script for running ZAM tests, but disable parts of it
|
||||
|
||||
(cherry picked from commit d9a74680e08553b34cb164847f61b0ea00ad1f5f)
|
||||
|
||||
7.0.3-24 | 2024-11-19 09:32:54 -0700
|
||||
|
||||
* account for spaces encoding to plus signs in sqli regex detection (Cooper Grill)
|
||||
|
||||
(cherry picked from commit 5200b84fb3ce6c19e9d114b3d8fb0d964639446d)
|
||||
|
||||
7.0.3-23 | 2024-11-14 19:06:56 -0700
|
||||
|
||||
* btest: Skip core.script-args under TSAN (Arne Welzel, Corelight)
|
||||
|
||||
(cherry picked from commit 159f40a4bff10e7000cb5f5c8a08c6d2b236baef)
|
||||
|
||||
* Disable core.expr-execption btest under ZAM to fix CI builds (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Fix ubsan warning with ZAM and mmdb btest (Tim Wojtulewicz)
|
||||
|
||||
7.0.3-19 | 2024-11-14 12:15:54 -0700
|
||||
|
||||
* ci: Add asan and ubsan sanitizer tasks for ZAM (Arne Welzel, Corelight)
|
||||
|
||||
(cherry picked from commit 8945b2b186e633f1f7c3a07f9e343da278c037ec)
|
||||
|
||||
7.0.3-18 | 2024-11-14 12:13:59 -0700
|
||||
|
||||
* policy/community-id: Populate conn$community_id in new_connection() (Arne Welzel, Corelight)
|
||||
|
||||
This wasn't possible before #3028 was fixed, but now it's safe to set
|
||||
the value in new_connection() and allow other users access to the
|
||||
field much earlier. We do not have to deal with connection_flipped()
|
||||
because the community-id hash is symmetric.
|
||||
|
||||
(cherry picked from commit d3579c1f34fa679ac05df6f1f2cfbe11fc77b328)
|
||||
|
||||
* Update zeekjs submodule to latest tagged version (Tim Wojtulewicz, Corelight)
|
||||
|
||||
This picks up the changes to support Node.js v22.11.0.
|
||||
|
||||
7.0.3-16 | 2024-11-14 11:57:54 -0700
|
||||
|
||||
* GH-4006: Fix nullptr deref in Spicy accept/decline input (Evan Typanski, Corelight)
|
||||
|
||||
(cherry picked from commit 1d38c310719088ca0e9610ec8458fe4cf8e4a318)
|
||||
|
||||
7.0.3-15 | 2024-11-14 11:56:06 -0700
|
||||
|
||||
* Fix potential nullptr deref in Spicy plugin runtime (Benjamin Bannier, Corelight)
|
||||
|
||||
If we added a file but the other side of the connection had already run
|
||||
into a protocol violation and shut down we could previously have
|
||||
dereferenced a null cookie. This patch fixes the code so it now throws
|
||||
in such scenarios.
|
||||
|
||||
(cherry picked from commit 2e8d6e86e75bc7b0c7be67ab9c38738a1318f6ff)
|
||||
|
||||
* Assume no Spicy cookie in most places (Benjamin Bannier, Corelight)
|
||||
|
||||
We would previously assert that it was available which could have lead
|
||||
to aborts since when the analyzer for either side of a connection shuts
|
||||
down the connection cookie could get cleared and become nil. This patch
|
||||
reworks the code slightly so we now never assume it is available. We do
|
||||
this by either throwing or by making the whole operation requesting the
|
||||
cookie a noop.
|
||||
|
||||
(cherry picked from commit 2e8d6e86e75bc7b0c7be67ab9c38738a1318f6ff)
|
||||
|
||||
7.0.3-14 | 2024-11-14 11:52:34 -0700
|
||||
|
||||
* Fix up minor warnings in touched files (Evan Typanski, Corelight)
|
||||
|
||||
(cherry picked from commit 36af0591a6f2c7270c68deaee4c4d733fa4086b1)
|
||||
|
||||
* Fix Clang 19 deprecation failure (Evan Typanski, Corelight)
|
||||
|
||||
Clang 19 with libc++ started failing to compile because the default
|
||||
implementation of `std::char_traits` was removed, making uses of
|
||||
`std::char_traits<unsigned char>` invalid. This was more of used for
|
||||
convenience before, but it should be roughly the same behavior with
|
||||
`char`.
|
||||
|
||||
(cherry picked from commit 985f4f7c725ae1a9f85dbc112e5bc340a34a034b)
|
||||
|
||||
7.0.3-12 | 2024-11-14 11:33:09 -0700
|
||||
|
||||
* GH-3978: Bump zeekjs to 0.12.1 (Arne Welzel, Corelight)
|
||||
7ec0953 ci: Bump actions/upload-artifact
|
||||
8e5914d ci/docker: Bump distros, update some OBS repos
|
||||
129b737 Fix crash during shutdown
|
||||
|
||||
(cherry picked from commit d74b073852b748aca7793f557f0a5378cb30ca19)
|
||||
|
||||
7.0.3-11 | 2024-11-14 11:31:35 -0700
|
||||
|
||||
* GH-3962: Prevent non-Modbus on port 502 to be reported as Modbus (Emmanuele Zambon)
|
||||
|
||||
This commit prevents most non-Modbus TCP traffic on port 502 to be
|
||||
reported as Modbus in conn.log as well as in modbus.log.
|
||||
To do so, we have introduced two &enforce checks in the Modbus
|
||||
protocol definition that checks that some specific fields of the
|
||||
(supposedly) Modbus header are compatible with values specified in
|
||||
the specs.
|
||||
|
||||
(cherry picked from commit 4763282f36d13808b58948cc378a7df00201c9f5)
|
||||
|
||||
7.0.3-10 | 2024-11-14 11:30:00 -0700
|
||||
|
||||
* GH-3957: input/Raw: Rework GetLine() (Arne Welzel, Corelight)
|
||||
|
||||
(cherry picked from commit 2a23e9fc1962419e41133689c2a682455d24e35e)
|
||||
|
||||
* GH-215: POP3: Rework unbounded pending command fix (Arne Welzel, Corelight)
|
||||
|
||||
(cherry picked from commit 2a23e9fc1962419e41133689c2a682455d24e35e)
|
||||
|
||||
7.0.3-9 | 2024-11-14 10:21:55 -0700
|
||||
|
||||
* import of GH-4022 BTest additions (Vern Paxson, Corelight)
|
||||
ZAM baseline update
|
||||
|
||||
* fix for setting object locations to avoid use-after-free situation (Vern Paxson, Corelight)
|
||||
|
||||
* fixes for script optimization of coerce-to-any expressions (Vern Paxson, Corelight)
|
||||
|
||||
* porting of GH-4022 (Vern Paxson, Corelight)
|
||||
|
||||
* porting of GH-4016 (Vern Paxson, Corelight)
|
||||
|
||||
* porting of GH-4013 (Vern Paxson, Corelight)
|
||||
|
||||
* fixed access to uninitialized memory in ZAM's "cat" built-in (Vern Paxson, Corelight)
|
||||
|
||||
7.0.3-1 | 2024-10-18 17:15:02 -0700
|
||||
|
||||
* Merge remote-tracking branch 'origin/topic/bbannier/fix-docs-ci-again' (Christian Kreibich, Corelight)
|
||||
|
||||
* origin/topic/bbannier/fix-docs-ci-again:
|
||||
Fix installation of Python packages in generate docs CI job again
|
||||
|
||||
(cherry picked from commit c28442a9a178b735e3fe1b5f5938f922a5aa7a66)
|
||||
|
||||
7.0.3 | 2024-10-04 15:42:14 -0700
|
||||
|
||||
* Update CHANGES, VERSION, and NEWS for 7.0.3 release (Christian Kreibich, Corelight)
|
||||
|
||||
7.0.2-5 | 2024-10-04 10:46:01 -0700
|
||||
|
||||
* Merge remote-tracking branch 'security/topic/awelzel/215-pop3-mail-null-deref' (Christian Kreibich, Corelight)
|
||||
|
||||
* security/topic/awelzel/215-pop3-mail-null-deref:
|
||||
POP3: Rework unbounded pending command fix
|
||||
|
||||
(cherry picked from commit 7fea32c6edc5d4d14646366f87c9208c8c9cf555)
|
||||
|
||||
7.0.2-4 | 2024-10-04 10:28:13 -0700
|
||||
|
||||
* Update docs submodule [nomail] [skip ci] (Christian Kreibich, Corelight)
|
||||
|
||||
7.0.2-3 | 2024-10-04 09:54:48 -0700
|
||||
|
||||
* Bump auxil/spicy to latest release (Benjamin Bannier, Corelight)
|
||||
|
||||
* Install procps in OpenSuse Leap images (Benjamin Bannier, Corelight)
|
||||
|
||||
(cherry picked from commit a27066e3fc58d70401359887fcf34bd0bb83d433)
|
||||
|
||||
7.0.2 | 2024-09-23 12:01:55 -0700
|
||||
|
||||
* pop3: Remove unused headers (Arne Welzel, Corelight)
|
||||
|
||||
(cherry picked from commit 702fb031a4ea2b00364d6a7321384a45551ce3a2)
|
||||
|
||||
* pop3: Prevent unbounded state growth (Arne Welzel, Corelight)
|
||||
|
||||
The cmds list may grow unbounded due to the POP3 analyzer being in
|
||||
multiLine mode after seeing `AUTH` in a Redis connection, but never
|
||||
a `.` terminator. This can easily be provoked by the Redis ping
|
||||
command.
|
||||
|
||||
This adds two heuristics: 1) Forcefully process the oldest commands in
|
||||
the cmds list and cap it at max_pending_commands. 2) Start raising
|
||||
analyzer violations if the client has been using more than
|
||||
max_unknown_client_commands commands (default 10).
|
||||
|
||||
(cherry picked from commit 702fb031a4ea2b00364d6a7321384a45551ce3a2)
|
||||
|
||||
* btest/pop3: Add somewhat more elaborate testing (Arne Welzel, Corelight)
|
||||
|
||||
PCAP taken from here: https://tranalyzer.com/tutorial/pop and reference
|
||||
added to Traces/README.
|
||||
|
||||
(cherry picked from commit 702fb031a4ea2b00364d6a7321384a45551ce3a2)
|
||||
|
||||
7.0.1-6 | 2024-09-23 10:01:55 -0700
|
||||
|
||||
* Remove core.negative-time btest (Tim Wojtulewicz, Corelight)
|
||||
|
||||
This test is failing with libpcap libpcap 1.10.5; for more information
|
||||
see https://github.com/zeek/zeek/issues/3921
|
||||
|
||||
(cherry picked from commit 899f7297d7b8c83ef2640e7cf40d3f230e42cc6b)
|
||||
|
||||
* Update broker submodule (Arne Welzel, Corelight)
|
||||
|
||||
...to pin prometheus to 1.2.4
|
||||
|
||||
(cherry picked from commit f24bc1ee882b3b266ca9dc325a1b5584addb155e)
|
||||
|
||||
* telemetry: Move callbacks to Zeek (Arne Welzel, Corelight)
|
||||
|
||||
Now that we run callbacks on the main loop, we can move callback support
|
||||
for Counter and Gauge instances directly into Zeek and don't need to patch
|
||||
prometheus-cpp anymore.
|
||||
|
||||
(cherry picked from commit f24bc1ee882b3b266ca9dc325a1b5584addb155e)
|
||||
|
||||
* auxil/prometheus-cpp: Pin to 1.2.4 (Arne Welzel, Corelight)
|
||||
|
||||
(cherry picked from commit f24bc1ee882b3b266ca9dc325a1b5584addb155e)
|
||||
|
||||
* btest/ldap: Add regression test for #3919 (Arne Welzel, Corelight)
|
||||
|
||||
This works as expected in master, it's just that we forgot to backport
|
||||
PR #3845 to 7.0.1. Add the PCAP from Martin anyhow.
|
||||
|
||||
(cherry picked from commit a339cfa4c007762c6fbb16a35576220851fe4a82)
|
||||
|
||||
* GH-3853: ldap: Recognize SASL+SPNEGO+NTLMSSP (Arne Welzel, Corelight)
|
||||
|
||||
The ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap file was harvested
|
||||
from the CTU-SME-11 (Experiment-VM-Microsoft-Windows7AD-1) dataset
|
||||
at https://zenodo.org/records/7958259 (DOI 10.5281/zenodo.7958258).
|
||||
|
||||
(cherry picked from commit 152bbbd680e6c329255dc28d57cd506e7d2c09ff)
|
||||
|
||||
* Bump auxil/spicy to latest release (Benjamin Bannier, Corelight)
|
||||
|
||||
7.0.1 | 2024-09-03 13:04:23 -0700
|
||||
|
||||
* Update CHANGES, VERSION, and NEWS for 7.0.1 release (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Update zeek-aux submodule to pick up zeek-archiver permissions fix (Tim Wojtulewicz, Corelight)
|
||||
|
||||
7.0.0-14 | 2024-09-03 09:02:19 -0700
|
||||
|
||||
* Bump auxil/spicy to latest release (Benjamin Bannier, Corelight)
|
||||
|
||||
7.0.0-11 | 2024-08-30 12:38:59 -0700
|
||||
|
||||
* Spicy: Register well-known ports through an event handler. (Robin Sommer, Corelight)
|
||||
|
||||
This avoids the earlier problem of not tracking ports correctly in
|
||||
scriptland, while still supporting `port` in EVT files and `%port` in
|
||||
Spicy files.
|
||||
|
||||
As it turns out we are already following the same approach for file
|
||||
analyzers' MIME types, so I'm applying the same pattern: it's one
|
||||
event per port, without further customization points. That leaves the
|
||||
patch pretty small after all while fixing the original issue.
|
||||
|
||||
(cherry picked from commit a2079bcda6e40180b888240a281c12cc0ca735be)
|
||||
|
||||
* Revert "Remove deprecated port/ports fields for spicy analyzers" (Robin Sommer, Corelight)
|
||||
|
||||
This reverts commit 15d404dd191a723960e4efd956eec22739d3f1c2.
|
||||
|
||||
(cherry picked from commit a2079bcda6e40180b888240a281c12cc0ca735be)
|
||||
|
||||
7.0.0-9 | 2024-08-30 11:47:39 -0700
|
||||
|
||||
* ldap: Promote uint8 to uint64 before shifting (Arne Welzel, Corelight)
|
||||
|
||||
(cherry picked from commit 97fa7cdc0a49869ee6605fac9cfc15f11d8c855b)
|
||||
|
||||
* ldap: Add heuristic for wrap tokens (Arne Welzel, Corelight)
|
||||
|
||||
Instead of dissecting the GSSAPI handshake, add another heuristic
|
||||
into MaybeEncrypted to check for the WRAP token identifier.
|
||||
|
||||
After this change, the pcap on the following ticket is processed
|
||||
nicely: https://gitlab.com/wireshark/migration-test/-/issues/9398
|
||||
|
||||
(cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)
|
||||
|
||||
* ldap: Ignore ec/rrc for sealed wrap tokens (Arne Welzel, Corelight)
|
||||
|
||||
It shouldn't matter for the encrypted payload that we'll
|
||||
just consume and ignore.
|
||||
|
||||
(cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)
|
||||
|
||||
* ldap: Add LDAP sample with SASL-SRP mechanism (Arne Welzel, Corelight)
|
||||
|
||||
This is what @dopheide-esnet actually saw. Produced with a custom
|
||||
cyrus-sasl and openldap build :-(
|
||||
|
||||
(cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)
|
||||
|
||||
* ldap: Reintroduce encryption after SASL heuristic (Arne Welzel, Corelight)
|
||||
|
||||
@dopheide-esnet provided sample captures where SASL SRP is used as
|
||||
a SASL mechanism and the follow-up LDAP messages are encrypted. It's
|
||||
not clear how to determine whether encryption will or will not happen,
|
||||
so re-add a heuristic to determine this based on the first byte of
|
||||
the first message *after* the successful bindResponse handshake. If
|
||||
that byte is 0x30, assume cleartext.
|
||||
|
||||
I haven't been able to produce such pcaps, unfortunately, but the
|
||||
cleartext path is tested via the existing sasl-ntlm.pcap.
|
||||
|
||||
(cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)
|
||||
|
||||
* ldap: Fix assuming GSS-SPNEGO for all bindResponses (Arne Welzel, Corelight)
|
||||
|
||||
In retrospect that's an obvious bug.
|
||||
|
||||
(cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)
|
||||
|
||||
* ldap: Implement extended request/response and StartTLS support (Arne Welzel, Corelight)
|
||||
|
||||
PCAP was produced with a local OpenLDAP server configured to support StartTLS.
|
||||
|
||||
This puts the Zeek calls into a separate ldap_zeek.spicy file/module
|
||||
to separate it from LDAP.
|
||||
|
||||
(cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)
|
||||
|
||||
* ldap: Remove MessageWrapper with magic 0x30 searching (Arne Welzel, Corelight)
|
||||
|
||||
This unit implements a heuristic to search for the 0x30 sequence
|
||||
byte if Message couldn't readily be parsed. Remove it with the
|
||||
idea of explicit and predictable support for SASL mechanisms.
|
||||
|
||||
(cherry picked from commit 2ea3a651bd83b0dfa15924417e4667241531b57b)
|
||||
|
||||
* ldap: Harden parsing a bit (Arne Welzel, Corelight)
|
||||
|
||||
ASN1Message(True) may go off parsing arbitrary input data as
|
||||
"something ASN.1" This could be GBs of octet strings or just very
|
||||
long sequences. Avoid this by open-coding some top-level types expected.
|
||||
|
||||
This also tries to avoid some of the &parse-from usages that result
|
||||
in unnecessary copies of data.
|
||||
|
||||
Adds a locally generated PCAP with addRequest/addResponse that we
|
||||
don't currently handle.
|
||||
|
||||
(cherry picked from commit 2ea3a651bd83b0dfa15924417e4667241531b57b)
|
||||
|
||||
* ldap: Handle integrity-only KRB wrap tokens (Arne Welzel, Corelight)
|
||||
|
||||
Mostly staring at the PCAPs and opened a few RFCs. For now, only if the
|
||||
MS_KRB5 OID is used and accepted in a bind response, start stripping
|
||||
KRB5 wrap tokens for both, client and server traffic.
|
||||
|
||||
Would probably be nice to forward the GSS-API data to the analyzer...
|
||||
|
||||
(cherry picked from commit 2ea3a651bd83b0dfa15924417e4667241531b57b)
|
||||
|
||||
* http: fix password capture when enabled (Pierre Lalet)
|
||||
|
||||
The current implementation would only log, if the password contains a
|
||||
colon, the part before the first colon (e.g., the password
|
||||
`password:password` would be logged as `password`).
|
||||
|
||||
(cherry picked from commit c27e18631c5d9c6f04c230bd421c9750a1f02342)
|
||||
|
||||
* Analyzer: Do not add child analyzers when finished (Arne Welzel, Corelight)
|
||||
|
||||
Depending on an analyzer's implementation, its Done() method may
|
||||
attempt to access analyzer or connection state when executing.
|
||||
When this happens in the destructor of the parent analyzer during
|
||||
the process of destructing a connection, this state may have been
|
||||
deleted, resulting in use-after-free crashes or worse memory
|
||||
corruption.
|
||||
|
||||
The following cases have been observed in the wild for when this happens.
|
||||
|
||||
* PIA matching during Done() for undelivered TCP data enables a Spicy
|
||||
based analyzer which in turn attempts to raise an analyzer violation
|
||||
during Done()->EndOfData().
|
||||
|
||||
* Spicy analyzers attaching new analyzers during their Done() processing
|
||||
which in turn attempt to use TCP() (to call FindChild()) during Done()
|
||||
while the analyzer tree / connection is being destructed.
|
||||
|
||||
The second scenario was previously found to happen in the HTTP analyzer
|
||||
and fixed with 6ef9423f3cff13e6c73f97eb6a3a27d6f64cc320.
|
||||
|
||||
Plug these scenarios by short-circuiting AddChildAnalyzer() if the analyzer
|
||||
or connection have finished or are being finished.
|
||||
|
||||
(cherry picked from commit 45b33bf5c17d5e8cf6c777a9bd57e4a803dfad19)
|
||||
|
||||
* TCP_Reassembler: Fix IsOrig() position in Match() call (Arne Welzel, Corelight)
|
||||
|
||||
Found during a debug session with @rsmmr. Undelivered TCP data
|
||||
would only be matched for the responder and eol set to IsOrig().
|
||||
|
||||
(cherry picked from commit 4a4cbf25765f387f0aa20277afd133918292b9c4)
|
||||
|
||||
* Process metric callbacks from the main-loop thread (Tim Wojtulewicz, Corelight)
|
||||
|
||||
This avoids the callbacks from being processed on the worker thread
|
||||
spawned by Civetweb. It fixes data race issues with lookups involving
|
||||
global variables, amongst other threading issues.
|
||||
|
||||
(cherry picked from commit 3c3853dc7da9aad94a9b2d5a143cc7bd9476ea7a)
|
||||
|
||||
* CI: Use 16GB of memory for FreeBSD builds (Tim Wojtulewicz, Corelight)
|
||||
|
||||
(cherry picked from commit 9d9cc51e9dd93668cd332aa1aef283c9dc23a677)
|
||||
|
||||
7.0.0 | 2024-07-31 09:37:03 -0700
|
||||
|
||||
* Release 7.0.0.
|
||||
|
||||
7.0.0-rc4.4 | 2024-07-31 09:36:51 -0700
|
||||
|
||||
* Allowlist a name for typos check (Benjamin Bannier, Corelight)
|
||||
|
||||
* Bump Spicy to latest release (Benjamin Bannier, Corelight)
|
||||
|
||||
7.0.0-rc4 | 2024-07-26 10:12:34 -0700
|
||||
|
||||
* Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight)
|
||||
|
||||
This in particular pulls in a fix for zeek/spicy#1808.
|
||||
|
||||
(cherry picked from commit 4c0c7581c835b4dcd5339a4b34c2b82fcfc40dc3)
|
||||
|
||||
7.0.0-rc3 | 2024-07-25 10:52:29 -0700
|
||||
|
||||
* Generate docs for 7.0.0-rc3 (Tim Wojtulewicz)
|
||||
|
||||
* Bump zeek-testing-cluster to reflect deprecation of prometheus.zeek (Christian Kreibich, Corelight)
|
||||
|
||||
(cherry picked from commit 146cf99ff62d729705c155b44199a674911ade09)
|
||||
|
||||
* Update 7.0 NEWS with blurb about multi-PDU parsing causing increased load [nomail] [skip ci] (Tim Wojtulewicz, Corelight)
|
||||
|
||||
(cherry picked from commit bd208f4c54f66074315479071c810d792e69f96b)
|
||||
|
||||
* Fix handling of zero-length SMB2 error responses (Tim Wojtulewicz, Corelight)
|
||||
|
||||
(cherry picked from commit bd208f4c54f66074315479071c810d792e69f96b)
|
||||
|
||||
* Update Mozilla CA list and CT list (Johanna Amann, Corelight)
|
||||
|
||||
(cherry picked from commit cb88f6316c7341da7a2af397932a145be3a0cc29)
|
||||
|
||||
* Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight)
|
||||
|
||||
(cherry picked from commit da7c3d91385195a7a4ba957e46743bc52a9d4ecb)
|
||||
|
||||
7.0.0-rc2.7 | 2024-07-24 17:00:51 -0700
|
||||
|
||||
* Add contributors to 7.0.0 NEWS entry (Christian Kreibich, Corelight)
|
||||
|
||||
* telemetry: Deprecate prometheus.zeek policy script (Arne Welzel, Corelight)
|
||||
|
||||
* Update broker submodule [nomail] (Tim Wojtulewicz, Corelight)
|
||||
|
||||
7.0.0-rc2 | 2024-07-18 14:31:49 -0700
|
||||
|
||||
* Bump zeek-testing-cluster to pull in tee SIGPIPE fix (Christian Kreibich, Corelight)
|
||||
|
||||
(cherry picked from commit b51a46f94d4012119fd27d5e46328c70af7270a2)
|
||||
|
||||
* CI: Set FETCH_CONTENT_FULLY_DISCONNECTED flag for configure (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Update broker and cmake submodules [nomail] (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Fix warning about grealpath when running 'make dist' on Linux (Tim Wojtulewicz, Corelight)
|
||||
|
||||
(cherry picked from commit e4716b6c912f86cf6b2afd6979c38667c45add95)
|
||||
|
||||
7.0.0-rc1 | 2024-07-11 12:21:02 -0700
|
||||
|
||||
* Updating submodule(s) [nomail] (Tim Wojtulewicz, Corelight)
|
||||
|
||||
7.0.0-dev.467 | 2024-07-11 12:14:52 -0700
|
||||
|
||||
* Update the scripts.base.frameworks.telemetry.internal-metrics test (Christian Kreibich, Corelight)
|
||||
|
|
2
Makefile
2
Makefile
|
@ -9,7 +9,7 @@ BUILD=build
|
|||
REPO=$$(cd $(CURDIR) && basename $$(git config --get remote.origin.url | sed 's/^[^:]*://g'))
|
||||
VERSION_FULL=$(REPO)-$$(cd $(CURDIR) && cat VERSION)
|
||||
GITDIR=$$(test -f .git && echo $$(cut -d" " -f2 .git) || echo .git)
|
||||
REALPATH=$$($$(realpath --relative-to=$(pwd) . >/dev/null 2>&1) && echo 'realpath' || echo 'grealpath')
|
||||
REALPATH=$$($$(realpath --relative-to=$(shell pwd) . >/dev/null 2>&1) && echo 'realpath' || echo 'grealpath')
|
||||
|
||||
all: configured
|
||||
$(MAKE) -C $(BUILD) $@
|
||||
|
|
112
NEWS
112
NEWS
|
@ -3,9 +3,111 @@ This document summarizes the most important changes in the current Zeek
|
|||
release. For an exhaustive list of changes, see the ``CHANGES`` file
|
||||
(note that submodules, such as Broker, come with their own ``CHANGES``.)
|
||||
|
||||
Zeek 7.0.5
|
||||
==========
|
||||
|
||||
This release fixes the following security issues:
|
||||
|
||||
- Large QUIC packets can cause Zeek to overflow memory and potentially
|
||||
crash. Due to the possibility of receiving these packets from remote hosts,
|
||||
this is a DoS risk. The fix included limits the payload length to 10000 bytes
|
||||
and reports an error for those cases, as well as fixing the memory allocation
|
||||
to not use a fixed-size buffer for all packets.
|
||||
|
||||
This release fixes the following bugs:
|
||||
|
||||
- The ZAM script optimization feature gained some fixes for some minor memory
|
||||
leaks.
|
||||
|
||||
- The ZeekJS submodule was updated to v0.14.0. In certain environments, ZeekJS
|
||||
would fail to start a debugging thread due to limited stack size, producing
|
||||
spurious log messages. This was fixed by not starting the debugging thread by
|
||||
default.
|
||||
|
||||
Zeek 7.0.4
|
||||
==========
|
||||
|
||||
This release fixes the following bugs:
|
||||
|
||||
- The community-id-logging.zeek policy script was used to set
|
||||
``c$conn$community_id`` during ``new_connection()`` rather than
|
||||
``connection_state_remove()``, allowing other scripts to reuse its value
|
||||
early.
|
||||
|
||||
- The input framework will no longer get stuck and use 100% of the CPU when
|
||||
encountering lines not immediately terminated by a new line.
|
||||
|
||||
- The Modbus analyzer added some additional protocol checks and should no longer
|
||||
over-match on traffic that's not specifically on port 502.
|
||||
|
||||
- ZeekJS was updated to version v0.13.2, which brings support for newer versions
|
||||
of Node.js and a fix for a segfault when running under Alpine.
|
||||
|
||||
- A minor bug was fixed in the detect-sqli policy script to handle spaces being
|
||||
encoded as plus signs.
|
||||
|
||||
Zeek 7.0.3
|
||||
==========
|
||||
|
||||
This release fixes the following security issue:
|
||||
|
||||
- Adding to the POP3 hardening in 7.0.2, the parser now simply discards too many
|
||||
pending commands, rather than attempting to process them. Further, invalid
|
||||
server responses do not result in command completion anymore. Processing
|
||||
out-of-order commands or finishing commands based on invalid server responses
|
||||
could result in inconsistent analyzer state, potentially triggering null
|
||||
pointer references for crafted traffic.
|
||||
|
||||
This release ships with Spicy 1.11.3, a bugfix release. Please refer to its
|
||||
release notes for details.
|
||||
|
||||
Zeek 7.0.2
|
||||
==========
|
||||
|
||||
This release fixes the following security issues:
|
||||
|
||||
- The POP3 parser has been hardened to avoid unbounded state growth in the
|
||||
face of one-sided traffic capture or when enabled for non-POP3 traffic.
|
||||
Concretely, the Redis protocol's AUTH mechanism enables the POP3 analyzer
|
||||
for such connections through DPD.
|
||||
|
||||
This release fixes the following bugs:
|
||||
|
||||
- Support for SASL+SPNEGO+NTLMSSP was added to the LDAP analyzer.
|
||||
|
||||
- Telemetry callbacks are now handled via Zeek instead of depending on the
|
||||
prometheus-cpp library to handle them.
|
||||
|
||||
Zeek 7.0.1
|
||||
==========
|
||||
|
||||
This release fixes the following bugs:
|
||||
|
||||
- HTTP passwords with colon characters in them are now correctly logged.
|
||||
|
||||
- The LDAP analyzer now supports handling of non-sealed GSS-API WRAP tokens.
|
||||
|
||||
- Heuristics for parsing SASL encrypted and signed LDAP traffic have been made
|
||||
more strict and predictable. Please provide input if this results in less
|
||||
visibility in your environment.
|
||||
|
||||
- StartTLS support was added to the LDAP analyzer. The SSL analyzer is enabled
|
||||
for connections where client and server negotiate to TLS through the extended
|
||||
request/response mechanism.
|
||||
|
||||
- Specify less-strict permissions for directories and files created by
|
||||
zeek-archiver to play more nicely with user's umask setting.
|
||||
|
||||
Zeek 7.0.0
|
||||
==========
|
||||
|
||||
We would like to thank the following people for their contributions to this
|
||||
release: Christopher Knill (cknill), Jan Grashöfer (J-Gras), Martin van
|
||||
Hensbergen (mvhensbergen), Matti Bispham (mbispham), Mike Dopheide
|
||||
(dopheide-esnet), Oleksandr Pastushkov (opastushkov), Peter Cullen (pbcullen),
|
||||
Steve Smoot (stevesmoot), Tanner Kvarfordt (Kardbord), Victor Dvornikov
|
||||
(lydiym).
|
||||
|
||||
Breaking Changes
|
||||
----------------
|
||||
|
||||
|
@ -20,7 +122,7 @@ Breaking Changes
|
|||
|
||||
All of the metrics-related script-level options, type, and methods have been
|
||||
moved to the Telemetry framework:
|
||||
* Option ``Broker::metrics_port` is now ``Telemetry::metrics_port``
|
||||
* Option ``Broker::metrics_port`` is now ``Telemetry::metrics_port``
|
||||
* Option ``Broker::metrics_export_endpoint_name`` is now ``Telemetry::metrics_endpoint_name``
|
||||
|
||||
The following options have been removed:
|
||||
|
@ -85,7 +187,8 @@ New Functionality
|
|||
environment variable configures the addition.
|
||||
|
||||
- SMB2 packets containing multiple PDUs now correctly parse all of the headers,
|
||||
instead of just the first one and ignoring the rest.
|
||||
instead of just the first one and ignoring the rest. This may cause increased
|
||||
CPU load on SMB2-heavy networks.
|
||||
|
||||
- The new built-in function ``lookup_connection_analyzer_id()`` retrieves the
|
||||
numeric identifier of an analyzer associated with a connection. This enables
|
||||
|
@ -167,6 +270,11 @@ Deprecated Functionality
|
|||
- The ``--disable-archiver`` configure flag no longer does anything and will be
|
||||
removed in 7.1. zeek-archiver has moved into the zeek-aux repository.
|
||||
|
||||
- The policy/frameworks/telemetry/prometheus.zeek script has been deprecated
|
||||
and will be removed with Zeek 7.1. Setting the ``metrics_port`` field on a
|
||||
``Cluster::Node`` implies listening on that port and exposing telemetry
|
||||
in Prometheus format.
|
||||
|
||||
Zeek 6.2.0
|
||||
==========
|
||||
|
||||
|
|
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
|||
7.0.0-dev.467
|
||||
7.0.5
|
||||
|
|
|
@ -1 +1 @@
|
|||
Subproject commit fada26ae504981f7f5524bf2a5c82ae49acd556d
|
||||
Subproject commit a80bf420aa6f55b4eb959ae89c184522a096a119
|
|
@ -1 +1 @@
|
|||
Subproject commit 2fec7205d1a9cb4829b86c943d599696d53de85c
|
||||
Subproject commit ad99e21f4706193670c42b36c9824dc997f4c475
|
|
@ -1 +1 @@
|
|||
Subproject commit 6581b1855a5ea8cc102c66b4ac6a431fc67484a0
|
||||
Subproject commit 31880e215ff6f85ad95507d7a760e81ae2e0999d
|
|
@ -1 +1 @@
|
|||
Subproject commit 8a66cd60fb29a1237b5070854cb194f43a3f7a30
|
||||
Subproject commit e850412ab5dea10ee2ebb98e42527d80fcf9a7ed
|
|
@ -1 +1 @@
|
|||
Subproject commit 230f53c1596ee88289e96397f0810ca60ed897e3
|
||||
Subproject commit 79b0c2126fa0178dbc2e37536588fcd1db9f4443
|
|
@ -21,6 +21,7 @@ RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.5
|
|||
libpcap-devel \
|
||||
make \
|
||||
openssh \
|
||||
procps \
|
||||
python311 \
|
||||
python311-devel \
|
||||
python311-pip \
|
||||
|
|
|
@ -21,6 +21,7 @@ RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.6
|
|||
libpcap-devel \
|
||||
make \
|
||||
openssh \
|
||||
procps \
|
||||
python312 \
|
||||
python312-devel \
|
||||
python312-pip \
|
||||
|
|
14
ci/test.sh
14
ci/test.sh
|
@ -40,6 +40,11 @@ function banner {
|
|||
}
|
||||
|
||||
function run_unit_tests {
|
||||
if [[ ${ZEEK_CI_SKIP_UNIT_TESTS} -eq 1 ]]; then
|
||||
printf "Skipping unit tests as requested by task configureation\n\n"
|
||||
return 0
|
||||
fi
|
||||
|
||||
banner "Running unit tests"
|
||||
|
||||
pushd build
|
||||
|
@ -60,7 +65,7 @@ function run_btests {
|
|||
pushd testing/btest
|
||||
|
||||
ZEEK_PROFILER_FILE=$(pwd)/.tmp/script-coverage/XXXXXX \
|
||||
${BTEST} -z ${ZEEK_CI_BTEST_RETRIES} -d -A -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} || result=1
|
||||
${BTEST} -z ${ZEEK_CI_BTEST_RETRIES} -d -A -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} ${ZEEK_CI_BTEST_EXTRA_ARGS} || result=1
|
||||
make coverage
|
||||
prep_artifacts
|
||||
popd
|
||||
|
@ -68,11 +73,16 @@ function run_btests {
|
|||
}
|
||||
|
||||
function run_external_btests {
|
||||
if [[ ${ZEEK_CI_SKIP_EXTERNAL_BTESTS} -eq 1 ]]; then
|
||||
printf "Skipping external tests as requested by task configuration\n\n"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local zeek_testing_pid=""
|
||||
local zeek_testing_pid_private=""
|
||||
pushd testing/external/zeek-testing
|
||||
ZEEK_PROFILER_FILE=$(pwd)/.tmp/script-coverage/XXXXXX \
|
||||
${BTEST} -d -A -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} >btest.out 2>&1 &
|
||||
${BTEST} -d -A -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} ${ZEEK_CI_BTEST_EXTRA_ARGS} >btest.out 2>&1 &
|
||||
zeek_testing_pid=$!
|
||||
popd
|
||||
|
||||
|
|
2
cmake
2
cmake
|
@ -1 +1 @@
|
|||
Subproject commit 690483f76c149ffa8e035b612b406b0964f9886f
|
||||
Subproject commit 2d42baf8e63a7494224aa9d02afa2cb43ddb96b8
|
2
doc
2
doc
|
@ -1 +1 @@
|
|||
Subproject commit f65820ff0faf2887799fe691a443b5db39eeed54
|
||||
Subproject commit 6fc35ef451358576c8a730ed78e54d5509897ee8
|
|
@ -47,12 +47,18 @@ export {
|
|||
|
||||
# Marked with &is_used to suppress complaints when there aren't any
|
||||
# Spicy file analyzers loaded, and hence this event can't be generated.
|
||||
# The attribute is only supported for Zeek 5.0 and higher.
|
||||
event spicy_analyzer_for_mime_type(a: Files::Tag, mt: string) &is_used
|
||||
{
|
||||
Files::register_for_mime_type(a, mt);
|
||||
}
|
||||
|
||||
# Marked with &is_used to suppress complaints when there aren't any
|
||||
# Spicy protocol analyzers loaded, and hence this event can't be generated.
|
||||
event spicy_analyzer_for_port(a: Analyzer::Tag, p: port) &is_used
|
||||
{
|
||||
Analyzer::register_for_port(a, p);
|
||||
}
|
||||
|
||||
function enable_protocol_analyzer(tag: Analyzer::Tag) : bool
|
||||
{
|
||||
return Spicy::__toggle_analyzer(tag, T);
|
||||
|
|
|
@ -1,3 +1 @@
|
|||
@load ./main
|
||||
|
||||
@load base/frameworks/cluster
|
||||
|
|
|
@ -5,10 +5,28 @@
|
|||
##! enabled by setting :zeek:see:`Telemetry::metrics_port`.
|
||||
|
||||
@load base/misc/version
|
||||
@load base/frameworks/cluster
|
||||
|
||||
@load base/frameworks/telemetry/options
|
||||
|
||||
module Telemetry;
|
||||
|
||||
# In a cluster configuration, open the port number for metrics
|
||||
# from the cluster node configuration for exporting data to
|
||||
# Prometheus.
|
||||
#
|
||||
# The manager node will also provide a ``/services.json`` endpoint
|
||||
# for the HTTP Service Discovery system in Prometheus to use for
|
||||
# configuration. This endpoint will include information for all of
|
||||
# the other nodes in the cluster.
|
||||
@if ( Cluster::is_enabled() )
|
||||
redef Telemetry::metrics_endpoint_name = Cluster::node;
|
||||
|
||||
@if ( Cluster::local_node_metrics_port() != 0/unknown )
|
||||
redef Telemetry::metrics_port = Cluster::local_node_metrics_port();
|
||||
@endif
|
||||
@endif
|
||||
|
||||
export {
|
||||
## Alias for a vector of label values.
|
||||
type labels_vector: vector of string;
|
||||
|
|
|
@ -2903,6 +2903,22 @@ export {
|
|||
|
||||
} # end export
|
||||
|
||||
module POP3;
|
||||
|
||||
export {
|
||||
## How many commands a POP3 client may have pending
|
||||
## before Zeek forcefully removes the oldest.
|
||||
##
|
||||
## Setting this value to 0 removes the limit.
|
||||
const max_pending_commands = 10 &redef;
|
||||
|
||||
## How many invalid commands a POP3 client may use
|
||||
## before Zeek starts raising analyzer violations.
|
||||
##
|
||||
## Setting this value to 0 removes the limit.
|
||||
const max_unknown_client_commands = 10 &redef;
|
||||
|
||||
} # end export
|
||||
|
||||
module Threading;
|
||||
|
||||
|
@ -5883,6 +5899,13 @@ export {
|
|||
|
||||
type MetricVector : vector of Metric;
|
||||
type HistogramMetricVector : vector of HistogramMetric;
|
||||
|
||||
## Maximum amount of time for CivetWeb HTTP threads to
|
||||
## wait for metric callbacks to complete on the IO loop.
|
||||
const callback_timeout: interval = 5sec &redef;
|
||||
|
||||
## Number of CivetWeb threads to use.
|
||||
const civetweb_threads: count = 2 &redef;
|
||||
}
|
||||
|
||||
module GLOBAL;
|
||||
|
|
|
@ -338,8 +338,8 @@ event http_header(c: connection, is_orig: bool, name: string, value: string) &pr
|
|||
if ( /^[bB][aA][sS][iI][cC] / in value )
|
||||
{
|
||||
local userpass = decode_base64_conn(c$id, sub(value, /[bB][aA][sS][iI][cC][[:blank:]]+/, ""));
|
||||
local up = split_string(userpass, /:/);
|
||||
if ( |up| >= 2 )
|
||||
local up = split_string1(userpass, /:/);
|
||||
if ( |up| == 2 )
|
||||
{
|
||||
c$http$username = up[0];
|
||||
if ( c$http$capture_password )
|
||||
|
|
|
@ -120,4 +120,11 @@ export {
|
|||
"searching", [ LDAP::SearchDerefAlias_DEREF_FINDING_BASE ] =
|
||||
"finding", [ LDAP::SearchDerefAlias_DEREF_ALWAYS ] = "always", }
|
||||
&default="unknown";
|
||||
|
||||
const EXTENDED_REQUESTS = {
|
||||
# StartTLS, https://datatracker.ietf.org/doc/html/rfc4511#section-4.14.1
|
||||
[ "1.3.6.1.4.1.1466.20037" ] = "StartTLS",
|
||||
# whoami, https://datatracker.ietf.org/doc/html/rfc4532#section-2
|
||||
[ "1.3.6.1.4.1.4203.1.11.3" ] = "whoami",
|
||||
} &default="unknown" &redef;
|
||||
}
|
||||
|
|
|
@ -258,6 +258,9 @@ event LDAP::message(c: connection,
|
|||
}
|
||||
|
||||
m$object = object;
|
||||
|
||||
if ( opcode == LDAP::ProtocolOpcode_EXTENDED_REQUEST )
|
||||
m$object += fmt(" (%s)", EXTENDED_REQUESTS[object]);
|
||||
}
|
||||
|
||||
if ( argument != "" ) {
|
||||
|
|
|
@ -98,3 +98,44 @@ global LDAP::search_result_entry: event (
|
|||
message_id: int,
|
||||
object_name: string
|
||||
);
|
||||
|
||||
## Event generated for each ExtendedRequest in LDAP messages.
|
||||
##
|
||||
## c: The connection.
|
||||
##
|
||||
## message_id: The messageID element.
|
||||
##
|
||||
## request_name: The name of the extended request.
|
||||
##
|
||||
## request_value: The value of the extended request (empty if missing).
|
||||
global LDAP::extended_request: event (
|
||||
c: connection,
|
||||
message_id: int,
|
||||
request_name: string,
|
||||
request_value: string
|
||||
);
|
||||
|
||||
## Event generated for each ExtendedResponse in LDAP messages.
|
||||
##
|
||||
## c: The connection.
|
||||
##
|
||||
## message_id: The messageID element.
|
||||
##
|
||||
## result: The result code of the response.
|
||||
##
|
||||
## response_name: The name of the extended response (empty if missing).
|
||||
##
|
||||
## response_value: The value of the extended response (empty if missing).
|
||||
global LDAP::extended_response: event (
|
||||
c: connection,
|
||||
message_id: int,
|
||||
result: LDAP::ResultCode,
|
||||
response_name: string,
|
||||
response_value: string
|
||||
);
|
||||
|
||||
## Event generated when a plaintext LDAP connection switched to TLS.
|
||||
##
|
||||
## c: The connection.
|
||||
##
|
||||
global LDAP::starttls: event(c: connection);
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
#
|
||||
# Do not edit this file. This file is automatically generated by gen-ct-list.pl
|
||||
# File generated at Fri Feb 23 11:37:01 2024
|
||||
# File generated at Tue Jul 23 16:04:45 2024
|
||||
# File generated from https://www.gstatic.com/ct/log_list/v3/log_list.json
|
||||
# Source file generated at: 2024-02-22T12:56:21Z
|
||||
# Source file version: 32.9
|
||||
# Source file generated at: 2024-07-23T13:06:08Z
|
||||
# Source file version: 39.1
|
||||
#
|
||||
|
||||
@load base/protocols/ssl
|
||||
|
@ -12,21 +12,32 @@ redef ct_logs += {
|
|||
["\xee\xcd\xd0\x64\xd5\xdb\x1a\xce\xc5\x5c\xb7\x9d\xb4\xcd\x13\xa2\x32\x87\x46\x7c\xbc\xec\xde\xc3\x51\x48\x59\x46\x71\x1f\xb5\x9b"] = CTInfo($description="Google 'Argon2024' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x1d\xb9\x6c\xa9\xcb\x69\x94\xc5\x5c\xe6\xb6\xa6\x03\xbb\xd2\xb8\xdc\x54\x43\x17\x28\x99\x0c\x06\x01\x50\x1d\x9d\x64\xc0\x59\x46\x2b\xdc\xc8\x03\x1d\x05\xb4\x2d\xa8\x09\xf7\x99\x41\xed\x04\xfb\xe5\x57\xba\x26\x04\xf6\x11\x52\xce\x14\x65\x3b\x2f\x76\x2b\xc0"),
|
||||
["\x4e\x75\xa3\x27\x5c\x9a\x10\xc3\x38\x5b\x6c\xd4\xdf\x3f\x52\xeb\x1d\xf0\xe0\x8e\x1b\x8d\x69\xc0\xb1\xfa\x64\xb1\x62\x9a\x39\xdf"] = CTInfo($description="Google 'Argon2025h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x20\x82\xa1\xf9\x67\x68\xa8\xe4\xdb\x94\x98\xe2\xe1\x68\x87\xe4\x09\x6d\x20\x35\x33\x38\x3c\xaf\x14\xaa\xd7\x08\x18\xf0\xfd\x16\x9b\xd3\xff\x7c\x27\x82\xd4\x87\xb7\x4e\x24\x46\x3b\xfb\xae\xbe\xc8\x23\x52\x20\x2b\xaa\x44\x05\xfe\x54\xf9\xd5\xf1\x1d\x45\x9a"),
|
||||
["\x12\xf1\x4e\x34\xbd\x53\x72\x4c\x84\x06\x19\xc3\x8f\x3f\x7a\x13\xf8\xe7\xb5\x62\x87\x88\x9c\x6d\x30\x05\x84\xeb\xe5\x86\x26\x3a"] = CTInfo($description="Google 'Argon2025h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xaf\xe4\xf3\x94\x2c\xdf\xa6\x27\xb5\xfe\xb2\x61\x83\x19\xc8\x21\x3a\x23\xa8\xa9\x3d\x54\xaf\xbc\x31\x9a\x1c\xd3\xc1\xe3\xb6\xc2\xf3\x0f\xc7\xb9\xca\x3b\x1d\x79\x65\x61\x22\x25\x82\x56\x4e\x98\xe8\xaa\x26\x29\x36\x1e\x28\x60\x6f\xeb\x15\x6e\xf7\x7c\xd0\xba"),
|
||||
["\x0e\x57\x94\xbc\xf3\xae\xa9\x3e\x33\x1b\x2c\x99\x07\xb3\xf7\x90\xdf\x9b\xc2\x3d\x71\x32\x25\xdd\x21\xa9\x25\xac\x61\xc5\x4e\x21"] = CTInfo($description="Google 'Argon2026h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x07\xfc\x1e\xe8\x63\x8e\xff\x1c\x31\x8a\xfc\xb8\x1e\x19\x2b\x60\x50\x00\x3e\x8e\x9e\xda\x77\x37\xe3\xa5\xa8\xda\x8d\x94\xf8\x6b\xe8\x3d\x64\x8f\x27\x3f\x75\xb3\xfc\x6b\x12\xf0\x37\x06\x4f\x64\x58\x75\x14\x5d\x56\x52\xe6\x6a\x2b\x14\x4c\xec\x81\xd1\xea\x3e"),
|
||||
["\xd7\x6d\x7d\x10\xd1\xa7\xf5\x77\xc2\xc7\xe9\x5f\xd7\x00\xbf\xf9\x82\xc9\x33\x5a\x65\xe1\xd0\xb3\x01\x73\x17\xc0\xc8\xc5\x69\x77"] = CTInfo($description="Google 'Argon2026h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2026h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2a\x3a\x67\x8b\xfe\xba\x0c\x86\x2b\x4a\x51\x8a\xe9\x17\xfe\x7b\xa1\x76\x73\xfd\xbc\x65\x4b\xc3\x27\xbf\x4d\xf3\x5f\xa0\xca\x29\x80\x11\x20\x32\x78\xd6\x7e\xf9\x34\x60\x8c\x75\xa0\xf5\x35\x50\x9c\xa1\xd3\x49\x4d\x13\xd5\x3b\x6a\x0e\xea\x45\x9d\x24\x13\x22"),
|
||||
["\x76\xff\x88\x3f\x0a\xb6\xfb\x95\x51\xc2\x61\xcc\xf5\x87\xba\x34\xb4\xa4\xcd\xbb\x29\xdc\x68\x42\x0a\x9f\xe6\x67\x4c\x5a\x3a\x74"] = CTInfo($description="Google 'Xenon2024' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xb9\x60\xe0\x34\x1e\x35\xe4\x65\x00\x93\x4f\x90\x09\xbd\x5a\xec\x44\xdd\x8c\x0f\xce\xed\x11\x3e\x2a\x59\x46\x9a\x31\xb6\xc7\x99\xf7\xdc\xef\x3d\xcd\x8f\x86\xc2\x35\xa5\x3e\xdc\x29\xba\xbb\xf2\x54\xe2\xa8\x0c\x83\x08\x51\x06\xde\x21\x6d\x36\x50\x8e\x38\x4d"),
|
||||
["\xcf\x11\x56\xee\xd5\x2e\x7c\xaf\xf3\x87\x5b\xd9\x69\x2e\x9b\xe9\x1a\x71\x67\x4a\xb0\x17\xec\xac\x01\xd2\x5b\x77\xce\xcc\x3b\x08"] = CTInfo($description="Google 'Xenon2025h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x82\xe2\xce\x90\x40\x3f\x81\x0e\xdf\xea\xe1\x20\x2b\x5e\x2e\x30\x54\x46\x81\xb9\x58\xed\xaf\xbd\xff\x36\xa7\x9e\x0b\x5f\x6a\x6b\x91\xa5\xc1\x98\xe1\xf2\xcd\xeb\x17\x20\x70\xca\x2a\x12\xe6\x54\x78\x50\xdc\xff\x6d\xfd\x1c\xa7\xb6\x3a\x1f\xf9\x26\xa9\x1b\xbd"),
|
||||
["\xdd\xdc\xca\x34\x95\xd7\xe1\x16\x05\xe7\x95\x32\xfa\xc7\x9f\xf8\x3d\x1c\x50\xdf\xdb\x00\x3a\x14\x12\x76\x0a\x2c\xac\xbb\xc8\x2a"] = CTInfo($description="Google 'Xenon2025h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x6b\xe0\xaf\xed\x06\x7c\x3d\xef\xd9\x0e\xe4\x58\x4b\x04\xd8\x2a\x47\x99\x90\x89\x7a\xb9\x36\xa5\x75\xc8\x04\xb8\xcb\xe2\xaa\x2b\xb5\x68\x9d\x88\x29\xa2\xa5\xcf\xce\x2b\x9a\x15\x9b\xa0\x3e\x9d\x94\x1c\xb2\xb7\x4a\xf2\x51\xec\x40\xed\x62\x47\xa4\x03\x49\x86"),
|
||||
["\x96\x97\x64\xbf\x55\x58\x97\xad\xf7\x43\x87\x68\x37\x08\x42\x77\xe9\xf0\x3a\xd5\xf6\xa4\xf3\x36\x6e\x46\xa4\x3f\x0f\xca\xa9\xc6"] = CTInfo($description="Google 'Xenon2026h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x3a\x1f\xc8\xbb\xce\xd5\x90\x47\x34\xca\xca\x01\x04\x27\x21\x1c\xe2\x29\x3d\x92\xbb\x91\x45\xc7\x5a\x3e\xa5\xd4\xf2\x12\xe6\xe8\xe6\x43\xba\xf3\x7b\xc2\x38\xaf\xfc\x23\x8a\x05\x56\xeb\x03\x0a\x30\xcc\x63\x6c\xd9\x3c\xbe\xf5\x7b\x94\xba\x94\xd3\xbf\x88\x4c"),
|
||||
["\xd8\x09\x55\x3b\x94\x4f\x7a\xff\xc8\x16\x19\x6f\x94\x4f\x85\xab\xb0\xf8\xfc\x5e\x87\x55\x26\x0f\x15\xd1\x2e\x72\xbb\x45\x4b\x14"] = CTInfo($description="Google 'Xenon2026h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2026h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe5\x77\x78\x95\x71\x28\xb3\x95\xc9\xa5\xcc\x7a\x4c\xe8\x32\x03\x96\x7b\xfc\x2e\x1d\xb9\xa4\xdb\x43\xa0\xbd\x69\x72\xf9\x45\xba\x9a\xc3\xe9\x96\xd5\x70\xe7\x0d\x7e\xc9\x95\x15\x27\x8a\x72\x30\x65\x86\x43\x53\xdc\x11\x44\x18\x49\x98\x25\x68\xa7\x3c\x05\xbf"),
|
||||
["\xda\xb6\xbf\x6b\x3f\xb5\xb6\x22\x9f\x9b\xc2\xbb\x5c\x6b\xe8\x70\x91\x71\x6c\xbb\x51\x84\x85\x34\xbd\xa4\x3d\x30\x48\xd7\xfb\xab"] = CTInfo($description="Cloudflare 'Nimbus2024' Log", $operator="Cloudflare", $url="https://ct.cloudflare.com/logs/nimbus2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x77\xb1\x9b\x7b\x8f\xe6\x8b\x35\xfe\x3a\x92\x29\x2d\xac\x8a\x8d\x51\x8a\x25\xfc\x93\xb6\xd7\xa0\x8b\x29\x37\x71\x1d\x33\xca\xcc\x33\xea\x28\xb9\x1f\xe2\xac\xc3\xa9\x5d\xdd\x97\xbe\xf6\x9e\x94\x25\xdd\x36\x81\xd1\xeb\x5d\x29\xc3\x2b\x44\xf1\x5b\xca\x15\x48"),
|
||||
["\xcc\xfb\x0f\x6a\x85\x71\x09\x65\xfe\x95\x9b\x53\xce\xe9\xb2\x7c\x22\xe9\x85\x5c\x0d\x97\x8d\xb6\xa9\x7e\x54\xc0\xfe\x4c\x0d\xb0"] = CTInfo($description="Cloudflare 'Nimbus2025'", $operator="Cloudflare", $url="https://ct.cloudflare.com/logs/nimbus2025/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x1a\x80\x1a\x15\x19\x19\x23\x79\xb4\xfa\xa0\x79\x8e\x8d\xd5\xc1\xdc\xc2\xb5\x96\x92\x7e\x94\xe0\xc3\x7e\x14\x7c\x0a\x0d\x2d\x46\xa8\x9d\x1b\xb1\x41\x65\x0c\x5f\x98\xc4\x5a\x17\x79\x81\x5b\x4a\x14\x41\xec\xaf\xa9\x5d\x0e\xab\x12\x19\x71\xcd\x43\xef\xbb\x97"),
|
||||
["\x48\xb0\xe3\x6b\xda\xa6\x47\x34\x0f\xe5\x6a\x02\xfa\x9d\x30\xeb\x1c\x52\x01\xcb\x56\xdd\x2c\x81\xd9\xbb\xbf\xab\x39\xd8\x84\x73"] = CTInfo($description="DigiCert Yeti2024 Log", $operator="DigiCert", $url="https://yeti2024.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x57\xb8\xc1\x6f\x30\xa4\x7f\x2e\xe4\xf0\xd0\xd9\x60\x62\x13\x95\xe3\x7a\xe3\x4e\x53\xc3\xb3\xb8\x73\x85\xc1\x18\x0d\x23\x0e\x58\x84\xd2\x78\xef\x9b\xb3\x1e\x2c\x1a\xde\xc1\x8f\x81\x1b\x19\x44\x58\xb7\x00\x77\x60\x20\x1a\x72\xd8\x82\xde\xae\x9e\xb1\xc6\x4b"),
|
||||
["\x7d\x59\x1e\x12\xe1\x78\x2a\x7b\x1c\x61\x67\x7c\x5e\xfd\xf8\xd0\x87\x5c\x14\xa0\x4e\x95\x9e\xb9\x03\x2f\xd9\x0e\x8c\x2e\x79\xb8"] = CTInfo($description="DigiCert Yeti2025 Log", $operator="DigiCert", $url="https://yeti2025.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xdf\x95\x00\x5e\x10\xc1\x01\xf7\x37\xe3\x10\x74\xd1\xff\xb2\xca\x90\xed\x32\x99\x5f\x0c\x39\xfe\xa1\xd1\x13\x11\xac\xd1\xb3\x73\x93\x20\xc2\x13\x3c\x4c\xb5\x7a\x52\x86\x86\x3d\xe3\x95\x24\x7c\xd8\x91\x98\x48\x3b\xf0\xf0\xdf\x21\xf1\xb0\x81\x5a\x59\x25\x43"),
|
||||
["\x73\xd9\x9e\x89\x1b\x4c\x96\x78\xa0\x20\x7d\x47\x9d\xe6\xb2\xc6\x1c\xd0\x51\x5e\x71\x19\x2a\x8c\x6b\x80\x10\x7a\xc1\x77\x72\xb5"] = CTInfo($description="DigiCert Nessie2024 Log", $operator="DigiCert", $url="https://nessie2024.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2d\xfc\xa2\x7b\x36\xbf\x56\x91\xe9\xfe\x3f\xe8\x3d\xfc\xc3\xa7\xe0\x61\x52\xea\x2c\xe9\x05\xa3\x9f\x27\x17\x81\x05\x70\x6b\x81\x61\x44\x8a\xf8\x3b\x10\x80\x42\xed\x03\x2f\x00\x50\x21\xfc\x41\x54\x84\xa3\x54\xd5\x2e\xb2\x7a\x16\x4b\x2a\x1f\x2b\x66\x04\x2b"),
|
||||
["\xe6\xd2\x31\x63\x40\x77\x8c\xc1\x10\x41\x06\xd7\x71\xb9\xce\xc1\xd2\x40\xf6\x96\x84\x86\xfb\xba\x87\x32\x1d\xfd\x1e\x37\x8e\x50"] = CTInfo($description="DigiCert Nessie2025 Log", $operator="DigiCert", $url="https://nessie2025.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xf2\xf0\xf0\xa7\x8b\x81\x2e\x09\x39\x3b\x9f\x42\xda\x38\x44\x5f\xb4\xcc\xed\x36\xbb\xd8\x43\x7f\x16\x49\x57\x87\x04\x7f\xa5\x01\x34\xf7\xe8\x68\x3f\xb7\x78\x1f\x60\x66\x2d\x67\x9a\x75\x80\xb7\x53\xa7\x85\xd5\xbc\xab\x47\x06\x55\xdb\xb5\xdf\x88\xa1\x6f\x38"),
|
||||
["\xb6\x9d\xdc\xbc\x3c\x1a\xbd\xef\x6f\x9f\xd6\x0c\x88\xb1\x06\x7b\x77\xf0\x82\x68\x8b\x2d\x78\x65\xd0\x4b\x39\xab\xe9\x27\xa5\x75"] = CTInfo($description="DigiCert 'Wyvern2024h1' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2024h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x68\xa6\x79\x14\xd1\x58\xe7\xab\xaa\x29\x69\x7f\x60\xed\x68\xe8\x10\xf6\x07\x84\xc0\xfb\x59\x04\x5a\x09\xc9\x1d\xe1\x4b\xfb\xcd\xdc\x03\xf3\xa8\x2a\x46\xb9\x84\x4d\x69\x30\xec\x23\x35\xc1\x8e\xfc\x9f\xb4\x20\x24\xd7\x15\xac\x87\xf7\x1e\xc1\x0b\x3c\x76\x1a"),
|
||||
["\x0c\x2a\xef\x2c\x4a\x5b\x98\x83\xd4\xdd\xa3\x82\xfe\x50\xfb\x51\x88\xb3\xe9\x73\x33\xa1\xec\x53\xa0\x9d\xc9\xa7\x9d\x0d\x08\x20"] = CTInfo($description="DigiCert 'Wyvern2024h2' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2024h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa8\x73\x12\x9c\x54\xd0\x7a\x7d\xc5\xb5\x17\x2b\x71\x52\x89\x04\x90\xbb\x42\xf1\x9d\xf8\x1c\xde\x4c\xcf\x82\x3c\xbd\x37\x1b\x74\x4c\x3c\xc7\xa3\x13\x87\x01\x51\x13\x14\xda\xa2\x12\x98\x84\xce\x1c\xbe\xcf\x4f\x7a\xef\x15\xfa\xd0\xee\xed\xed\x07\xad\x71\x6d"),
|
||||
["\x73\x20\x22\x0f\x08\x16\x8a\xf9\xf3\xc4\xa6\x8b\x0a\xb2\x6a\x9a\x4a\x00\xee\xf5\x77\x85\x8a\x08\x4d\x05\x00\xd4\xa5\x42\x44\x59"] = CTInfo($description="DigiCert 'Wyvern2025h1' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\xcb\x80\x61\x86\x1b\x1f\xb5\xab\x2b\x20\x76\x59\x83\x66\x0e\xce\xae\xb8\x6f\x3b\x88\x02\xeb\x43\xf4\x87\x90\xcb\x8b\xda\xac\x0e\x19\x50\xe0\xf9\x24\x0e\xab\x26\x93\x8c\x3f\x9e\x0d\x96\x58\x44\x9d\x3b\x8a\x80\xc5\xc8\xbe\xe1\x89\x46\x6b\x48\x4c\xd6\x09"),
|
||||
["\xed\x3c\x4b\xd6\xe8\x06\xc2\xa4\xa2\x00\x57\xdb\xcb\x24\xe2\x38\x01\xdf\x51\x2f\xed\xc4\x86\xc5\x70\x0f\x20\xdd\xb7\x3e\x3f\xe0"] = CTInfo($description="DigiCert 'Wyvern2025h2' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe0\xdb\x41\xef\xe4\x04\xbd\xcb\x6b\x2e\x4c\xcc\xf1\x6c\xde\x41\x58\x7f\xfe\x94\xf6\x7a\xf6\x60\xed\x8b\x76\x72\xa3\xa2\x1c\x31\x13\x32\x35\xa1\xf2\x08\xd2\x68\xc5\x34\xa7\x56\x08\x1c\x63\xde\x95\xe2\x81\x69\x97\x8d\x1e\xa8\xb7\x66\x51\x25\x75\x4d\x78\x2e"),
|
||||
["\xdb\x07\x6c\xde\x6a\x8b\x78\xec\x58\xd6\x05\x64\x96\xeb\x6a\x26\xa8\xc5\x9e\x72\x12\x93\xe8\xac\x03\x27\xdd\xde\x89\xdb\x5a\x2a"] = CTInfo($description="DigiCert 'Sphinx2024h1' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2024h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xc6\xe4\x29\x69\x98\xfe\x28\x92\x57\x12\x4d\x9e\xed\x0e\xe7\x32\xa2\xe6\x9c\x27\x78\xa4\x29\x7c\x99\xd5\xdb\xfa\x22\xc1\xdd\x5e\xa7\xf4\xd8\xea\xc8\xd7\x44\x8d\xe0\xf1\x8c\x0a\x01\x1d\xd8\x22\xa8\xd3\xeb\xc9\x22\x8e\x36\xfb\x4a\xb1\x70\x9c\x5d\xc1\xe8\x33"),
|
||||
["\xdc\xc9\x5e\x6f\xa2\x99\xb9\xb0\xfd\xbd\x6c\xa6\xa3\x6e\x1d\x72\xc4\x21\x2f\xdd\x1e\x0f\x47\x55\x3a\x36\xd6\xcf\x1a\xd1\x1d\x8d"] = CTInfo($description="DigiCert 'Sphinx2024h2' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2024h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xdb\x09\x41\x84\xe7\xd1\xf1\x5b\x25\x09\x7b\xe8\xc6\x98\x51\x5e\x29\x85\xfd\x81\xde\x89\xd7\xd0\x86\xa4\xb0\xe5\x15\xec\x5d\x7b\x17\x55\x5f\xc9\x79\x8d\xe4\x22\x36\xe7\xe9\xbf\x38\x3f\xd1\xe9\xd4\x09\x84\x81\xbe\xb6\xc1\xed\x1b\x17\xea\x26\x97\xba\xe9\x9a"),
|
||||
["\xde\x85\x81\xd7\x50\x24\x7c\x6b\xcd\xcb\xaf\x56\x37\xc5\xe7\x81\xc6\x4c\xe4\x6e\xd6\x17\x63\x9f\x8f\x34\xa7\x26\xc9\xe2\xbd\x37"] = CTInfo($description="DigiCert 'Sphinx2025h1' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe3\x2f\x1f\x4d\x89\x05\x75\x29\x78\xbb\x22\x3d\x07\x62\x51\x14\x70\x94\xe7\x3c\xea\xf5\xee\xae\xa6\x48\x9a\x86\x52\x4e\x9e\x5c\xe3\x95\x97\x28\xbb\x52\x4b\x2a\xfd\xc8\xc9\x89\x4e\x45\x31\x17\xd3\x8d\xf2\xe7\xce\x18\x11\x58\x98\x2c\x60\x6f\x58\x20\x36\x6e"),
|
||||
["\xa4\x42\xc5\x06\x49\x60\x61\x54\x8f\x0f\xd4\xea\x9c\xfb\x7a\x2d\x26\x45\x4d\x87\xa9\x7f\x2f\xdf\x45\x59\xf6\x27\x4f\x3a\x84\x54"] = CTInfo($description="DigiCert 'Sphinx2025h2' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x41\x8c\x50\x13\x54\xb1\x19\x05\xb7\x7f\x4a\x20\x6e\xa3\x75\x63\xca\x34\xf4\xcc\x74\xea\x32\x3b\xb6\x8b\x03\x14\xa8\x52\x7f\x32\x87\x5e\x59\x9e\x0f\xab\x18\x9e\x29\x6c\xb5\x72\x77\x1a\x27\x54\x85\x5d\xc1\x7b\x24\xa8\x34\xe3\xcd\x88\xce\xd4\x50\x1b\xbe\x69"),
|
||||
["\x55\x81\xd4\xc2\x16\x90\x36\x01\x4a\xea\x0b\x9b\x57\x3c\x53\xf0\xc0\xe4\x38\x78\x70\x25\x08\x17\x2f\xa3\xaa\x1d\x07\x13\xd3\x0c"] = CTInfo($description="Sectigo 'Sabre' CT log", $operator="Sectigo", $url="https://sabre.ct.comodo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xf2\x6f\xd2\x89\x0f\x3f\xc5\xf8\x87\x1e\xab\x65\xb3\xd9\xbb\x17\x23\x8c\x06\x0e\x09\x55\x96\x3d\x0a\x08\xa2\xc5\x71\xb3\xd1\xa9\x2f\x28\x3e\x83\x10\xbf\x12\xd0\x44\x66\x15\xef\x54\xe1\x98\x80\xd0\xce\x24\x6d\x3e\x67\x9a\xe9\x37\x23\xce\x52\x93\x86\xda\x80"),
|
||||
["\xa2\xe2\xbf\xd6\x1e\xde\x2f\x2f\x07\xa0\xd6\x4e\x6d\x37\xa7\xdc\x65\x43\xb0\xc6\xb5\x2e\xa2\xda\xb7\x8a\xf8\x9a\x6d\xf5\x17\xd8"] = CTInfo($description="Sectigo 'Sabre2024h1'", $operator="Sectigo", $url="https://sabre2024h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2c\x01\xf6\xce\x31\xbc\xaa\x14\x61\x51\xfe\x6b\x7a\x87\xae\xa6\xd3\x9b\xc7\x87\x2d\x0a\x5a\xc8\x4f\xb5\x54\xdc\xc9\x93\xa0\x00\xee\xca\x1c\xb9\xa7\xb6\x7b\x47\x3b\xe5\x4f\xaa\x6c\x16\x1c\x70\x2e\xc8\xec\x53\x5a\x4c\x21\x4c\x7e\x27\x0b\x13\x14\x5e\xfc\x85"),
|
||||
["\x19\x98\x10\x71\x09\xf0\xd6\x52\x2e\x30\x80\xd2\x9e\x3f\x64\xbb\x83\x6e\x28\xcc\xf9\x0f\x52\x8e\xee\xdf\xce\x4a\x3f\x16\xb4\xca"] = CTInfo($description="Sectigo 'Sabre2024h2'", $operator="Sectigo", $url="https://sabre2024h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x7a\x10\x4c\x8a\xe7\x22\x7b\x6d\x2a\xba\x8e\xfa\x6b\x4a\x81\xd5\x85\xae\x03\xef\xff\x4b\xfc\x4d\x53\x3d\xb7\x8c\xbb\x75\x09\xc9\xea\x16\x7e\xc1\x77\x16\xd2\xc2\x45\x74\x6d\x8d\xc4\xe1\x88\x37\xdf\xd4\xf3\x60\x65\xfc\xa0\x75\xf0\x20\x66\x8e\x4a\xcc\x19\xda"),
|
||||
["\xe0\x92\xb3\xfc\x0c\x1d\xc8\xe7\x68\x36\x1f\xde\x61\xb9\x96\x4d\x0a\x52\x78\x19\x8a\x72\xd6\x72\xc4\xb0\x4d\xa5\x6d\x6f\x54\x04"] = CTInfo($description="Sectigo 'Sabre2025h1'", $operator="Sectigo", $url="https://sabre2025h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x7e\x2f\x39\xf1\xe8\x23\x8e\xb3\x32\x04\xaf\x4d\x57\xf6\xdb\xc5\x74\xa4\x7a\x6d\x3b\x07\x51\x0c\x5a\xfb\x80\x30\x05\xc6\x5a\x0c\xc4\x76\xd6\x06\xa8\x57\x4d\xfb\xdf\xe4\x82\x90\xc2\x41\xae\x70\xb3\x31\xa2\xe3\xfa\x3d\x5f\x2c\x5d\x04\xcd\xb4\x9d\x55\xab\x41"),
|
||||
["\x1a\x04\xff\x49\xd0\x54\x1d\x40\xaf\xf6\xa0\xc3\xbf\xf1\xd8\xc4\x67\x2f\x4e\xec\xee\x23\x40\x68\x98\x6b\x17\x40\x2e\xdc\x89\x7d"] = CTInfo($description="Sectigo 'Sabre2025h2'", $operator="Sectigo", $url="https://sabre2025h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x85\x13\x11\x2d\x7b\xf3\x93\x81\xe4\xb9\x7c\xd9\x64\x3b\xe7\xb5\x83\x99\x66\x79\x59\x47\x6a\x42\x5e\xd6\xbd\x63\x2e\xb7\x91\x4b\xae\xbc\x56\xc4\xc5\x6e\x09\xa0\xd7\x64\x1a\xc8\xc1\xaf\x89\x8b\xf5\x58\xd8\xba\xeb\x7b\x83\x52\xe9\xf4\xe0\xa5\xcd\xcd\x92\xcc"),
|
||||
["\x6f\x53\x76\xac\x31\xf0\x31\x19\xd8\x99\x00\xa4\x51\x15\xff\x77\x15\x1c\x11\xd9\x02\xc1\x00\x29\x06\x8d\xb2\x08\x9a\x37\xd9\x13"] = CTInfo($description="Sectigo 'Mammoth' CT log", $operator="Sectigo", $url="https://mammoth.ct.comodo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xef\xe4\x7d\x74\x2e\x15\x15\xb6\xe9\xbb\x23\x8b\xfb\x2c\xb5\xe1\xc7\x80\x98\x47\xfb\x40\x69\x68\xfc\x49\xad\x61\x4e\x83\x47\x3c\x1a\xb7\x8d\xdf\xff\x7b\x30\xb4\xba\xff\x2f\xcb\xa0\x14\xe3\xad\xd5\x85\x3f\x44\x59\x8c\x8c\x60\x8b\xd7\xb8\xb1\xbf\xae\x8c\x67"),
|
||||
["\x29\xd0\x3a\x1b\xb6\x74\xaa\x71\x1c\xd3\x03\x5b\x65\x57\xc1\x4f\x8a\xa7\x8b\x4f\xe8\x38\x94\x49\xec\xa4\x53\xf9\x44\xbd\x24\x68"] = CTInfo($description="Sectigo 'Mammoth2024h1'", $operator="Sectigo", $url="https://mammoth2024h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa4\x59\x90\xf3\x71\x24\x24\xf7\xc3\x55\x27\x56\x9c\xa3\x59\x1e\xf7\xb7\x9f\xce\xab\x4e\x19\x66\x4d\xd0\x8a\xfa\x9d\x62\xa4\x24\xf0\x3b\x20\xe4\x1d\x14\x67\xc8\xfc\xe4\x37\xf2\x4b\x38\x54\x5a\xcf\x9f\x6b\x07\x90\xd0\x0e\x7e\x3d\x4c\x87\xb2\xe8\x3f\x07\xcc"),
|
||||
["\x50\x85\x01\x58\xdc\xb6\x05\x95\xc0\x0e\x92\xa8\x11\x02\xec\xcd\xfe\x3f\x6b\x78\x58\x42\x9f\x57\x98\x35\x38\xc9\xda\x52\x50\x63"] = CTInfo($description="Sectigo 'Mammoth2024h1b'", $operator="Sectigo", $url="https://mammoth2024h1b.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa3\xd5\x07\x28\x7a\x04\x34\xae\xca\xbe\x80\x79\x4f\x3e\xf6\x41\xf4\x24\x04\xe1\xd6\x36\x5a\x1a\x09\xf2\xd1\xba\x84\x17\xae\x1e\xa1\x7c\x00\x1d\x54\x73\x90\x75\x21\xa8\xd1\xda\x5e\x10\xe1\x8c\xec\xb2\x8a\x8c\xc8\xe7\xdd\xcd\xe2\x07\xf0\x4e\x16\x02\x57\x37"),
|
||||
["\xdf\xe1\x56\xeb\xaa\x05\xaf\xb5\x9c\x0f\x86\x71\x8d\xa8\xc0\x32\x4e\xae\x56\xd9\x6e\xa7\xf5\xa5\x6a\x01\xd1\xc1\x3b\xbe\x52\x5c"] = CTInfo($description="Sectigo 'Mammoth2024h2'", $operator="Sectigo", $url="https://mammoth2024h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x85\x66\x22\x24\x6e\xbe\x52\x62\x0a\xa0\xaf\xc3\x25\x1a\x36\x2e\xa7\x60\x89\xa2\x65\xbf\xa4\x5f\xbd\x85\x6a\x94\x05\x81\x35\x90\x54\x31\x95\xe7\x11\x9e\xa3\x2e\x0f\x85\xef\xa7\x88\x57\x8b\x63\x1a\x81\xc1\x41\x9d\x7d\xec\x01\x3a\xdb\xb9\xc1\x27\xf4\x65\x1e"),
|
||||
|
@ -39,4 +50,6 @@ redef ct_logs += {
|
|||
["\x87\x4f\xb5\x0d\xc0\x29\xd9\x93\x1d\xe5\x73\xe9\xf2\x89\x9e\x8e\x45\x33\xb3\x92\xd3\x8b\x0a\x46\x25\x74\xbf\x0f\xee\xb2\xfc\x1e"] = CTInfo($description="Trust Asia Log2024-2", $operator="TrustAsia", $url="https://ct2024.trustasia.com/log2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\x64\xe2\x79\x81\x3f\x61\xd7\xec\xc6\xf8\x65\x28\x1d\xa0\xb4\x66\x33\xc3\x25\xd5\x0a\x95\x78\x9c\x8f\xfe\xa4\x2a\xd8\x8f\x7e\x72\xe0\xfe\xa8\x7f\xf8\xb1\x2d\x85\xc0\x8e\x12\x74\x0d\x2f\x8c\xab\xd7\x7f\x7a\x1e\xd9\x84\x33\x39\xe8\xfd\x89\x5f\x96\x48\x08"),
|
||||
["\x28\xe2\x81\x38\xfd\x83\x21\x45\xe9\xa9\xd6\xaa\x75\x37\x6d\x83\x77\xa8\x85\x12\xb3\xc0\x7f\x72\x41\x48\x21\xdc\xbd\xe9\x8c\x66"] = CTInfo($description="TrustAsia Log2025a", $operator="TrustAsia", $url="https://ct2025-a.trustasia.com/log2025a/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x70\xe5\xb1\xa4\x09\x79\x2b\x9d\xf8\xa3\xa0\xdf\x18\xef\x95\x5d\x03\x6c\x7b\xa1\x91\xa9\xb8\x80\x7d\xec\x5c\x02\x08\xe2\x6e\x2f\x7c\x32\x70\xbd\x96\x84\x5f\xa6\x62\xe9\x65\xb5\x7c\x90\x58\xba\x22\xd5\xf9\xf5\x69\x54\xb7\xa8\x94\x4e\x32\x09\xae\x26\x11\x4d"),
|
||||
["\x28\x2c\x8b\xdd\x81\x0f\xf9\x09\x12\x0a\xce\x16\xd6\xe0\xec\x20\x1b\xea\x82\xa3\xa4\xaf\x19\xd9\xef\xfb\x59\xe8\x3f\xdc\x42\x68"] = CTInfo($description="TrustAsia Log2025b", $operator="TrustAsia", $url="https://ct2025-b.trustasia.com/log2025b/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xaa\xa0\x8b\xdb\x67\x14\x5d\x97\x89\x1d\x08\x8d\x06\xd7\xc1\x94\x8e\xb0\xfa\x4c\x46\xd5\x53\x08\x78\x2b\x04\x53\x6c\xf3\xde\xb1\xd1\x53\x40\xda\x90\x57\xe6\x1a\x9e\x3c\xc7\x03\xb8\xbd\x2f\xa9\xcf\xe8\x7b\x5e\xe1\x4b\x60\xe5\x38\x43\x60\x97\xc1\x5b\x2f\x65"),
|
||||
["\x74\xdb\x9d\x58\xf7\xd4\x7e\x9d\xfd\x78\x7a\x16\x2a\x99\x1c\x18\xcf\x69\x8d\xa7\xc7\x29\x91\x8c\x9a\x18\xb0\x45\x0d\xba\x44\xbc"] = CTInfo($description="TrustAsia 'log2026a'", $operator="TrustAsia", $url="https://ct2026-a.trustasia.com/log2026a/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\x4e\x7a\xc9\xa6\x07\xf9\xff\x74\xec\x98\xcb\x49\xe1\x00\x24\xb3\x59\x2e\x83\xfd\xc0\x70\x35\x33\x4c\x63\xca\x74\x83\xc0\x3c\x5b\x53\x40\x7c\x31\x1f\x35\xa4\x5f\x0f\xe4\xee\x4f\x89\x17\xe8\x5b\x2e\xc5\xac\x00\x05\xc9\x76\x37\x45\x97\x03\x15\xff\x60\x59"),
|
||||
["\x25\xb7\xef\xde\xa1\x13\x01\x93\xed\x93\x07\x97\x70\xaa\x32\x2a\x26\x62\x0d\xe3\x5a\xc8\xaa\x7c\x75\x19\x7d\xe0\xb1\xa9\xe0\x65"] = CTInfo($description="TrustAsia 'log2026b'", $operator="TrustAsia", $url="https://ct2026-b.trustasia.com/log2026b/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x0f\x12\x8c\xa9\xe6\xe3\xec\x62\xee\xdf\x58\xc8\x50\xe6\x26\x70\x76\x10\xb7\x04\x39\xb3\xa7\xf8\x4c\x73\x3b\xc3\x38\x5a\x12\x00\x4c\xe0\xda\x0e\x16\x8a\x45\x32\x0a\x31\xaa\x22\xc7\x9d\x7d\x05\x53\xc7\x9e\x94\xea\x9b\x57\x46\xbf\x4f\xa4\x7e\xfb\xdf\xfa\x85"),
|
||||
};
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -20,6 +20,12 @@ export {
|
|||
|
||||
hook Notice::notice(n: Notice::Info)
|
||||
{
|
||||
if ( CommunityID::Notice::enabled && n?$conn )
|
||||
n$community_id = community_id_v1(n$conn$id, CommunityID::seed, CommunityID::do_base64);
|
||||
if ( CommunityID::Notice::enabled && n?$conn && n$conn?$conn )
|
||||
{
|
||||
local info = n$conn$conn;
|
||||
# This is set during new_connection(), so it should
|
||||
# always be there, but better safe than sorry.
|
||||
if ( info?$community_id )
|
||||
n$community_id = info$community_id;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,19 +1,2 @@
|
|||
##! In a cluster configuration, open the port number for metrics
|
||||
##! from the cluster node configuration for exporting data to
|
||||
##! Prometheus.
|
||||
##!
|
||||
##! The manager node will also provide a ``/services.json`` endpoint
|
||||
##! for the HTTP Service Discovery system in Prometheus to use for
|
||||
##! configuration. This endpoint will include information for all of
|
||||
##! the other nodes in the cluster.
|
||||
@load base/frameworks/cluster
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
|
||||
redef Telemetry::metrics_endpoint_name = Cluster::node;
|
||||
|
||||
@if ( Cluster::local_node_metrics_port() != 0/unknown )
|
||||
redef Telemetry::metrics_port = Cluster::local_node_metrics_port();
|
||||
@endif
|
||||
|
||||
@endif
|
||||
@deprecated "Remove in v7.1: Cluster nodes now implicitly listen on metrics port if set in cluster-layout."
|
||||
@load base/frameworks/telemetry
|
||||
|
|
|
@ -17,7 +17,10 @@ export {
|
|||
};
|
||||
}
|
||||
|
||||
event connection_state_remove(c: connection)
|
||||
module Conn;
|
||||
|
||||
event new_connection(c: connection) &priority=5
|
||||
{
|
||||
Conn::set_conn(c, F); # likely first to access :-/
|
||||
c$conn$community_id = community_id_v1(c$id, CommunityID::seed, CommunityID::do_base64);
|
||||
}
|
||||
|
|
|
@ -46,11 +46,11 @@ export {
|
|||
|
||||
## Regular expression is used to match URI based SQL injections.
|
||||
const match_sql_injection_uri =
|
||||
/[\?&][^[:blank:]\x00-\x1f\|]+?=[\-[:alnum:]%]+([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x1f]|\/\*.*?\*\/|\)?;)+.*?([hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x1f]|\/\*.*?\*\/)+/
|
||||
| /[\?&][^[:blank:]\x00-\x1f\|]+?=[\-0-9%]+([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x1f]|\/\*.*?\*\/|\)?;)+([xX]?[oO][rR]|[nN]?[aA][nN][dD])([[:blank:]\x00-\x1f]|\/\*.*?\*\/)+['"]?(([^a-zA-Z&]+)?=|[eE][xX][iI][sS][tT][sS])/
|
||||
| /[\?&][^[:blank:]\x00-\x1f]+?=[\-0-9%]*([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/
|
||||
| /[\?&][^[:blank:]\x00-\x1f\|]+?=([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x1f]|\/\*.*?\*\/|;)*([xX]?[oO][rR]|[nN]?[aA][nN][dD]|[hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[rR][eE][gG][eE][xX][pP]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x1f]|\/\*.*?\*\/|[\[(])+[a-zA-Z&]{2,}/
|
||||
| /[\?&][^[:blank:]\x00-\x1f]+?=[^\.]*?([cC][hH][aA][rR]|[aA][sS][cC][iI][iI]|[sS][uU][bB][sS][tT][rR][iI][nN][gG]|[tT][rR][uU][nN][cC][aA][tT][eE]|[vV][eE][rR][sS][iI][oO][nN]|[lL][eE][nN][gG][tT][hH])\(/
|
||||
/[\?&][^[:blank:]\x00-\x1f\|\+]+?=[\-[:alnum:]%]+([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/|\)?;)+.*?([hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)+/
|
||||
| /[\?&][^[:blank:]\x00-\x1f\|\+]+?=[\-0-9%]+([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/|\)?;)+([xX]?[oO][rR]|[nN]?[aA][nN][dD])([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)+['"]?(([^a-zA-Z&]+)?=|[eE][xX][iI][sS][tT][sS])/
|
||||
| /[\?&][^[:blank:]\x00-\x1f\+]+?=[\-0-9%]*([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/
|
||||
| /[\?&][^[:blank:]\x00-\x1f\|\+]+?=([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/|;)*([xX]?[oO][rR]|[nN]?[aA][nN][dD]|[hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[rR][eE][gG][eE][xX][pP]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/|[\[(])+[a-zA-Z&]{2,}/
|
||||
| /[\?&][^[:blank:]\x00-\x1f\+]+?=[^\.]*?([cC][hH][aA][rR]|[aA][sS][cC][iI][iI]|[sS][uU][bB][sS][tT][rR][iI][nN][gG]|[tT][rR][uU][nN][cC][aA][tT][eE]|[vV][eE][rR][sS][iI][oO][nN]|[lL][eE][nN][gG][tT][hH])\(/
|
||||
| /\/\*![[:digit:]]{5}.*?\*\// &redef;
|
||||
|
||||
## A hook that can be used to prevent specific requests from being counted
|
||||
|
|
|
@ -94,10 +94,6 @@ redef digest_salt = "Please change this value.";
|
|||
# telemetry_histogram.log.
|
||||
@load frameworks/telemetry/log
|
||||
|
||||
# Enable Prometheus metrics scraping in the cluster: each Zeek node will listen
|
||||
# on the metrics port defined in its Cluster::nodes entry.
|
||||
# @load frameworks/telemetry/prometheus
|
||||
|
||||
# Uncomment the following line to enable detection of the heartbleed attack. Enabling
|
||||
# this might impact performance a bit.
|
||||
# @load policy/protocols/ssl/heartbleed
|
||||
|
|
|
@ -201,6 +201,9 @@ public:
|
|||
|
||||
bool PermitWeird(const char* name, uint64_t threshold, uint64_t rate, double duration);
|
||||
|
||||
// Returns true once Done() is called.
|
||||
bool IsFinished() { return finished; }
|
||||
|
||||
private:
|
||||
friend class session::detail::Timer;
|
||||
|
||||
|
|
|
@ -2,8 +2,6 @@
|
|||
|
||||
#include "zeek/DFA.h"
|
||||
|
||||
#include "zeek/zeek-config.h"
|
||||
|
||||
#include "zeek/Desc.h"
|
||||
#include "zeek/EquivClass.h"
|
||||
#include "zeek/Hash.h"
|
||||
|
@ -265,9 +263,9 @@ DFA_State_Cache::~DFA_State_Cache() {
|
|||
DFA_State* DFA_State_Cache::Lookup(const NFA_state_list& nfas, DigestStr* digest) {
|
||||
// We assume that state ID's don't exceed 10 digits, plus
|
||||
// we allow one more character for the delimiter.
|
||||
auto id_tag_buf = std::make_unique<u_char[]>(nfas.length() * 11 + 1);
|
||||
auto id_tag_buf = std::make_unique<char[]>(nfas.length() * 11 + 1);
|
||||
auto id_tag = id_tag_buf.get();
|
||||
u_char* p = id_tag;
|
||||
char* p = id_tag;
|
||||
|
||||
for ( int i = 0; i < nfas.length(); ++i ) {
|
||||
NFA_State* n = nfas[i];
|
||||
|
@ -287,7 +285,7 @@ DFA_State* DFA_State_Cache::Lookup(const NFA_state_list& nfas, DigestStr* digest
|
|||
// HashKey because the data is copied into the key.
|
||||
hash128_t hash;
|
||||
KeyedHash::Hash128(id_tag, p - id_tag, &hash);
|
||||
*digest = DigestStr(reinterpret_cast<const unsigned char*>(hash), 16);
|
||||
*digest = DigestStr(reinterpret_cast<const char*>(hash), 16);
|
||||
|
||||
auto entry = states.find(*digest);
|
||||
if ( entry == states.end() ) {
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <sys/types.h> // for u_char
|
||||
#include <sys/types.h>
|
||||
#include <cassert>
|
||||
#include <map>
|
||||
#include <string>
|
||||
|
@ -18,7 +18,7 @@ class DFA_Machine;
|
|||
|
||||
// Transitions to the uncomputed state indicate that we haven't yet
|
||||
// computed the state to go to.
|
||||
#define DFA_UNCOMPUTED_STATE -2
|
||||
#define DFA_UNCOMPUTED_STATE (-2)
|
||||
#define DFA_UNCOMPUTED_STATE_PTR ((DFA_State*)DFA_UNCOMPUTED_STATE)
|
||||
|
||||
class DFA_State : public Obj {
|
||||
|
@ -67,7 +67,7 @@ protected:
|
|||
DFA_State* mark;
|
||||
};
|
||||
|
||||
using DigestStr = std::basic_string<u_char>;
|
||||
using DigestStr = std::string;
|
||||
|
||||
struct DFA_State_Cache_Stats {
|
||||
// Sum of all NFA states
|
||||
|
|
|
@ -1642,6 +1642,9 @@ class CoerceToAnyExpr : public UnaryExpr {
|
|||
public:
|
||||
CoerceToAnyExpr(ExprPtr op);
|
||||
|
||||
bool IsReduced(Reducer* c) const override;
|
||||
ExprPtr Reduce(Reducer* c, StmtPtr& red_stmt) override;
|
||||
|
||||
protected:
|
||||
ValPtr Fold(Val* v) const override;
|
||||
|
||||
|
|
|
@ -130,11 +130,14 @@ bool Obj::SetLocationInfo(const detail::Location* start, const detail::Location*
|
|||
// We already have a better location, so don't use this one.
|
||||
return true;
|
||||
|
||||
delete location;
|
||||
|
||||
location =
|
||||
auto new_location =
|
||||
new detail::Location(start->filename, start->first_line, end->last_line, start->first_column, end->last_column);
|
||||
|
||||
// Don't delete this until we've constructed the new location, in case
|
||||
// "start" or "end" are our own location.
|
||||
delete location;
|
||||
location = new_location;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
@ -1849,7 +1849,8 @@ void WhenInfo::Build(StmtPtr ws) {
|
|||
auto else_branch = timeout_s ? timeout_s : empty;
|
||||
|
||||
auto do_bodies = make_intrusive<IfStmt>(two_test, s, else_branch);
|
||||
auto dummy_return = make_intrusive<ReturnStmt>(true_const);
|
||||
auto any_true_const = make_intrusive<CoerceToAnyExpr>(true_const);
|
||||
auto dummy_return = make_intrusive<ReturnStmt>(any_true_const);
|
||||
|
||||
auto shebang = make_intrusive<StmtList>(do_test, do_bodies, dummy_return);
|
||||
|
||||
|
|
|
@ -2,14 +2,11 @@
|
|||
|
||||
#include "zeek/Timer.h"
|
||||
|
||||
#include "zeek/zeek-config.h"
|
||||
|
||||
#include "zeek/Desc.h"
|
||||
#include "zeek/NetVar.h"
|
||||
#include "zeek/RunState.h"
|
||||
#include "zeek/broker/Manager.h"
|
||||
#include "zeek/iosource/Manager.h"
|
||||
#include "zeek/iosource/PktSrc.h"
|
||||
#include "zeek/util.h"
|
||||
|
||||
namespace zeek::detail {
|
||||
|
|
|
@ -3241,10 +3241,11 @@ bool VectorVal::Assign(unsigned int index, ValPtr element) {
|
|||
|
||||
if ( yield_types ) {
|
||||
const auto& t = element->GetType();
|
||||
(*yield_types)[index] = t;
|
||||
auto& yt_i = (*yield_types)[index];
|
||||
auto& elem = vector_val[index];
|
||||
if ( elem )
|
||||
ZVal::DeleteIfManaged(*elem, t);
|
||||
ZVal::DeleteIfManaged(*elem, yt_i);
|
||||
yt_i = t;
|
||||
elem = ZVal(std::move(element), t);
|
||||
}
|
||||
else {
|
||||
|
|
|
@ -113,19 +113,7 @@ void Analyzer::CtorInit(const zeek::Tag& arg_tag, Connection* arg_conn) {
|
|||
|
||||
Analyzer::~Analyzer() {
|
||||
assert(finished);
|
||||
|
||||
// Make sure any late entries into the analyzer tree are handled (e.g.
|
||||
// from some Done() implementation).
|
||||
LOOP_OVER_GIVEN_CHILDREN(i, new_children) {
|
||||
if ( ! (*i)->finished )
|
||||
(*i)->Done();
|
||||
}
|
||||
|
||||
// Deletion of new_children done in separate loop in case a Done()
|
||||
// implementation tries to inspect analyzer tree w/ assumption that
|
||||
// all analyzers are still valid.
|
||||
LOOP_OVER_GIVEN_CHILDREN(i, new_children)
|
||||
delete *i;
|
||||
assert(new_children.empty());
|
||||
|
||||
LOOP_OVER_CHILDREN(i)
|
||||
delete *i;
|
||||
|
@ -330,6 +318,30 @@ void Analyzer::ForwardEndOfData(bool orig) {
|
|||
bool Analyzer::AddChildAnalyzer(Analyzer* analyzer, bool init) {
|
||||
auto t = analyzer->GetAnalyzerTag();
|
||||
|
||||
// Prevent attaching child analyzers to analyzer subtrees where
|
||||
// either the parent has finished or is being removed. Further,
|
||||
// don't attach analyzers when the connection has finished or is
|
||||
// currently being finished (executing Done()).
|
||||
//
|
||||
// Scenarios in which analyzers have been observed that late in
|
||||
// analyzer / connection lifetime are:
|
||||
//
|
||||
// * A DPD signature match on undelivered TCP data that is flushed
|
||||
// during Connection::Done(). The PIA analyzer activates a new
|
||||
// analyzer adding it to the TCP analyzer.
|
||||
//
|
||||
// * Analyzers flushing buffered state during Done(), resulting
|
||||
// in new analyzers being created.
|
||||
//
|
||||
// Analyzers added during Done() are problematic as calling Done()
|
||||
// within the parent's destructor isn't safe, so we prevent these
|
||||
// situations.
|
||||
if ( Removing() || IsFinished() || Conn()->IsFinished() ) {
|
||||
analyzer->Done();
|
||||
delete analyzer;
|
||||
return false;
|
||||
}
|
||||
|
||||
if ( HasChildAnalyzer(t) || IsPreventedChildAnalyzer(t) ) {
|
||||
analyzer->Done();
|
||||
delete analyzer;
|
||||
|
|
|
@ -15,7 +15,7 @@ public type Request = unit {
|
|||
|
||||
switch {
|
||||
-> : /\/W/ { self.whois = True; }
|
||||
-> void;
|
||||
-> : void;
|
||||
};
|
||||
|
||||
: OptionalWhiteSpace;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
spicy_add_analyzer(
|
||||
NAME LDAP
|
||||
PACKAGE_NAME spicy-ldap
|
||||
SOURCES ldap.spicy ldap.evt asn1.spicy
|
||||
MODULES LDAP ASN1)
|
||||
SOURCES ldap.spicy ldap.evt asn1.spicy ldap_zeek.spicy
|
||||
MODULES LDAP ASN1 LDAP_Zeek)
|
||||
|
|
|
@ -41,3 +41,18 @@ on LDAP::SearchRequest -> event LDAP::search_request($conn,
|
|||
on LDAP::SearchResultEntry -> event LDAP::search_result_entry($conn,
|
||||
message.messageID,
|
||||
self.objectName);
|
||||
|
||||
on LDAP::ExtendedRequest -> event LDAP::extended_request($conn,
|
||||
message.messageID,
|
||||
self.requestName,
|
||||
self.requestValue);
|
||||
|
||||
on LDAP::ExtendedResponse -> event LDAP::extended_response($conn,
|
||||
message.messageID,
|
||||
message.result_.code,
|
||||
self.responseName,
|
||||
self.responseValue);
|
||||
|
||||
# Once switched into MessageMode::TLS, we won't parse messages anymore,
|
||||
# so this is raised just once.
|
||||
on LDAP::Message if (ctx.messageMode == LDAP::MessageMode::TLS) -> event LDAP::starttls($conn);
|
||||
|
|
|
@ -126,125 +126,206 @@ public type Result = unit {
|
|||
# https://tools.ietf.org/html/rfc4511#section-4.1.10
|
||||
};
|
||||
|
||||
# 1.2.840.48018.1.2.2 (MS KRB5 - Microsoft Kerberos 5)
|
||||
const GSSAPI_MECH_MS_KRB5 = "1.2.840.48018.1.2.2";
|
||||
|
||||
# Supported SASL stripping modes.
|
||||
type MessageMode = enum {
|
||||
MS_KRB5 = 1, # Payload starts with a 4 byte length followed by a wrap token that may or may not be sealed.
|
||||
TLS = 2, # Client/server used StartTLS, forward to SSL analyzer.
|
||||
MAYBE_ENCRYPTED = 3, # Use a heuristic to determine encrypted traffic.
|
||||
CLEARTEXT = 4, # Assume cleartext.
|
||||
ENCRYPTED = 5, # Assume encrypted.
|
||||
};
|
||||
|
||||
type Ctx = struct {
|
||||
messageMode: MessageMode; # Message dispatching mode
|
||||
saslMechanism: string; # The SASL mechanism selected by the client.
|
||||
startTlsRequested: bool; # Did the client use the StartTLS extended request?
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
public type Messages = unit {
|
||||
: MessageWrapper[];
|
||||
%context = Ctx;
|
||||
: MessageDispatch(self.context())[];
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
type SASLLayer = unit {
|
||||
# For the time being (before we support parsing the SASL layer) this unit
|
||||
# is used by MessageWrapper below to strip it (SASL) so that the parser
|
||||
# can attempt to resume parsing afterward. It also sets the success flag
|
||||
# if '\x30' is found, otherwise backtracks so that we can deal with encrypted
|
||||
# SASL payloads without raising a parse error.
|
||||
var success: bool = False;
|
||||
: bytes &until=b"\x30" {
|
||||
self.success = True;
|
||||
public type MessageDispatch = unit(ctx: Ctx&) {
|
||||
switch( ctx.messageMode ) {
|
||||
MessageMode::Undef -> : Message(ctx);
|
||||
MessageMode::MS_KRB5 -> : SaslMsKrb5Stripper(ctx);
|
||||
MessageMode::TLS -> : TlsForward; # never returns
|
||||
MessageMode::MAYBE_ENCRYPTED -> : MaybeEncrypted(ctx);
|
||||
MessageMode::CLEARTEXT -> : Message(ctx);
|
||||
MessageMode::ENCRYPTED -> : EncryptedMessage;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
type MaybeEncrypted = unit(ctx: Ctx&) {
|
||||
# A plaintext LDAP message always starts with at least 3 bytes and the first
|
||||
# byte is 0x30 for the sequence. A SASL encrypted message starts with a 4 byte
|
||||
# length field. The heuristic here is that if the first byte is a 0x30,
|
||||
# assume it's unencrypted LDAP. This should be pretty good, if it was an
|
||||
# encrypted/SASL wrapped message, it would have a size between 0x30000000 and
|
||||
# 0x30FFFFFF, meaning at least a size of ~768MB, which seems unlikely.
|
||||
var start: iterator<stream>;
|
||||
var saslLen: uint64;
|
||||
var mech: bytes;
|
||||
|
||||
on %init {
|
||||
self.start = self.input();
|
||||
# Don't have starts_with() on string, work around that.
|
||||
# https://github.com/zeek/spicy/issues/1807
|
||||
self.mech = ctx.saslMechanism.encode(spicy::Charset::UTF8);
|
||||
}
|
||||
|
||||
on %error {
|
||||
self.backtrack();
|
||||
first: uint8 {
|
||||
if ( $$ == 0x30 ) {
|
||||
ctx.messageMode = MessageMode::CLEARTEXT;
|
||||
} else {
|
||||
ctx.messageMode = MessageMode::ENCRYPTED;
|
||||
}
|
||||
}
|
||||
|
||||
# As a further heuristic, if encrypted mode was decided and the client
|
||||
# requested GSSAPI or GSS-SPNEGO (or we just didn't see it) peak a bit
|
||||
# into the SASL payload and check if it starts with a 0504 (WRAP_TOKEN).
|
||||
# If so, switch into KRB mode assuming that's what is being used and
|
||||
# have a chance seeing some more plaintext LDAP in non-sealed tokens.
|
||||
rem: uint8[3] if ( ctx.messageMode == MessageMode::ENCRYPTED && (|self.mech| == 0 || self.mech.starts_with(b"GSS")) ) {
|
||||
self.saslLen = (uint64(self.first) << 24) + (uint64($$[0]) << 16) + (uint64($$[1]) << 8) + uint64($$[2]);
|
||||
}
|
||||
|
||||
: uint16 if ( self.saslLen >= 2 ) {
|
||||
if ( $$ == 0x0504 ) {
|
||||
ctx.messageMode = MessageMode::MS_KRB5;
|
||||
}
|
||||
}
|
||||
|
||||
# Rewind the input.
|
||||
: void {
|
||||
# Prevent MessageDispatch from recursing endlessly.
|
||||
assert ctx.messageMode != MessageMode::MAYBE_ENCRYPTED;
|
||||
self.set_input(self.start);
|
||||
}
|
||||
|
||||
# One recursion to parse with the new ctx.messageMode setting.
|
||||
: MessageDispatch(ctx);
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
public type MessageWrapper = unit {
|
||||
# A wrapper around 'Message'. First, we try to parse a Message unit.
|
||||
# There are two possible outcomes:
|
||||
# (1) Success -> We consumed all bytes and successfully parsed a Message unit
|
||||
# (2) No success -> self.backtrack() is called in the Message unit,
|
||||
# so effectively we didn't consume any bytes yet.
|
||||
# The outcome can be determined by checking the `success` variable of the Message unit
|
||||
|
||||
# This success variable is different, because this keeps track of the status for the MessageWrapper object
|
||||
var success: bool = False;
|
||||
var message: Message;
|
||||
|
||||
# Here, we try to parse the message...
|
||||
: Message &try {
|
||||
|
||||
# ... and only if the Message unit successfully parsed, we can set
|
||||
# the status of this MessageWrapper's success to 'True'
|
||||
if ( $$.success == True ) {
|
||||
self.success = True;
|
||||
self.message = $$;
|
||||
}
|
||||
}
|
||||
|
||||
# If we failed to parse the message, then we're going to scan the remaining bytes for the '\x30'
|
||||
# start byte and try to parse a Message starting from that byte. This effectively
|
||||
# strips the SASL layer if SASL Signing was enabled. Until now, I haven't found A
|
||||
# better way to scan / determine the exact SASL header length yet, so we'll stick with this
|
||||
# for the time being. If the entire LDAP packet was encrypted with SASL, then we skip parsing for
|
||||
# now (in the long run we need to be parsing SASL/GSSAPI instead, in which case encrypted payloads
|
||||
# are just another message type).
|
||||
|
||||
# SASLLayer (see unit above) just consumes bytes &until=b"\x30" or backtracks if it isn't found
|
||||
# and sets a success flag we can use later to decide if those bytes contain a parsable message.
|
||||
var sasl_success: bool = False;
|
||||
: SASLLayer &try if ( self.success == False ) {
|
||||
if ( $$.success == True ) {
|
||||
self.sasl_success = True;
|
||||
}
|
||||
}
|
||||
var remainder: bytes;
|
||||
|
||||
# SASLLayer consumes the delimiter ('\x30'), and because this is the first byte of a valid LDAP message
|
||||
# we should re-add it to the remainder if the delimiter was found. If the delimiter was not found, we
|
||||
# leave the remainder empty, but note that the bytes must be consumed either way to avoid stalling the
|
||||
# parser and causing an infinite loop error.
|
||||
: bytes &eod if ( self.success == False ) {
|
||||
if ( self.sasl_success == True ) {
|
||||
self.remainder = b"\x30" + $$;
|
||||
}
|
||||
}
|
||||
|
||||
# Again, try to parse a Message unit. Be aware that in this will sometimes fail if the '\x30' byte is
|
||||
# also present in the SASL header.
|
||||
|
||||
# Also, we could try to do this recursively or try a few iterations, but for now I would suggest
|
||||
# to try this extra parsing once to get the best cost/benefit tradeoff.
|
||||
: Message &try &parse-from=self.remainder if ( self.success == False && self.sasl_success == True ) {
|
||||
if ( $$.success == True ) {
|
||||
self.success = True;
|
||||
self.message = $$;
|
||||
}
|
||||
}
|
||||
|
||||
# If we still didn't manage to parse a message (so the &try resulted in another backtrack()) then
|
||||
# this is probably an encrypted LDAP message, so skip it
|
||||
|
||||
} &convert=self.message;
|
||||
type EncryptedMessage = unit {
|
||||
len: uint32;
|
||||
: skip bytes &size=self.len;
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
public type Message = unit {
|
||||
type TlsForward = unit {
|
||||
# Just consume everything. This is hooked in ldap_zeek.spicy
|
||||
chunk: bytes &chunked &eod;
|
||||
};
|
||||
|
||||
type KrbWrapToken = unit {
|
||||
# https://datatracker.ietf.org/doc/html/rfc4121#section-4.2.6.2
|
||||
|
||||
# Number of bytes to expect *after* the payload.
|
||||
var trailer_ec: uint64;
|
||||
var header_ec: uint64;
|
||||
|
||||
ctx_flags: bitfield(8) {
|
||||
send_by_acceptor: 0;
|
||||
sealed: 1;
|
||||
acceptor_subkey: 2;
|
||||
};
|
||||
filler: skip b"\xff";
|
||||
ec: uint16; # extra count
|
||||
rrc: uint16 { # right rotation count
|
||||
# Handle rrc == ec or rrc == 0.
|
||||
if ( self.rrc == self.ec ) {
|
||||
self.header_ec = self.ec;
|
||||
} else if ( self.rrc == 0 ) {
|
||||
self.trailer_ec = self.ec;
|
||||
} else {
|
||||
if ( ! self.ctx_flags.sealed )
|
||||
# If it's sealed, we'll consume until &eod anyhow
|
||||
# and ec/rrc shouldn't apply, otherwise, bail.
|
||||
throw "Unhandled rc %s and ec %s" % (self.ec, self.rrc);
|
||||
}
|
||||
}
|
||||
|
||||
snd_seq: uint64;
|
||||
header_e: skip bytes &size=self.header_ec;
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
type SaslMsKrb5Stripper = unit(ctx: Ctx&) {
|
||||
# This is based on Wireshark output and example traffic we have. There's always
|
||||
# a 4 byte length field followed by the krb5_tok_id field in messages after
|
||||
# MS_KRB5 was selected. I haven't read enough specs to understand if it's
|
||||
# just this one case that works, or others could use the same stripping.
|
||||
var switch_size: uint64;
|
||||
|
||||
len: uint32;
|
||||
krb5_tok_id: uint16;
|
||||
|
||||
switch ( self.krb5_tok_id ) {
|
||||
0x0504 -> krb_wrap_token: KrbWrapToken;
|
||||
* -> : void;
|
||||
};
|
||||
|
||||
: skip bytes &size=0 {
|
||||
self.switch_size = self.len - (self.offset() - 4);
|
||||
if ( self?.krb_wrap_token )
|
||||
self.switch_size -= self.krb_wrap_token.trailer_ec;
|
||||
}
|
||||
|
||||
switch ( self?.krb_wrap_token && ! self.krb_wrap_token.ctx_flags.sealed ) {
|
||||
True -> : Message(ctx)[] &eod;
|
||||
* -> : skip bytes &eod;
|
||||
} &size=self.switch_size;
|
||||
|
||||
# Consume the wrap token trailer, if any.
|
||||
trailer_e: skip bytes &size=self.krb_wrap_token.trailer_ec if (self?.krb_wrap_token);
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
public type Message = unit(ctx: Ctx&) {
|
||||
var messageID: int64;
|
||||
var opcode: ProtocolOpcode = ProtocolOpcode::Undef;
|
||||
var applicationBytes: bytes;
|
||||
var unsetResultDefault: Result;
|
||||
var result_: Result& = self.unsetResultDefault;
|
||||
var obj: string = "";
|
||||
var arg: string = "";
|
||||
var success: bool = False;
|
||||
var seqHeaderLen: uint64;
|
||||
var msgLen: uint64;
|
||||
var opLen: uint64;
|
||||
|
||||
: ASN1::ASN1Message(True) {
|
||||
if (($$.head.tag.type_ == ASN1::ASN1Type::Sequence) &&
|
||||
($$.body?.seq) &&
|
||||
(|$$.body.seq.submessages| >= 2)) {
|
||||
if ($$.body.seq.submessages[0].body?.num_value) {
|
||||
self.messageID = $$.body.seq.submessages[0].body.num_value;
|
||||
seqHeader: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Universal && $$.tag.type_ == ASN1::ASN1Type::Sequence) {
|
||||
self.msgLen = $$.len.len;
|
||||
}
|
||||
if ($$.body.seq.submessages[1]?.application_id) {
|
||||
self.opcode = cast<ProtocolOpcode>(cast<uint8>($$.body.seq.submessages[1].application_id));
|
||||
self.applicationBytes = $$.body.seq.submessages[1].application_data;
|
||||
|
||||
# Use offset() to determine how many bytes the seqHeader took. This
|
||||
# needs to be done after the seqHeader field hook.
|
||||
: void {
|
||||
self.seqHeaderLen = self.offset();
|
||||
}
|
||||
|
||||
messageID_header: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Universal && $$.tag.type_ == ASN1::ASN1Type::Integer);
|
||||
: ASN1::ASN1Body(self.messageID_header, False) {
|
||||
self.messageID = $$.num_value;
|
||||
}
|
||||
|
||||
protocolOp: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Application) {
|
||||
self.opcode = cast<ProtocolOpcode>(cast<uint8>($$.tag.type_));
|
||||
self.opLen = $$.len.len;
|
||||
}
|
||||
|
||||
switch ( self.opcode ) {
|
||||
ProtocolOpcode::BIND_REQUEST -> BIND_REQUEST: BindRequest(self);
|
||||
ProtocolOpcode::BIND_RESPONSE -> BIND_RESPONSE: BindResponse(self);
|
||||
ProtocolOpcode::BIND_REQUEST -> BIND_REQUEST: BindRequest(self, ctx);
|
||||
ProtocolOpcode::BIND_RESPONSE -> BIND_RESPONSE: BindResponse(self, ctx);
|
||||
ProtocolOpcode::UNBIND_REQUEST -> UNBIND_REQUEST: UnbindRequest(self);
|
||||
ProtocolOpcode::SEARCH_REQUEST -> SEARCH_REQUEST: SearchRequest(self);
|
||||
ProtocolOpcode::SEARCH_RESULT_ENTRY -> SEARCH_RESULT_ENTRY: SearchResultEntry(self);
|
||||
|
@ -262,22 +343,20 @@ public type Message = unit {
|
|||
# just commenting this out, it will stop processing LDAP Messages in this connection
|
||||
ProtocolOpcode::ADD_REQUEST -> ADD_REQUEST: NotImplemented(self);
|
||||
ProtocolOpcode::COMPARE_REQUEST -> COMPARE_REQUEST: NotImplemented(self);
|
||||
ProtocolOpcode::EXTENDED_REQUEST -> EXTENDED_REQUEST: NotImplemented(self);
|
||||
ProtocolOpcode::EXTENDED_RESPONSE -> EXTENDED_RESPONSE: NotImplemented(self);
|
||||
ProtocolOpcode::EXTENDED_REQUEST -> EXTENDED_REQUEST: ExtendedRequest(self, ctx);
|
||||
ProtocolOpcode::EXTENDED_RESPONSE -> EXTENDED_RESPONSE: ExtendedResponse(self, ctx);
|
||||
ProtocolOpcode::INTERMEDIATE_RESPONSE -> INTERMEDIATE_RESPONSE: NotImplemented(self);
|
||||
ProtocolOpcode::MOD_DN_REQUEST -> MOD_DN_REQUEST: NotImplemented(self);
|
||||
ProtocolOpcode::SEARCH_RESULT_REFERENCE -> SEARCH_RESULT_REFERENCE: NotImplemented(self);
|
||||
} &parse-from=self.applicationBytes if ( self.opcode );
|
||||
} &size=self.opLen;
|
||||
|
||||
on %error {
|
||||
self.backtrack();
|
||||
}
|
||||
# Ensure some invariants hold after parsing the command.
|
||||
: void &requires=(self.offset() >= self.seqHeaderLen);
|
||||
: void &requires=(self.msgLen >= (self.offset() - self.seqHeaderLen));
|
||||
|
||||
on %done {
|
||||
self.success = True;
|
||||
}
|
||||
|
||||
} &requires=((self?.messageID) && (self?.opcode) && (self.opcode != ProtocolOpcode::Undef));
|
||||
# Eat the controls field if it exists.
|
||||
: skip bytes &size=self.msgLen - (self.offset() - self.seqHeaderLen);
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Bind Operation
|
||||
|
@ -288,18 +367,94 @@ public type BindAuthType = enum {
|
|||
BIND_AUTH_SASL = 3,
|
||||
};
|
||||
|
||||
type GSS_SPNEGO_negTokenInit = unit {
|
||||
oidHeader: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Universal && $$.tag.type_ == ASN1::ASN1Type::ObjectIdentifier);
|
||||
oid: ASN1::ASN1ObjectIdentifier(self.oidHeader.len.len) &requires=(self.oid.oidstring == "1.3.6.1.5.5.2");
|
||||
|
||||
# TODO: Parse the rest of negTokenInit.
|
||||
: skip bytes &eod;
|
||||
};
|
||||
|
||||
# Peak into GSS-SPNEGO payload and ensure it is indeed GSS-SPNEGO,
|
||||
# or GSS-SPNEGO with a NTMLSSP payload that starts with NTLMSSP.
|
||||
type GSS_SPNEGO_Init = unit {
|
||||
# This is the optional octet string in SaslCredentials.
|
||||
credentialsHeader: ASN1::ASN1Header &requires=($$.tag.type_ == ASN1::ASN1Type::OctetString);
|
||||
|
||||
# Now we either have the initial message as specified in RFC2743 or
|
||||
# a continuation from RFC4178, or a "NTMLSSP" signature.
|
||||
#
|
||||
# 60 -> APPLICATION [0] https://datatracker.ietf.org/doc/html/rfc2743#page-81
|
||||
# a1 -> CHOICE [1] https://www.rfc-editor.org/rfc/rfc4178#section-4.2
|
||||
# "NTMLSSP" https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-nlmp/907f519d-6217-45b1-b421-dca10fc8af0d
|
||||
#
|
||||
switch {
|
||||
-> spnegoInitByte: uint8(0x60);
|
||||
-> spnegoChoiceByte: uint8(0xa1);
|
||||
-> ntlmSignature: skip b"NTLMSSP"; # Unsupported, should forward to child analyzer!
|
||||
};
|
||||
|
||||
spnegoLen: skip ASN1::LengthType if (self?.spnegoInitByte || self?.spnegoChoiceByte);
|
||||
|
||||
# Peak into the SPNEGO_negTokenInit
|
||||
spnegoInitial: skip GSS_SPNEGO_negTokenInit if (self?.spnegoInitByte);
|
||||
};
|
||||
|
||||
type SaslCredentials = unit() {
|
||||
mechanism: ASN1::ASN1Message(True) &convert=$$.body.str_value;
|
||||
# TODO: if we want to parse the (optional) credentials string
|
||||
mechanism: ASN1::ASN1Message(False) &convert=$$.body.str_value;
|
||||
|
||||
# Peak into GSS-SPNEGO payload if we have any.
|
||||
switch ( self.mechanism ) {
|
||||
"GSS-SPNEGO" -> gss_spnego: GSS_SPNEGO_Init;
|
||||
* -> : skip bytes &eod;
|
||||
};
|
||||
};
|
||||
|
||||
type GSS_SPNEGO_Subsequent = unit {
|
||||
switch {
|
||||
-> spnegoChoiceByte: uint8(0xa1);
|
||||
-> ntmlSignature: skip b"NTLMSSP"; # Unsupported, should forward to NTLM!
|
||||
};
|
||||
|
||||
spnegoChoiceLen: skip ASN1::LengthType if (self?.spnegoChoiceByte);
|
||||
negTokenResp: GSS_SPNEGO_negTokenResp if (self?.spnegoChoiceByte);
|
||||
};
|
||||
|
||||
type GSS_SPNEGO_negTokenResp = unit {
|
||||
var accepted: bool;
|
||||
var supportedMech: ASN1::ASN1Message;
|
||||
|
||||
# Parse the contained Sequence.
|
||||
seq: ASN1::ASN1Message(True) {
|
||||
for ( msg in $$.body.seq.submessages ) {
|
||||
# https://www.rfc-editor.org/rfc/rfc4178#section-4.2.2
|
||||
if ( msg.application_id == 0 ) {
|
||||
self.accepted = msg.application_data == b"\x0a\x01\x00";
|
||||
} else if ( msg.application_id == 1 ) {
|
||||
self.supportedMech = msg;
|
||||
} else if ( msg.application_id == 2 ) {
|
||||
# ignore responseToken
|
||||
} else if ( msg.application_id == 3 ) {
|
||||
# ignore mechListMec
|
||||
} else {
|
||||
throw "unhandled NegTokenResp id %s" % msg.application_id;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch ( self?.supportedMech ) {
|
||||
True -> supportedMechOid: ASN1::ASN1Message(False) &convert=$$.body.str_value;
|
||||
* -> : void;
|
||||
} &parse-from=self.supportedMech.application_data;
|
||||
};
|
||||
|
||||
# TODO(fox-ds): A helper unit for requests for which no handling has been implemented.
|
||||
# Eventually all uses of this unit should be replaced with actual parsers so this unit can be removed.
|
||||
type NotImplemented = unit(inout message: Message) {
|
||||
# Do nothing
|
||||
: skip bytes &eod;
|
||||
};
|
||||
|
||||
type BindRequest = unit(inout message: Message) {
|
||||
type BindRequest = unit(inout message: Message, ctx: Ctx&) {
|
||||
version: ASN1::ASN1Message(True) &convert=$$.body.num_value;
|
||||
name: ASN1::ASN1Message(True) &convert=$$.body.str_value {
|
||||
message.obj = self.name;
|
||||
|
@ -323,15 +478,57 @@ type BindRequest = unit(inout message: Message) {
|
|||
saslCreds: SaslCredentials() &parse-from=self.authData if ((self.authType == BindAuthType::BIND_AUTH_SASL) &&
|
||||
(|self.authData| > 0)) {
|
||||
message.arg = self.saslCreds.mechanism;
|
||||
ctx.saslMechanism = self.saslCreds.mechanism;
|
||||
}
|
||||
} &requires=((self?.authType) && (self.authType != BindAuthType::Undef));
|
||||
} &requires=(self?.authType && (self.authType != BindAuthType::Undef));
|
||||
|
||||
type BindResponse = unit(inout message: Message) {
|
||||
type ServerSaslCreds = unit {
|
||||
serverSaslCreds: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific && $$.tag.type_ == ASN1::ASN1Type(7));
|
||||
payload: bytes &size=self.serverSaslCreds.len.len;
|
||||
};
|
||||
|
||||
type BindResponse = unit(inout message: Message, ctx: Ctx&) {
|
||||
: Result {
|
||||
message.result_ = $$;
|
||||
|
||||
# The SASL authentication was successful. We do not actually
|
||||
# know if the following messages are encrypted or not. This may be
|
||||
# mechanism and parameter specific. For example SCRAM-SHA512 or NTLM
|
||||
# will continue to be cleartext, while SRP or GSS-API would be encrypted.
|
||||
#
|
||||
# Switch messageMode into trial mode which is explored via MessageDispatch
|
||||
# and the MaybeEncrypted unit.
|
||||
#
|
||||
# Note, messageMode may be changed to something more specific like
|
||||
# MS_KRB5 below.
|
||||
if ( |ctx.saslMechanism| > 0 && $$.code == ResultCode::SUCCESS ) {
|
||||
ctx.messageMode = MessageMode::MAYBE_ENCRYPTED;
|
||||
}
|
||||
}
|
||||
|
||||
# TODO: if we want to parse SASL credentials returned
|
||||
# Try to parse serverSaslCreds if there's any input remaining. This
|
||||
# unit is parsed with &size, so &eod here works.
|
||||
#
|
||||
# Technically we should be able to tell from the ASN.1 structure
|
||||
# if the serverSaslCreds field exists or not. But, not sure we can
|
||||
# check if there's any bytes left at this point outside of passing
|
||||
# in the length and playing with offset().
|
||||
serverSaslCreds: ServerSaslCreds[] &eod;
|
||||
|
||||
# If the client requested GSS-SPNEGO, try to parse the server's response
|
||||
# to switch message mode.
|
||||
gss_spnego: GSS_SPNEGO_Subsequent &parse-from=self.serverSaslCreds[0].payload
|
||||
if (ctx.saslMechanism == "GSS-SPNEGO" && |self.serverSaslCreds| > 0) {
|
||||
|
||||
if ( $$?.negTokenResp ) {
|
||||
local token = $$.negTokenResp;
|
||||
if ( token.accepted && token?.supportedMechOid ) {
|
||||
if ( token.supportedMechOid == GSSAPI_MECH_MS_KRB5 ) {
|
||||
ctx.messageMode = MessageMode::MS_KRB5;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
|
@ -879,16 +1076,61 @@ type AbandonRequest = unit(inout message: Message) {
|
|||
#-----------------------------------------------------------------------------
|
||||
# Extended Operation
|
||||
# https://tools.ietf.org/html/rfc4511#section-4.12
|
||||
type ExtendedRequest = unit(inout message: Message, ctx: Ctx&) {
|
||||
var requestValue: bytes;
|
||||
header: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific);
|
||||
requestName: bytes &size=self.header.len.len &convert=$$.decode(spicy::Charset::ASCII) {
|
||||
message.obj = $$;
|
||||
}
|
||||
|
||||
# TODO: implement ExtendedRequest
|
||||
# type ExtendedRequest = unit(inout message: Message) {
|
||||
#
|
||||
# };
|
||||
# If there's more byte to parse, it's the requestValue.
|
||||
: ASN1::ASN1Message(False)
|
||||
&requires=($$.head.tag.class == ASN1::ASN1Class::ContextSpecific)
|
||||
if ( message.opLen > self.offset() ) {
|
||||
|
||||
# TODO: implement ExtendedResponse
|
||||
# type ExtendedResponse = unit(inout message: Message) {
|
||||
#
|
||||
# };
|
||||
self.requestValue = $$.application_data;
|
||||
}
|
||||
|
||||
on %done {
|
||||
# Did the client request StartTLS?
|
||||
#
|
||||
# https://datatracker.ietf.org/doc/html/rfc4511#section-4.14.1
|
||||
if ( self.requestName == "1.3.6.1.4.1.1466.20037" )
|
||||
ctx.startTlsRequested = True;
|
||||
}
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
type ExtendedResponseEntry = unit(inout r: ExtendedResponse) {
|
||||
: ASN1::ASN1Message(False) &requires=($$.head.tag.class == ASN1::ASN1Class::ContextSpecific) {
|
||||
if ( $$.head.tag.type_ == ASN1::ASN1Type(10) )
|
||||
r.responseName = $$.application_data;
|
||||
else if ( $$.head.tag.type_ == ASN1::ASN1Type(11) )
|
||||
r.responseValue = $$.application_data;
|
||||
else
|
||||
throw "Unhandled extended response tag %s" % $$.head.tag;
|
||||
}
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
type ExtendedResponse = unit(inout message: Message, ctx: Ctx&) {
|
||||
var responseName: bytes;
|
||||
var responseValue: bytes;
|
||||
: Result {
|
||||
message.result_ = $$;
|
||||
}
|
||||
|
||||
# Try to parse two ASN1 entries if there are bytes left in the unit.
|
||||
# Both are optional and identified by context specific tagging.
|
||||
: ExtendedResponseEntry(self) if ( message.opLen > self.offset() );
|
||||
: ExtendedResponseEntry(self) if ( message.opLen > self.offset() );
|
||||
|
||||
on %done {
|
||||
# Client had requested StartTLS and it was successful? Switch to SSL.
|
||||
if ( ctx.startTlsRequested && message.result_.code == ResultCode::SUCCESS )
|
||||
ctx.messageMode = MessageMode::TLS;
|
||||
}
|
||||
};
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# IntermediateResponse Message
|
||||
|
@ -899,6 +1141,6 @@ type AbandonRequest = unit(inout message: Message) {
|
|||
#
|
||||
# };
|
||||
|
||||
on LDAP::MessageWrapper::%done {
|
||||
on LDAP::Message::%done {
|
||||
spicy::accept_input();
|
||||
}
|
||||
|
|
12
src/analyzer/protocol/ldap/ldap_zeek.spicy
Normal file
12
src/analyzer/protocol/ldap/ldap_zeek.spicy
Normal file
|
@ -0,0 +1,12 @@
|
|||
module LDAP_Zeek;
|
||||
|
||||
import LDAP;
|
||||
import zeek;
|
||||
|
||||
on LDAP::TlsForward::%init {
|
||||
zeek::protocol_begin("SSL");
|
||||
}
|
||||
|
||||
on LDAP::TlsForward::chunk {
|
||||
zeek::protocol_data_in(zeek::is_orig(), self.chunk);
|
||||
}
|
|
@ -90,8 +90,8 @@ type ModbusTCP_PDU(is_orig: bool) = record {
|
|||
|
||||
type ModbusTCP_TransportHeader = record {
|
||||
tid: uint16; # Transaction identifier
|
||||
pid: uint16; # Protocol identifier
|
||||
len: uint16; # Length of everything after this field
|
||||
pid: uint16 &enforce(pid == 0); # Protocol identifier
|
||||
len: uint16 &enforce(len >= 2); # Length of everything after this field
|
||||
uid: uint8; # Unit identifier (previously 'slave address')
|
||||
fc: uint8; # MODBUS function code (see function_codes enum)
|
||||
} &byteorder=bigendian, &let {
|
||||
|
|
|
@ -5,4 +5,5 @@ zeek_add_plugin(
|
|||
POP3.cc
|
||||
Plugin.cc
|
||||
BIFS
|
||||
consts.bif
|
||||
events.bif)
|
||||
|
|
|
@ -3,8 +3,6 @@
|
|||
|
||||
#include "zeek/analyzer/protocol/pop3/POP3.h"
|
||||
|
||||
#include "zeek/zeek-config.h"
|
||||
|
||||
#include <cctype>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
@ -12,6 +10,7 @@
|
|||
#include "zeek/Base64.h"
|
||||
#include "zeek/Reporter.h"
|
||||
#include "zeek/analyzer/Manager.h"
|
||||
#include "zeek/analyzer/protocol/pop3/consts.bif.h"
|
||||
#include "zeek/analyzer/protocol/pop3/events.bif.h"
|
||||
|
||||
namespace zeek::analyzer::pop3 {
|
||||
|
@ -41,6 +40,7 @@ POP3_Analyzer::POP3_Analyzer(Connection* conn) : analyzer::tcp::TCP_ApplicationA
|
|||
authLines = 0;
|
||||
|
||||
mail = nullptr;
|
||||
unknown_client_cmds = 0;
|
||||
|
||||
cl_orig = new analyzer::tcp::ContentLine_Analyzer(conn, true);
|
||||
AddSupportAnalyzer(cl_orig);
|
||||
|
@ -205,6 +205,19 @@ void POP3_Analyzer::ProcessRequest(int length, const char* line) {
|
|||
// keep a list of pending commands.
|
||||
cmds.emplace_back(line);
|
||||
|
||||
// Prevent unbounded state growth of cmds if there are no matching
|
||||
// server replies by simply dropping the oldest command.
|
||||
//
|
||||
// This may be caused by packet drops of the server side, one-sided
|
||||
// traffic, or analyzing the wrong protocol (Redis), etc.
|
||||
if ( zeek::BifConst::POP3::max_pending_commands > 0 ) {
|
||||
if ( cmds.size() > zeek::BifConst::POP3::max_pending_commands ) {
|
||||
Weird("pop3_client_too_many_pending_commands");
|
||||
|
||||
cmds.pop_front();
|
||||
}
|
||||
}
|
||||
|
||||
if ( cmds.size() == 1 )
|
||||
// Not waiting for another server response,
|
||||
// so we can process it immediately.
|
||||
|
@ -236,10 +249,19 @@ void POP3_Analyzer::ProcessClientCmd() {
|
|||
|
||||
if ( cmd_code == -1 ) {
|
||||
if ( ! waitingForAuthentication ) {
|
||||
Weird("pop3_client_command_unknown");
|
||||
Weird("pop3_client_command_unknown", (tokens.size() > 0 ? tokens[0].c_str() : "???"));
|
||||
if ( subState == detail::POP3_WOK )
|
||||
subState = detail::POP3_OK;
|
||||
|
||||
++unknown_client_cmds;
|
||||
|
||||
if ( zeek::BifConst::POP3::max_unknown_client_commands > 0 ) {
|
||||
if ( unknown_client_cmds > zeek::BifConst::POP3::max_unknown_client_commands ) {
|
||||
AnalyzerViolation("too many unknown client commands");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -299,6 +321,7 @@ void POP3_Analyzer::ProcessClientCmd() {
|
|||
if ( masterState == detail::POP3_AUTHORIZATION ) {
|
||||
POP3Event(pop3_request, true, cmd, message);
|
||||
if ( ! *message ) {
|
||||
// This is the client requesting a list of AUTH mechanisms available.
|
||||
requestForMultiLine = true;
|
||||
state = detail::AUTH;
|
||||
subState = detail::POP3_WOK;
|
||||
|
@ -555,7 +578,7 @@ void POP3_Analyzer::ProcessReply(int length, const char* line) {
|
|||
AnalyzerViolation(util::fmt("unknown server command (%s)", (tokens.size() > 0 ? tokens[0].c_str() : "???")),
|
||||
line, length);
|
||||
|
||||
Weird("pop3_server_command_unknown");
|
||||
Weird("pop3_server_command_unknown", (tokens.size() > 0 ? tokens[0].c_str() : "???"));
|
||||
if ( subState == detail::POP3_WOK )
|
||||
subState = detail::POP3_OK;
|
||||
}
|
||||
|
|
|
@ -4,11 +4,9 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "zeek/analyzer/protocol/login/NVT.h"
|
||||
#include "zeek/analyzer/protocol/mime/MIME.h"
|
||||
#include "zeek/analyzer/protocol/tcp/ContentLine.h"
|
||||
#include "zeek/analyzer/protocol/tcp/TCP.h"
|
||||
|
@ -106,6 +104,7 @@ protected:
|
|||
|
||||
analyzer::mime::MIME_Mail* mail;
|
||||
std::list<std::string> cmds;
|
||||
zeek_uint_t unknown_client_cmds;
|
||||
|
||||
private:
|
||||
bool tls;
|
||||
|
|
2
src/analyzer/protocol/pop3/consts.bif
Normal file
2
src/analyzer/protocol/pop3/consts.bif
Normal file
|
@ -0,0 +1,2 @@
|
|||
const POP3::max_pending_commands: count;
|
||||
const POP3::max_unknown_client_commands: count;
|
|
@ -60,7 +60,6 @@ const size_t AEAD_IV_LEN = 12;
|
|||
const size_t AEAD_HP_LEN = 16;
|
||||
const size_t AEAD_SAMPLE_LENGTH = 16;
|
||||
const size_t AEAD_TAG_LENGTH = 16;
|
||||
const size_t MAXIMUM_PACKET_LENGTH = 1500;
|
||||
const size_t MAXIMUM_PACKET_NUMBER_LENGTH = 4;
|
||||
|
||||
EVP_CIPHER_CTX* get_aes_128_ecb() {
|
||||
|
@ -153,12 +152,17 @@ Function that calls the AEAD decryption routine, and returns the decrypted data.
|
|||
*/
|
||||
hilti::rt::Bytes decrypt(const std::vector<uint8_t>& client_key, const hilti::rt::Bytes& all_data,
|
||||
uint64_t payload_length, const DecryptionInformation& decryptInfo) {
|
||||
int out, out2, res;
|
||||
int out, out2;
|
||||
|
||||
if ( payload_length < decryptInfo.packet_number_length + AEAD_TAG_LENGTH )
|
||||
throw hilti::rt::RuntimeError(hilti::rt::fmt("payload too small %ld < %ld", payload_length,
|
||||
decryptInfo.packet_number_length + AEAD_TAG_LENGTH));
|
||||
|
||||
// Bail on large payloads, somewhat arbitrarily. 10k allows for Jumbo frames
|
||||
// and sometimes the fuzzer produces packets up to that size as well.
|
||||
if ( payload_length > 10000 )
|
||||
throw hilti::rt::RuntimeError(hilti::rt::fmt("payload_length too large %ld", payload_length));
|
||||
|
||||
const uint8_t* encrypted_payload = data_as_uint8(all_data) + decryptInfo.unprotected_header.size();
|
||||
|
||||
int encrypted_payload_size = payload_length - decryptInfo.packet_number_length - AEAD_TAG_LENGTH;
|
||||
|
@ -173,7 +177,8 @@ hilti::rt::Bytes decrypt(const std::vector<uint8_t>& client_key, const hilti::rt
|
|||
const void* tag_to_check = all_data.data() + decryptInfo.unprotected_header.size() + encrypted_payload_size;
|
||||
int tag_to_check_length = AEAD_TAG_LENGTH;
|
||||
|
||||
std::array<uint8_t, MAXIMUM_PACKET_LENGTH> decrypt_buffer;
|
||||
// Allocate memory for decryption.
|
||||
std::vector<uint8_t> decrypt_buffer(encrypted_payload_size);
|
||||
|
||||
// Setup context
|
||||
auto* ctx = get_aes_128_gcm();
|
||||
|
@ -197,7 +202,8 @@ hilti::rt::Bytes decrypt(const std::vector<uint8_t>& client_key, const hilti::rt
|
|||
EVP_CipherUpdate(ctx, decrypt_buffer.data(), &out, encrypted_payload, encrypted_payload_size);
|
||||
|
||||
// Validate whether the decryption was successful or not
|
||||
EVP_CipherFinal_ex(ctx, NULL, &out2);
|
||||
if ( EVP_CipherFinal_ex(ctx, NULL, &out2) == 0 )
|
||||
throw hilti::rt::RuntimeError("decryption failed");
|
||||
|
||||
// Copy the decrypted data from the decrypted buffer into a Bytes instance.
|
||||
return hilti::rt::Bytes(decrypt_buffer.data(), decrypt_buffer.data() + out);
|
||||
|
|
|
@ -413,7 +413,7 @@ type SMB2_error_response(header: SMB2_Header) = record {
|
|||
byte_count : uint32;
|
||||
# This is implemented incorrectly and is disabled for now.
|
||||
#error_data : SMB2_error_data(header, byte_count);
|
||||
stuff : bytestring &restofdata &transient;
|
||||
stuff : bytestring &length=byte_count &transient;
|
||||
} &byteorder = littleendian;
|
||||
|
||||
type SMB2_logoff_request(header: SMB2_Header) = record {
|
||||
|
|
|
@ -3,9 +3,9 @@
|
|||
#include <arpa/inet.h>
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/opensslv.h>
|
||||
#include <vector>
|
||||
|
||||
#include "zeek/Reporter.h"
|
||||
#include "zeek/analyzer/Manager.h"
|
||||
#include "zeek/analyzer/protocol/ssl/events.bif.h"
|
||||
#include "zeek/analyzer/protocol/ssl/ssl_pac.h"
|
||||
#include "zeek/analyzer/protocol/ssl/tls-handshake_pac.h"
|
||||
|
@ -22,6 +22,8 @@
|
|||
|
||||
namespace zeek::analyzer::ssl {
|
||||
|
||||
using byte_buffer = std::vector<u_char>;
|
||||
|
||||
template<typename T>
|
||||
static inline T MSB(const T a) {
|
||||
return ((a >> 8) & 0xff);
|
||||
|
@ -32,12 +34,13 @@ static inline T LSB(const T a) {
|
|||
return (a & 0xff);
|
||||
}
|
||||
|
||||
static std::basic_string<unsigned char> fmt_seq(uint32_t num) {
|
||||
std::basic_string<unsigned char> out(4, '\0');
|
||||
static byte_buffer fmt_seq(uint32_t num) {
|
||||
byte_buffer out(4, '\0');
|
||||
out.reserve(13);
|
||||
uint32_t netnum = htonl(num);
|
||||
out.append(reinterpret_cast<u_char*>(&netnum), 4);
|
||||
out.append(5, '\0');
|
||||
uint8_t* p = reinterpret_cast<uint8_t*>(&netnum);
|
||||
out.insert(out.end(), p, p + 4);
|
||||
out.insert(out.end(), 5, '\0');
|
||||
return out;
|
||||
}
|
||||
|
||||
|
@ -271,8 +274,8 @@ bool SSL_Analyzer::TryDecryptApplicationData(int len, const u_char* data, bool i
|
|||
const u_char* s_iv = keys.data() + 68;
|
||||
|
||||
// FIXME: should we change types here?
|
||||
u_char* encrypted = (u_char*)data;
|
||||
size_t encrypted_len = len;
|
||||
const u_char* encrypted = data;
|
||||
int encrypted_len = len;
|
||||
|
||||
if ( is_orig )
|
||||
c_seq++;
|
||||
|
@ -280,14 +283,15 @@ bool SSL_Analyzer::TryDecryptApplicationData(int len, const u_char* data, bool i
|
|||
s_seq++;
|
||||
|
||||
// AEAD nonce, length 12
|
||||
std::basic_string<unsigned char> s_aead_nonce;
|
||||
byte_buffer s_aead_nonce;
|
||||
s_aead_nonce.reserve(12);
|
||||
if ( is_orig )
|
||||
s_aead_nonce.assign(c_iv, 4);
|
||||
s_aead_nonce.insert(s_aead_nonce.end(), c_iv, c_iv + 4);
|
||||
else
|
||||
s_aead_nonce.assign(s_iv, 4);
|
||||
s_aead_nonce.insert(s_aead_nonce.end(), s_iv, s_iv + 4);
|
||||
|
||||
// this should be the explicit counter
|
||||
s_aead_nonce.append(encrypted, 8);
|
||||
s_aead_nonce.insert(s_aead_nonce.end(), encrypted, encrypted + 8);
|
||||
assert(s_aead_nonce.size() == 12);
|
||||
|
||||
EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new();
|
||||
|
@ -310,28 +314,28 @@ bool SSL_Analyzer::TryDecryptApplicationData(int len, const u_char* data, bool i
|
|||
else
|
||||
EVP_DecryptInit(ctx, EVP_aes_256_gcm(), s_wk, s_aead_nonce.data());
|
||||
|
||||
EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, encrypted + encrypted_len);
|
||||
EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, const_cast<u_char*>(encrypted + encrypted_len));
|
||||
|
||||
// AEAD tag
|
||||
std::basic_string<unsigned char> s_aead_tag;
|
||||
byte_buffer s_aead_tag;
|
||||
if ( is_orig )
|
||||
s_aead_tag = fmt_seq(c_seq);
|
||||
else
|
||||
s_aead_tag = fmt_seq(s_seq);
|
||||
|
||||
assert(s_aead_tag.size() == 13);
|
||||
s_aead_tag[8] = content_type;
|
||||
s_aead_tag[9] = MSB(raw_tls_version);
|
||||
s_aead_tag[10] = LSB(raw_tls_version);
|
||||
s_aead_tag[11] = MSB(encrypted_len);
|
||||
s_aead_tag[12] = LSB(encrypted_len);
|
||||
assert(s_aead_tag.size() == 13);
|
||||
|
||||
auto decrypted = std::vector<u_char>(encrypted_len +
|
||||
16); // see OpenSSL manpage - 16 is the block size for the supported cipher
|
||||
int decrypted_len = 0;
|
||||
|
||||
EVP_DecryptUpdate(ctx, NULL, &decrypted_len, s_aead_tag.data(), s_aead_tag.size());
|
||||
EVP_DecryptUpdate(ctx, decrypted.data(), &decrypted_len, (const u_char*)encrypted, encrypted_len);
|
||||
EVP_DecryptUpdate(ctx, decrypted.data(), &decrypted_len, encrypted, encrypted_len);
|
||||
assert(static_cast<decltype(decrypted.size())>(decrypted_len) <= decrypted.size());
|
||||
decrypted.resize(decrypted_len);
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@ import spicy;
|
|||
public type Message = unit {
|
||||
switch {
|
||||
-> prio: Priority;
|
||||
-> void;
|
||||
-> : void;
|
||||
};
|
||||
|
||||
msg: bytes &eod;
|
||||
|
|
|
@ -273,7 +273,12 @@ void TCP_Reassembler::MatchUndelivered(uint64_t up_to_seq, bool use_last_upper)
|
|||
if ( b.upper > last_reassem_seq )
|
||||
break;
|
||||
|
||||
tcp_analyzer->Conn()->Match(zeek::detail::Rule::PAYLOAD, b.block, b.Size(), false, false, IsOrig(), false);
|
||||
// Note: Even though this passes bol=false, at the point where
|
||||
// this code runs, the matcher is re-initialized resulting in
|
||||
// undelivered data implicitly being bol-anchored. It's unclear
|
||||
// if that was intended, but there's hardly a right way here,
|
||||
// so that seems ok.
|
||||
tcp_analyzer->Conn()->Match(zeek::detail::Rule::PAYLOAD, b.block, b.Size(), IsOrig(), false, false, false);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -46,6 +46,7 @@ Raw::Raw(ReaderFrontend* frontend) : ReaderBackend(frontend), file(nullptr, fclo
|
|||
sep_length = BifConst::InputRaw::record_separator->Len();
|
||||
|
||||
bufpos = 0;
|
||||
bufsize = 0;
|
||||
|
||||
stdin_fileno = fileno(stdin);
|
||||
stdout_fileno = fileno(stdout);
|
||||
|
@ -420,59 +421,74 @@ bool Raw::DoInit(const ReaderInfo& info, int num_fields, const Field* const* fie
|
|||
|
||||
int64_t Raw::GetLine(FILE* arg_file) {
|
||||
errno = 0;
|
||||
int pos = 0; // strstr_n only works on ints - so no use to use something different here
|
||||
int offset = 0;
|
||||
|
||||
if ( ! buf )
|
||||
if ( ! buf ) {
|
||||
buf = std::unique_ptr<char[]>(new char[block_size]);
|
||||
|
||||
int repeats = 1;
|
||||
bufpos = 0;
|
||||
bufsize = block_size;
|
||||
}
|
||||
|
||||
for ( ;; ) {
|
||||
size_t readbytes = fread(buf.get() + bufpos + offset, 1, block_size - bufpos, arg_file);
|
||||
pos += bufpos + readbytes;
|
||||
// printf("Pos: %d\n", pos);
|
||||
bufpos = offset = 0; // read full block size in next read...
|
||||
size_t readbytes = fread(buf.get() + bufpos, 1, bufsize - bufpos, arg_file);
|
||||
|
||||
if ( pos == 0 && errno != 0 )
|
||||
bufpos = bufpos + readbytes;
|
||||
|
||||
// Nothing in the buffer and errno set, yield.
|
||||
if ( bufpos == 0 && errno != 0 )
|
||||
break;
|
||||
|
||||
// researching everything each time is a bit... cpu-intensive. But otherwise we have
|
||||
// to deal with situations where the separator is multi-character and split over multiple
|
||||
// reads...
|
||||
int found = util::strstr_n(pos, (unsigned char*)buf.get(), separator.size(), (unsigned char*)separator.c_str());
|
||||
//
|
||||
// memmem() would be more appropriate, but not available on Windows.
|
||||
int found = util::strstr_n(bufpos, reinterpret_cast<u_char*>(buf.get()), separator.size(),
|
||||
reinterpret_cast<const u_char*>(separator.c_str()));
|
||||
|
||||
if ( found == -1 ) {
|
||||
// we did not find it and have to search again in the next try. resize buffer....
|
||||
// we did not find it and have to search again in the next try.
|
||||
// but first check if we encountered the file end - because if we did this was it.
|
||||
if ( feof(arg_file) != 0 ) {
|
||||
if ( pos == 0 )
|
||||
if ( bufpos == 0 )
|
||||
return -1; // signal EOF - and that we had no more data.
|
||||
else {
|
||||
outbuf = std::move(buf); // buf is null after this
|
||||
return pos;
|
||||
return bufpos; // flush out remaining buffered data as line
|
||||
}
|
||||
}
|
||||
|
||||
repeats++;
|
||||
// bah, we cannot use realloc because we would have to change the delete in the manager
|
||||
// to a free.
|
||||
std::unique_ptr<char[]> newbuf = std::unique_ptr<char[]>(new char[block_size * repeats]);
|
||||
memcpy(newbuf.get(), buf.get(), block_size * (repeats - 1));
|
||||
// No separator found and buffer full, realloc and retry reading more right away.
|
||||
if ( bufpos == bufsize ) {
|
||||
std::unique_ptr<char[]> newbuf = std::unique_ptr<char[]>(new char[bufsize + block_size]);
|
||||
memcpy(newbuf.get(), buf.get(), bufsize);
|
||||
buf = std::move(newbuf);
|
||||
offset = block_size * (repeats - 1);
|
||||
bufsize = bufsize + block_size;
|
||||
}
|
||||
else {
|
||||
// Short or empty read, some data in the buffer, but no separator found
|
||||
// and also not EOF: This is likely reading from a pipe where the separator
|
||||
// wasn't yet produced. Yield to retry on the next heartbeat.
|
||||
return -2;
|
||||
}
|
||||
}
|
||||
else {
|
||||
size_t sep_idx = static_cast<size_t>(found);
|
||||
assert(sep_idx <= bufsize - sep_length);
|
||||
size_t remaining = bufpos - sep_idx - sep_length;
|
||||
|
||||
outbuf = std::move(buf);
|
||||
|
||||
if ( found < pos ) {
|
||||
if ( remaining > 0 ) {
|
||||
// we have leftovers. copy them into the buffer for the next line
|
||||
assert(remaining <= block_size);
|
||||
buf = std::unique_ptr<char[]>(new char[block_size]);
|
||||
memcpy(buf.get(), outbuf.get() + found + sep_length, pos - found - sep_length);
|
||||
bufpos = pos - found - sep_length;
|
||||
bufpos = remaining;
|
||||
bufsize = block_size;
|
||||
|
||||
memcpy(buf.get(), outbuf.get() + sep_idx + sep_length, remaining);
|
||||
}
|
||||
|
||||
return found;
|
||||
return sep_idx;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -58,7 +58,8 @@ private:
|
|||
std::string separator;
|
||||
unsigned int sep_length; // length of the separator
|
||||
|
||||
int bufpos;
|
||||
size_t bufpos; // Where in buf to read more data.
|
||||
size_t bufsize; // Currently allocated size of buf.
|
||||
std::unique_ptr<char[]> buf;
|
||||
std::unique_ptr<char[]> outbuf;
|
||||
|
||||
|
|
|
@ -1629,6 +1629,9 @@ ExprPtr AssignExpr::Reduce(Reducer* c, StmtPtr& red_stmt) {
|
|||
StmtPtr lhs_stmt;
|
||||
StmtPtr rhs_stmt;
|
||||
|
||||
if ( GetType()->Tag() == TYPE_ANY && op2->GetType()->Tag() != TYPE_ANY )
|
||||
op2 = with_location_of(make_intrusive<CoerceToAnyExpr>(op2), op2);
|
||||
|
||||
auto lhs_e = field_e->Op()->Reduce(c, lhs_stmt);
|
||||
auto rhs_e = op2->ReduceToFieldAssignment(c, rhs_stmt);
|
||||
|
||||
|
@ -3091,6 +3094,23 @@ CoerceToAnyExpr::CoerceToAnyExpr(ExprPtr arg_op) : UnaryExpr(EXPR_TO_ANY_COERCE,
|
|||
type = base_type(TYPE_ANY);
|
||||
}
|
||||
|
||||
bool CoerceToAnyExpr::IsReduced(Reducer* c) const { return HasReducedOps(c); }
|
||||
|
||||
ExprPtr CoerceToAnyExpr::Reduce(Reducer* c, StmtPtr& red_stmt) {
|
||||
if ( c->Optimizing() )
|
||||
op = c->UpdateExpr(op);
|
||||
|
||||
red_stmt = nullptr;
|
||||
|
||||
if ( ! op->IsSingleton(c) )
|
||||
op = op->ReduceToSingleton(c, red_stmt);
|
||||
|
||||
if ( c->Optimizing() )
|
||||
return ThisPtr();
|
||||
else
|
||||
return AssignToTemporary(c, red_stmt);
|
||||
}
|
||||
|
||||
ValPtr CoerceToAnyExpr::Fold(Val* v) const { return {NewRef{}, v}; }
|
||||
|
||||
ExprPtr CoerceToAnyExpr::Duplicate() { return SetSucc(new CoerceToAnyExpr(op->Duplicate())); }
|
||||
|
|
|
@ -73,7 +73,7 @@ void FixedCatArg::RenderInto(ZVal* zframe, int slot, char*& res) {
|
|||
n = modp_dtoa2(d, res, 6);
|
||||
res += n;
|
||||
|
||||
if ( util::approx_equal(d, nearbyint(d), 1e-9) && std::isfinite(d) && ! strchr(tmp, 'e') ) {
|
||||
if ( util::approx_equal(d, nearbyint(d), 1e-9) && std::isfinite(d) ) {
|
||||
// disambiguate from integer
|
||||
*(res++) = '.';
|
||||
*(res++) = '0';
|
||||
|
|
|
@ -42,7 +42,6 @@ public:
|
|||
|
||||
protected:
|
||||
TypePtr t;
|
||||
char tmp[256];
|
||||
};
|
||||
|
||||
class StringCatArg : public CatArg {
|
||||
|
|
|
@ -934,13 +934,13 @@ eval auto& vsel = frame[z.v2].vector_val->RawVec();
|
|||
auto& v1 = frame[z.v3].vector_val->RawVec();
|
||||
auto& v2 = frame[z.v4].vector_val->RawVec();
|
||||
auto n = v1.size();
|
||||
auto res = new vector<std::optional<ZVal>>(n);
|
||||
vector<std::optional<ZVal>> res(n);
|
||||
for ( auto i = 0U; i < n; ++i )
|
||||
if ( vsel[i] )
|
||||
(*res)[i] = vsel[i]->int_val ? v1[i] : v2[i];
|
||||
res[i] = vsel[i]->int_val ? v1[i] : v2[i];
|
||||
auto& full_res = frame[z.v1].vector_val;
|
||||
Unref(full_res);
|
||||
full_res = new VectorVal(cast_intrusive<VectorType>(z.t), res);
|
||||
full_res = new VectorVal(cast_intrusive<VectorType>(z.t), &res);
|
||||
|
||||
# Our instruction format doesn't accommodate two constants, so for
|
||||
# the singular case of a V ? C1 : C2 conditional, we split it into
|
||||
|
@ -1254,9 +1254,14 @@ macro AssignFromRec()
|
|||
for ( size_t i = 0U; i < n; ++i )
|
||||
{
|
||||
auto rhs_i = rhs->RawField(rhs_map[i]);
|
||||
auto& init_i = init_vals[lhs_map[i]];
|
||||
if ( is_managed[i] )
|
||||
{
|
||||
zeek::Ref(rhs_i.ManagedVal());
|
||||
init_vals[lhs_map[i]] = rhs_i;
|
||||
if ( init_i )
|
||||
ZVal::DeleteManagedType(*init_i);
|
||||
}
|
||||
init_i = rhs_i;
|
||||
}
|
||||
|
||||
op Construct-Known-Record-From
|
||||
|
@ -1561,7 +1566,9 @@ eval if ( frame[z.v2].vector_val->Size() > 0 )
|
|||
unary-expr-op To-Any-Coerce
|
||||
op-type X
|
||||
set-type $1
|
||||
eval AssignV1(ZVal(frame[z.v2].ToVal(z.t), ZAM::any_base_type))
|
||||
eval auto orig_lhs = frame[z.v1]; /* hold in case z.v1 = z.v2 */
|
||||
frame[z.v1] = ZVal($1.ToVal(z.t), ZAM::any_base_type);
|
||||
ZVal::DeleteManagedType(orig_lhs);
|
||||
|
||||
unary-expr-op From-Any-Coerce
|
||||
op-type X
|
||||
|
@ -1604,7 +1611,19 @@ op Any-Vector-Elem-Assign
|
|||
op1-read
|
||||
set-type $1
|
||||
type VVV
|
||||
eval EvalVectorElemAssign(, vv->Assign(ind, frame[z.v3].ToVal(z.t)))
|
||||
eval auto ind = frame[z.v2].AsCount();
|
||||
auto vv = frame[z.v1].AsVector();
|
||||
auto yt = vv->RawYieldTypes();
|
||||
if ( ind < vv->Size() && yt && (*yt)[ind] && ZVal::IsManagedType((*yt)[ind]) )
|
||||
{
|
||||
auto orig_elem = vv->RawVec()[ind];
|
||||
if ( ! vv->Assign(ind, frame[z.v3].ToVal(z.t)) )
|
||||
ZAM_run_time_error(z.loc, "value used but not set");
|
||||
if ( orig_elem )
|
||||
ZVal::DeleteManagedType(*orig_elem);
|
||||
}
|
||||
else if ( ! vv->Assign(ind, frame[z.v3].ToVal(z.t)) )
|
||||
ZAM_run_time_error(z.loc, "value used but not set");
|
||||
|
||||
op Vector-Elem-Assign-Any
|
||||
op1-read
|
||||
|
@ -2133,7 +2152,10 @@ type VVVV
|
|||
eval NextVectorIterCore(z.v3, v4)
|
||||
frame[z.v1].uint_val = si.iter;
|
||||
if ( z.is_managed )
|
||||
{
|
||||
ZVal::DeleteManagedType(frame[z.v2]);
|
||||
frame[z.v2] = BuildVal(vv[si.iter]->ToVal(z.t), z.t);
|
||||
}
|
||||
else
|
||||
frame[z.v2] = *vv[si.iter];
|
||||
si.IterFinished();
|
||||
|
@ -2145,7 +2167,10 @@ internal-op Next-Vector-Blank-Iter-Val-Var
|
|||
type VVV
|
||||
eval NextVectorIterCore(z.v2, v3)
|
||||
if ( z.is_managed )
|
||||
{
|
||||
ZVal::DeleteManagedType(frame[z.v1]);
|
||||
frame[z.v1] = BuildVal(vv[si.iter]->ToVal(z.t), z.t);
|
||||
}
|
||||
else
|
||||
frame[z.v1] = *vv[si.iter];
|
||||
si.IterFinished();
|
||||
|
@ -3068,11 +3093,11 @@ macro AnalyzerName(tag)
|
|||
auto atype = tag.ToVal(z.t);
|
||||
auto val = atype->AsEnumVal();
|
||||
Unref(frame[z.v1].string_val);
|
||||
plugin::Component* component = zeek::analyzer_mgr->Lookup(val);
|
||||
plugin::Component* component = zeek::analyzer_mgr->Lookup(val, false);
|
||||
if ( ! component )
|
||||
component = zeek::packet_mgr->Lookup(val);
|
||||
component = zeek::packet_mgr->Lookup(val, false);
|
||||
if ( ! component )
|
||||
component = zeek::file_mgr->Lookup(val);
|
||||
component = zeek::file_mgr->Lookup(val, false);
|
||||
if ( component )
|
||||
frame[z.v1].string_val = new StringVal(component->CanonicalName());
|
||||
else
|
||||
|
|
|
@ -214,8 +214,9 @@ static void vec_exec(ZOp op, TypePtr t, VectorVal*& v1, const VectorVal* v2, con
|
|||
std::string err = "overflow promoting from "; \
|
||||
err += ov_err; \
|
||||
err += " arithmetic value"; \
|
||||
/* The run-time error will throw an exception, so recover intermediary memory. */ \
|
||||
delete res_zv; \
|
||||
ZAM_run_time_error(z.loc, err.c_str()); \
|
||||
res[i] = std::nullopt; \
|
||||
} \
|
||||
else \
|
||||
res[i] = ZVal(cast(vi)); \
|
||||
|
@ -326,6 +327,55 @@ std::shared_ptr<ProfVec> ZBody::BuildProfVec() const {
|
|||
return pv;
|
||||
}
|
||||
|
||||
// Helper class for managing ZBody state to ensure that memory is recovered
|
||||
// if a ZBody is exited via an exception.
|
||||
class ZBodyStateManager {
|
||||
public:
|
||||
// If fixed_frame is nil then creates a dynamic frame.
|
||||
ZBodyStateManager(ZVal* _fixed_frame, int frame_size, const std::vector<int>& _managed_slots,
|
||||
TableIterVec* _tiv_ptr)
|
||||
: fixed_frame(_fixed_frame), managed_slots(_managed_slots), tiv_ptr(_tiv_ptr) {
|
||||
if ( fixed_frame )
|
||||
frame = fixed_frame;
|
||||
else {
|
||||
frame = new ZVal[frame_size];
|
||||
for ( auto s : managed_slots )
|
||||
frame[s].ClearManagedVal();
|
||||
}
|
||||
}
|
||||
|
||||
void SetTableIters(TableIterVec* _tiv_ptr) { tiv_ptr = _tiv_ptr; }
|
||||
|
||||
~ZBodyStateManager() {
|
||||
if ( tiv_ptr )
|
||||
for ( auto& ti : *tiv_ptr )
|
||||
ti.Clear();
|
||||
|
||||
if ( fixed_frame ) {
|
||||
// Recover memory and reset for use in next call.
|
||||
for ( auto s : managed_slots ) {
|
||||
ZVal::DeleteManagedType(frame[s]);
|
||||
frame[s].ClearManagedVal();
|
||||
}
|
||||
}
|
||||
|
||||
else {
|
||||
// Recover memory, no need to reset.
|
||||
for ( auto s : managed_slots )
|
||||
ZVal::DeleteManagedType(frame[s]);
|
||||
delete[] frame;
|
||||
}
|
||||
}
|
||||
|
||||
auto Frame() { return frame; }
|
||||
|
||||
private:
|
||||
ZVal* fixed_frame;
|
||||
ZVal* frame;
|
||||
const std::vector<int>& managed_slots;
|
||||
TableIterVec* tiv_ptr;
|
||||
};
|
||||
|
||||
ValPtr ZBody::Exec(Frame* f, StmtFlowType& flow) {
|
||||
unsigned int pc = 0;
|
||||
|
||||
|
@ -358,22 +408,22 @@ ValPtr ZBody::Exec(Frame* f, StmtFlowType& flow) {
|
|||
}
|
||||
#endif
|
||||
|
||||
ZVal* frame;
|
||||
ZBodyStateManager state_mgr(fixed_frame, frame_size, managed_slots, &table_iters);
|
||||
std::unique_ptr<TableIterVec> local_table_iters;
|
||||
std::vector<StepIterInfo> step_iters(num_step_iters);
|
||||
|
||||
ZVal* frame;
|
||||
|
||||
if ( fixed_frame )
|
||||
frame = fixed_frame;
|
||||
else {
|
||||
frame = new ZVal[frame_size];
|
||||
// Clear slots for which we do explicit memory management.
|
||||
for ( auto s : managed_slots )
|
||||
frame[s].ClearManagedVal();
|
||||
frame = state_mgr.Frame();
|
||||
|
||||
if ( ! table_iters.empty() ) {
|
||||
local_table_iters = std::make_unique<TableIterVec>(table_iters.size());
|
||||
*local_table_iters = table_iters;
|
||||
tiv_ptr = &(*local_table_iters);
|
||||
state_mgr.SetTableIters(nullptr); // unique_ptr will clean it up directly
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -423,33 +473,6 @@ ValPtr ZBody::Exec(Frame* f, StmtFlowType& flow) {
|
|||
++pc;
|
||||
}
|
||||
|
||||
auto result = ret_type ? ret_u->ToVal(ret_type) : nullptr;
|
||||
|
||||
if ( fixed_frame ) {
|
||||
// Make sure we don't have any dangling iterators.
|
||||
for ( auto& ti : table_iters )
|
||||
ti.Clear();
|
||||
|
||||
// Free slots for which we do explicit memory management,
|
||||
// preparing them for reuse.
|
||||
for ( auto& ms : managed_slots ) {
|
||||
auto& v = frame[ms];
|
||||
ZVal::DeleteManagedType(v);
|
||||
v.ClearManagedVal();
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Free those slots for which we do explicit memory management.
|
||||
// No need to then clear them, as we're about to throw away
|
||||
// the entire frame.
|
||||
for ( auto& ms : managed_slots ) {
|
||||
auto& v = frame[ms];
|
||||
ZVal::DeleteManagedType(v);
|
||||
}
|
||||
|
||||
delete[] frame;
|
||||
}
|
||||
|
||||
#ifdef ENABLE_ZAM_PROFILE
|
||||
if ( profiling_active ) {
|
||||
tot_CPU_time += util::curr_CPU_time() - start_CPU_time;
|
||||
|
@ -460,7 +483,7 @@ ValPtr ZBody::Exec(Frame* f, StmtFlowType& flow) {
|
|||
}
|
||||
#endif
|
||||
|
||||
return result;
|
||||
return ret_type ? ret_u->ToVal(ret_type) : nullptr;
|
||||
}
|
||||
|
||||
void ZBody::ReportExecutionProfile(ProfMap& pm) {
|
||||
|
@ -604,8 +627,7 @@ static void vec_exec(ZOp op, TypePtr t, VectorVal*& v1, const VectorVal* v2, con
|
|||
|
||||
auto& vec2 = v2->RawVec();
|
||||
auto n = vec2.size();
|
||||
auto vec1_ptr = new vector<std::optional<ZVal>>(n);
|
||||
auto& vec1 = *vec1_ptr;
|
||||
vector<std::optional<ZVal>> vec1(n);
|
||||
|
||||
for ( auto i = 0U; i < n; ++i ) {
|
||||
if ( vec2[i] )
|
||||
|
@ -620,7 +642,7 @@ static void vec_exec(ZOp op, TypePtr t, VectorVal*& v1, const VectorVal* v2, con
|
|||
|
||||
auto vt = cast_intrusive<VectorType>(std::move(t));
|
||||
auto old_v1 = v1;
|
||||
v1 = new VectorVal(std::move(vt), vec1_ptr);
|
||||
v1 = new VectorVal(std::move(vt), &vec1);
|
||||
Unref(old_v1);
|
||||
}
|
||||
|
||||
|
@ -631,8 +653,13 @@ static void vec_exec(ZOp op, TypePtr t, VectorVal*& v1, const VectorVal* v2, con
|
|||
auto& vec2 = v2->RawVec();
|
||||
auto& vec3 = v3->RawVec();
|
||||
auto n = vec2.size();
|
||||
auto vec1_ptr = new vector<std::optional<ZVal>>(n);
|
||||
auto& vec1 = *vec1_ptr;
|
||||
|
||||
if ( vec3.size() != n ) {
|
||||
ZAM_run_time_error(util::fmt("vector operands are of different sizes (%d vs. %d)", int(n), int(vec3.size())));
|
||||
return;
|
||||
}
|
||||
|
||||
vector<std::optional<ZVal>> vec1(n);
|
||||
|
||||
for ( auto i = 0U; i < vec2.size(); ++i ) {
|
||||
if ( vec2[i] && vec3[i] )
|
||||
|
@ -647,7 +674,7 @@ static void vec_exec(ZOp op, TypePtr t, VectorVal*& v1, const VectorVal* v2, con
|
|||
|
||||
auto vt = cast_intrusive<VectorType>(std::move(t));
|
||||
auto old_v1 = v1;
|
||||
v1 = new VectorVal(std::move(vt), vec1_ptr);
|
||||
v1 = new VectorVal(std::move(vt), &vec1);
|
||||
Unref(old_v1);
|
||||
}
|
||||
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#include <glob.h>
|
||||
|
||||
#include <exception>
|
||||
#include <iterator>
|
||||
#include <limits>
|
||||
#include <utility>
|
||||
|
||||
|
@ -32,6 +33,7 @@
|
|||
#include "zeek/spicy/file-analyzer.h"
|
||||
#include "zeek/spicy/packet-analyzer.h"
|
||||
#include "zeek/spicy/protocol-analyzer.h"
|
||||
#include "zeek/spicy/runtime-support.h"
|
||||
#include "zeek/zeek-config-paths.h"
|
||||
|
||||
using namespace zeek;
|
||||
|
@ -74,9 +76,13 @@ void Manager::registerProtocolAnalyzer(const std::string& name, hilti::rt::Proto
|
|||
info.name_zeek = hilti::rt::replace(name, "::", "_");
|
||||
info.name_zeekygen = hilti::rt::fmt("<Spicy-%s>", name);
|
||||
info.protocol = proto;
|
||||
info.ports = ports;
|
||||
info.linker_scope = linker_scope;
|
||||
|
||||
// Store ports in a deterministic order. We can't (easily) sort the
|
||||
// `hilti::rt::Vector` unfortunately.
|
||||
std::copy(ports.begin(), ports.end(), std::back_inserter(info.ports));
|
||||
std::sort(info.ports.begin(), info.ports.end());
|
||||
|
||||
// We may have that analyzer already iff it was previously pre-registered
|
||||
// without a linker scope. We'll then only set the scope now.
|
||||
if ( auto t = _analyzer_name_to_tag_type.find(info.name_zeek); t != _analyzer_name_to_tag_type.end() ) {
|
||||
|
@ -587,25 +593,25 @@ static ::TransportProto transport_protocol(const hilti::rt::Port port) {
|
|||
}
|
||||
|
||||
static void hook_accept_input() {
|
||||
auto cookie = static_cast<rt::Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<rt::Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto x = cookie->protocol ) {
|
||||
auto tag = spicy_mgr->tagForProtocolAnalyzer(x->analyzer->GetAnalyzerTag());
|
||||
SPICY_DEBUG(hilti::rt::fmt("confirming protocol %s", tag.AsString()));
|
||||
return x->analyzer->AnalyzerConfirmation(tag);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void hook_decline_input(const std::string& reason) {
|
||||
auto cookie = static_cast<rt::Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<rt::Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto x = cookie->protocol ) {
|
||||
auto tag = spicy_mgr->tagForProtocolAnalyzer(x->analyzer->GetAnalyzerTag());
|
||||
SPICY_DEBUG(hilti::rt::fmt("rejecting protocol %s: %s", tag.AsString(), reason));
|
||||
return x->analyzer->AnalyzerViolation(reason.c_str(), nullptr, 0, tag);
|
||||
}
|
||||
}
|
||||
else
|
||||
SPICY_DEBUG(hilti::rt::fmt("attempting to reject protocol without cookie: %s", reason));
|
||||
}
|
||||
|
||||
void Manager::InitPostScript() {
|
||||
|
@ -701,14 +707,25 @@ void Manager::InitPostScript() {
|
|||
if ( ! tag )
|
||||
reporter->InternalError("cannot get analyzer tag for '%s'", p.name_analyzer.c_str());
|
||||
|
||||
auto register_analyzer_for_port = [&](auto tag, const hilti::rt::Port& port_) {
|
||||
SPICY_DEBUG(hilti::rt::fmt(" Scheduling analyzer for port %s", port_));
|
||||
|
||||
// Well-known ports are registered in scriptland, so we'll raise an
|
||||
// event that will do it for us through a predefined handler.
|
||||
zeek::Args vals = Args();
|
||||
vals.emplace_back(tag.AsVal());
|
||||
vals.emplace_back(zeek::spicy::rt::to_val(port_, base_type(TYPE_PORT)));
|
||||
EventHandlerPtr handler = event_registry->Register("spicy_analyzer_for_port");
|
||||
event_mgr.Enqueue(handler, vals);
|
||||
};
|
||||
|
||||
for ( const auto& ports : p.ports ) {
|
||||
const auto proto = ports.begin.protocol();
|
||||
|
||||
// Port ranges are closed intervals.
|
||||
for ( auto port = ports.begin.port(); port <= ports.end.port(); ++port ) {
|
||||
const auto port_ = hilti::rt::Port(port, proto);
|
||||
SPICY_DEBUG(hilti::rt::fmt(" Scheduling analyzer for port %s", port_));
|
||||
analyzer_mgr->RegisterAnalyzerForPort(tag, transport_protocol(port_), port);
|
||||
register_analyzer_for_port(tag, port_);
|
||||
|
||||
// Don't double register in case of single-port ranges.
|
||||
if ( ports.begin.port() == ports.end.port() )
|
||||
|
@ -727,7 +744,7 @@ void Manager::InitPostScript() {
|
|||
continue;
|
||||
|
||||
SPICY_DEBUG(hilti::rt::fmt(" Scheduling analyzer for port %s", port.port));
|
||||
analyzer_mgr->RegisterAnalyzerForPort(tag, transport_protocol(port.port), port.port.port());
|
||||
register_analyzer_for_port(tag, port.port);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -85,7 +85,7 @@ public:
|
|||
*
|
||||
* @param name name of the analyzer as defined in its EVT file
|
||||
* @param proto analyzer's transport-layer protocol
|
||||
* @param prts well-known ports for the analyzer; it'll be activated automatically for these
|
||||
* @param ports well-known ports for the analyzer; it'll be activated automatically for these
|
||||
* @param parser_orig name of the Spicy parser for the originator side; must match the name that
|
||||
* Spicy registers the unit's parser with
|
||||
* @param parser_resp name of the Spicy parser for the originator side; must match the name that
|
||||
|
@ -343,7 +343,7 @@ private:
|
|||
std::string name_parser_resp;
|
||||
std::string name_replaces;
|
||||
hilti::rt::Protocol protocol = hilti::rt::Protocol::Undef;
|
||||
hilti::rt::Vector<::zeek::spicy::rt::PortRange> ports;
|
||||
std::vector<::zeek::spicy::rt::PortRange> ports; // we keep this sorted
|
||||
std::string linker_scope;
|
||||
|
||||
// Computed and available once the analyzer has been registered.
|
||||
|
|
|
@ -19,6 +19,11 @@ struct PortRange {
|
|||
|
||||
hilti::rt::Port begin; /**< first port in the range */
|
||||
hilti::rt::Port end; /**< last port in the range */
|
||||
|
||||
bool operator<(const PortRange& other) const {
|
||||
// Just get us a deterministic order.
|
||||
return std::tie(begin, end) < std::tie(other.begin, other.end);
|
||||
}
|
||||
};
|
||||
|
||||
inline bool operator==(const PortRange& a, const PortRange& b) {
|
||||
|
|
|
@ -223,9 +223,8 @@ TypePtr rt::event_arg_type(const EventHandlerPtr& handler, const hilti::rt::inte
|
|||
|
||||
ValPtr& rt::current_conn() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/current_conn");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( cookie->cache.conn )
|
||||
return cookie->cache.conn;
|
||||
|
||||
|
@ -233,15 +232,15 @@ ValPtr& rt::current_conn() {
|
|||
cookie->cache.conn = x->analyzer->Conn()->GetVal();
|
||||
return cookie->cache.conn;
|
||||
}
|
||||
else
|
||||
}
|
||||
|
||||
throw ValueUnavailable("$conn not available");
|
||||
}
|
||||
|
||||
ValPtr& rt::current_is_orig() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/current_is_orig");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( cookie->cache.is_orig )
|
||||
return cookie->cache.is_orig;
|
||||
|
||||
|
@ -249,21 +248,22 @@ ValPtr& rt::current_is_orig() {
|
|||
cookie->cache.is_orig = val_mgr->Bool(x->is_orig);
|
||||
return cookie->cache.is_orig;
|
||||
}
|
||||
else
|
||||
}
|
||||
|
||||
throw ValueUnavailable("$is_orig not available");
|
||||
}
|
||||
|
||||
void rt::debug(const std::string& msg) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/debug");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
if ( ! cookie )
|
||||
return SPICY_DEBUG(msg);
|
||||
|
||||
rt::debug(*cookie, msg);
|
||||
}
|
||||
|
||||
void rt::debug(const Cookie& cookie, const std::string& msg) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/debug");
|
||||
std::string name;
|
||||
std::string id;
|
||||
|
||||
if ( const auto p = cookie.protocol ) {
|
||||
auto name = p->analyzer->GetAnalyzerName();
|
||||
|
@ -285,11 +285,13 @@ void rt::debug(const Cookie& cookie, const std::string& msg) {
|
|||
inline rt::cookie::FileStateStack* _file_state_stack(rt::Cookie* cookie) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/file_state_stack");
|
||||
|
||||
if ( cookie ) {
|
||||
if ( auto c = cookie->protocol )
|
||||
return c->is_orig ? &c->fstate_orig : &c->fstate_resp;
|
||||
else if ( auto f = cookie->file )
|
||||
return &f->fstate;
|
||||
else
|
||||
}
|
||||
|
||||
throw rt::ValueUnavailable("no current connection or file available");
|
||||
}
|
||||
|
||||
|
@ -313,24 +315,23 @@ inline const rt::cookie::FileState* _file_state(rt::Cookie* cookie, std::optiona
|
|||
|
||||
ValPtr rt::current_file() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/current_file");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto x = cookie->file )
|
||||
return x->analyzer->GetFile()->ToVal();
|
||||
else if ( auto* fstate = _file_state(cookie, {}) ) {
|
||||
if ( auto* f = file_mgr->LookupFile(fstate->fid) )
|
||||
return f->ToVal();
|
||||
}
|
||||
}
|
||||
|
||||
throw ValueUnavailable("$file not available");
|
||||
}
|
||||
|
||||
ValPtr rt::current_packet() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/current_packet");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto c = cookie->packet ) {
|
||||
if ( ! c->packet_val )
|
||||
// We cache the built value in case we need it multiple times.
|
||||
|
@ -338,32 +339,33 @@ ValPtr rt::current_packet() {
|
|||
|
||||
return c->packet_val;
|
||||
}
|
||||
else
|
||||
}
|
||||
|
||||
throw ValueUnavailable("$packet not available");
|
||||
}
|
||||
|
||||
hilti::rt::Bool rt::is_orig() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/is_orig");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto x = cookie->protocol )
|
||||
return x->is_orig;
|
||||
else
|
||||
}
|
||||
|
||||
throw ValueUnavailable("is_orig() not available in current context");
|
||||
}
|
||||
|
||||
std::string rt::uid() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/uid");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto c = cookie->protocol ) {
|
||||
// Retrieve the ConnVal() so that we ensure the UID has been set.
|
||||
c->analyzer->ConnVal();
|
||||
return c->analyzer->Conn()->GetUID().Base62("C");
|
||||
}
|
||||
else
|
||||
}
|
||||
|
||||
throw ValueUnavailable("uid() not available in current context");
|
||||
}
|
||||
|
||||
|
@ -395,49 +397,48 @@ std::tuple<hilti::rt::Address, hilti::rt::Port, hilti::rt::Address, hilti::rt::P
|
|||
hilti::rt::cannot_be_reached();
|
||||
};
|
||||
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto c = cookie->protocol ) {
|
||||
const auto* conn = c->analyzer->Conn();
|
||||
return std::make_tuple(convert_address(conn->OrigAddr()), convert_port(conn->OrigPort(), conn->ConnTransport()),
|
||||
return std::make_tuple(convert_address(conn->OrigAddr()),
|
||||
convert_port(conn->OrigPort(), conn->ConnTransport()),
|
||||
convert_address(conn->RespAddr()),
|
||||
convert_port(conn->RespPort(), conn->ConnTransport()));
|
||||
}
|
||||
else
|
||||
}
|
||||
|
||||
throw ValueUnavailable("conn_id() not available in current context");
|
||||
}
|
||||
|
||||
void rt::flip_roles() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/flip_roles");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
rt::debug(*cookie, "flipping roles");
|
||||
|
||||
if ( auto x = cookie->protocol )
|
||||
x->analyzer->Conn()->FlipRoles();
|
||||
else
|
||||
return x->analyzer->Conn()->FlipRoles();
|
||||
}
|
||||
|
||||
throw ValueUnavailable("flip_roles() not available in current context");
|
||||
}
|
||||
|
||||
hilti::rt::integer::safe<uint64_t> rt::number_packets() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/number_packets");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto x = cookie->protocol ) {
|
||||
return x->num_packets;
|
||||
}
|
||||
else
|
||||
}
|
||||
|
||||
throw ValueUnavailable("number_packets() not available in current context");
|
||||
}
|
||||
|
||||
void rt::confirm_protocol() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/confirm_protocol");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( cookie->cache.confirmed )
|
||||
return;
|
||||
|
||||
|
@ -447,6 +448,8 @@ void rt::confirm_protocol() {
|
|||
cookie->cache.confirmed = true;
|
||||
return x->analyzer->AnalyzerConfirmation(tag);
|
||||
}
|
||||
}
|
||||
|
||||
throw ValueUnavailable("no current connection available");
|
||||
}
|
||||
|
||||
|
@ -471,17 +474,16 @@ void rt::reject_protocol(const std::string& reason) {
|
|||
|
||||
void rt::weird(const std::string& id, const std::string& addl) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/weird");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( const auto x = cookie->protocol )
|
||||
x->analyzer->Weird(id.c_str(), addl.data());
|
||||
return x->analyzer->Weird(id.c_str(), addl.data());
|
||||
else if ( const auto x = cookie->file )
|
||||
zeek::reporter->Weird(x->analyzer->GetFile(), id.c_str(), addl.data());
|
||||
else if ( const auto x = cookie->packet ) {
|
||||
x->analyzer->Weird(id.c_str(), x->packet, addl.c_str());
|
||||
return zeek::reporter->Weird(x->analyzer->GetFile(), id.c_str(), addl.data());
|
||||
else if ( const auto x = cookie->packet )
|
||||
return x->analyzer->Weird(id.c_str(), x->packet, addl.c_str());
|
||||
}
|
||||
else
|
||||
|
||||
throw ValueUnavailable("none of $conn, $file, or $packet available for weird reporting");
|
||||
}
|
||||
|
||||
|
@ -499,11 +501,11 @@ void rt::protocol_begin(const std::optional<std::string>& analyzer, const ::hilt
|
|||
// doesn't need to track what the other side already did.
|
||||
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( ! cookie || ! cookie->protocol )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
auto c = cookie->protocol;
|
||||
if ( ! c )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
switch ( proto.value() ) {
|
||||
case ::hilti::rt::Protocol::TCP: {
|
||||
|
@ -547,12 +549,12 @@ void rt::protocol_begin(const ::hilti::rt::Protocol& proto) { return protocol_be
|
|||
|
||||
rt::ProtocolHandle rt::protocol_handle_get_or_create(const std::string& analyzer, const ::hilti::rt::Protocol& proto) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/protocol_handle_get_or_create");
|
||||
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
if ( ! cookie || ! cookie->protocol )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
auto c = cookie->protocol;
|
||||
if ( ! c )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
switch ( proto.value() ) {
|
||||
case ::hilti::rt::Protocol::TCP: {
|
||||
|
@ -623,11 +625,11 @@ static void protocol_data_in(const hilti::rt::Bool& is_orig, const hilti::rt::By
|
|||
const std::optional<rt::ProtocolHandle>& h) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/protocol_data_in");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( ! cookie || ! cookie->protocol )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
auto c = cookie->protocol;
|
||||
if ( ! c )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
// We need to copy the data here to be on the safe side: the streaming
|
||||
// input methods expect the data to stay around until they return. At first
|
||||
|
@ -719,11 +721,11 @@ void rt::protocol_gap(const hilti::rt::Bool& is_orig, const hilti::rt::integer::
|
|||
const hilti::rt::integer::safe<uint64_t>& len, const std::optional<rt::ProtocolHandle>& h) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/protocol_gap");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( ! cookie || ! cookie->protocol )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
auto c = cookie->protocol;
|
||||
if ( ! c )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
switch ( h->protocol().value() ) {
|
||||
case ::hilti::rt::Protocol::TCP: {
|
||||
|
@ -761,25 +763,25 @@ void rt::protocol_gap(const hilti::rt::Bool& is_orig, const hilti::rt::integer::
|
|||
|
||||
void rt::protocol_end() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/protocol_end");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
auto c = cookie->protocol;
|
||||
if ( ! c )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
for ( const auto& i : c->analyzer->GetChildren() )
|
||||
c->analyzer->RemoveChildAnalyzer(i);
|
||||
}
|
||||
}
|
||||
|
||||
void rt::protocol_handle_close(const ProtocolHandle& handle) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/protocol_handle_close");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( ! cookie || ! cookie->protocol )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
auto c = cookie->protocol;
|
||||
if ( ! c )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
switch ( handle.protocol().value() ) {
|
||||
case ::hilti::rt::Protocol::TCP: {
|
||||
|
@ -827,7 +829,8 @@ rt::cookie::FileState* rt::cookie::FileStateStack::push(std::optional<std::strin
|
|||
fid = *fid_provided;
|
||||
else {
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
if ( ! cookie )
|
||||
throw ValueUnavailable("no current connection available");
|
||||
|
||||
if ( auto c = cookie->protocol ) {
|
||||
auto tag = spicy_mgr->tagForProtocolAnalyzer(c->analyzer->GetAnalyzerTag());
|
||||
|
@ -899,39 +902,39 @@ static void _data_in(const char* data, uint64_t len, std::optional<uint64_t> off
|
|||
|
||||
void rt::terminate_session() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/terminate_session");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto c = cookie->protocol ) {
|
||||
assert(session_mgr);
|
||||
session_mgr->Remove(c->analyzer->Conn());
|
||||
return session_mgr->Remove(c->analyzer->Conn());
|
||||
}
|
||||
else
|
||||
}
|
||||
|
||||
throw spicy::rt::ValueUnavailable("terminate_session() not available in the current context");
|
||||
}
|
||||
|
||||
void rt::skip_input() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/skip_input");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto p = cookie->protocol )
|
||||
p->analyzer->SetSkip(true);
|
||||
return p->analyzer->SetSkip(true);
|
||||
else if ( auto f = cookie->file )
|
||||
f->analyzer->SetSkip(true);
|
||||
else
|
||||
return f->analyzer->SetSkip(true);
|
||||
}
|
||||
|
||||
throw spicy::rt::ValueUnavailable("skip() not available in the current context");
|
||||
}
|
||||
|
||||
std::string rt::fuid() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/fuid");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto f = cookie->file ) {
|
||||
if ( auto file = f->analyzer->GetFile() )
|
||||
return file->GetID();
|
||||
}
|
||||
}
|
||||
|
||||
throw ValueUnavailable("fuid() not available in current context");
|
||||
}
|
||||
|
@ -1003,6 +1006,9 @@ void rt::file_gap(const hilti::rt::integer::safe<uint64_t>& offset, const hilti:
|
|||
const std::optional<std::string>& fid) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/file_gap");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
if ( ! cookie )
|
||||
throw spicy::rt::ValueUnavailable("file_gap() not available in the current context");
|
||||
|
||||
auto* fstate = _file_state(cookie, fid);
|
||||
|
||||
if ( auto c = cookie->protocol ) {
|
||||
|
@ -1024,12 +1030,14 @@ void rt::file_end(const std::optional<std::string>& fid) {
|
|||
|
||||
void rt::forward_packet(const hilti::rt::integer::safe<uint32_t>& identifier) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/forward_packet");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( auto c = cookie->packet )
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( auto c = cookie->packet ) {
|
||||
c->next_analyzer = identifier;
|
||||
else
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
throw ValueUnavailable("no current packet analyzer available");
|
||||
}
|
||||
|
||||
|
|
|
@ -4,10 +4,8 @@
|
|||
|
||||
#include <getopt.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <type_traits>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
|
@ -42,11 +40,10 @@ struct VisitorTypes : public spicy::visitor::PreOrder {
|
|||
module = {};
|
||||
return;
|
||||
}
|
||||
|
||||
module = n->scopeID();
|
||||
path = n->uid().path;
|
||||
|
||||
if ( is_resolved )
|
||||
if ( is_resolved && ! n->skipImplementation() )
|
||||
glue->addSpicyModule(module, path);
|
||||
}
|
||||
|
||||
|
|
|
@ -1375,7 +1375,7 @@ bool GlueCompiler::CreateSpicyHook(glue::Event* ev) {
|
|||
|
||||
auto attrs = builder()->attributeSet({builder()->attribute("&priority", builder()->integer(ev->priority))});
|
||||
auto parameters = hilti::util::transform(ev->parameters, [](const auto& p) { return p.get(); });
|
||||
auto unit_hook = builder()->declarationHook(parameters, body.block(), ::spicy::Engine::All, attrs, meta);
|
||||
auto unit_hook = builder()->declarationHook(parameters, body.block(), attrs, meta);
|
||||
auto hook_decl = builder()->declarationUnitHook(ev->hook, unit_hook, meta);
|
||||
ev->spicy_module->spicy_module->add(context(), hook_decl);
|
||||
|
||||
|
|
|
@ -9,6 +9,7 @@ zeek_add_subdir_library(
|
|||
ProcessStats.cc
|
||||
Utils.cc
|
||||
BIFS
|
||||
consts.bif
|
||||
telemetry.bif)
|
||||
|
||||
# We don't need to include the civetweb headers across the whole project, only
|
||||
|
|
|
@ -2,27 +2,17 @@
|
|||
|
||||
using namespace zeek::telemetry;
|
||||
|
||||
Counter::Counter(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept
|
||||
: handle(family->Add(labels)), labels(labels) {
|
||||
if ( callback ) {
|
||||
handle.AddCollectCallback(std::move(callback));
|
||||
has_callback = true;
|
||||
}
|
||||
}
|
||||
Counter::Counter(FamilyType* family, const prometheus::Labels& labels, detail::CollectCallbackPtr callback) noexcept
|
||||
: family(family), handle(family->Add(labels)), labels(labels), callback(std::move(callback)) {}
|
||||
|
||||
double Counter::Value() const noexcept {
|
||||
if ( has_callback ) {
|
||||
// Use Collect() here instead of Value() to correctly handle metrics with
|
||||
// callbacks.
|
||||
auto metric = handle.Collect();
|
||||
return metric.counter.value;
|
||||
}
|
||||
if ( callback )
|
||||
return callback();
|
||||
|
||||
return handle.Value();
|
||||
}
|
||||
|
||||
std::shared_ptr<Counter> CounterFamily::GetOrAdd(Span<const LabelView> labels,
|
||||
prometheus::CollectCallbackPtr callback) {
|
||||
std::shared_ptr<Counter> CounterFamily::GetOrAdd(Span<const LabelView> labels, detail::CollectCallbackPtr callback) {
|
||||
prometheus::Labels p_labels = detail::BuildPrometheusLabels(labels);
|
||||
|
||||
auto check = [&](const std::shared_ptr<Counter>& counter) { return counter->CompareLabels(p_labels); };
|
||||
|
@ -36,6 +26,15 @@ std::shared_ptr<Counter> CounterFamily::GetOrAdd(Span<const LabelView> labels,
|
|||
}
|
||||
|
||||
std::shared_ptr<Counter> CounterFamily::GetOrAdd(std::initializer_list<LabelView> labels,
|
||||
prometheus::CollectCallbackPtr callback) {
|
||||
detail::CollectCallbackPtr callback) {
|
||||
return GetOrAdd(Span{labels.begin(), labels.size()}, std::move(callback));
|
||||
}
|
||||
|
||||
void CounterFamily::RunCallbacks() {
|
||||
for ( auto& c : counters ) {
|
||||
if ( c->HasCallback() ) {
|
||||
double val = c->RunCallback();
|
||||
c->Set(val);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
|
||||
#include <prometheus/counter.h>
|
||||
#include <prometheus/family.h>
|
||||
#include <cstdint>
|
||||
#include <initializer_list>
|
||||
#include <memory>
|
||||
|
||||
|
@ -15,6 +14,12 @@
|
|||
|
||||
namespace zeek::telemetry {
|
||||
|
||||
namespace detail {
|
||||
using CollectCallbackPtr = std::function<double()>;
|
||||
}
|
||||
|
||||
class CounterFamily;
|
||||
|
||||
/**
|
||||
* A handle to a metric that can only go up.
|
||||
*/
|
||||
|
@ -26,7 +31,7 @@ public:
|
|||
using FamilyType = prometheus::Family<Handle>;
|
||||
|
||||
explicit Counter(FamilyType* family, const prometheus::Labels& labels,
|
||||
prometheus::CollectCallbackPtr callback = nullptr) noexcept;
|
||||
detail::CollectCallbackPtr callback = nullptr) noexcept;
|
||||
|
||||
/**
|
||||
* Increments the value by 1.
|
||||
|
@ -55,10 +60,21 @@ public:
|
|||
|
||||
bool CompareLabels(const prometheus::Labels& lbls) const { return labels == lbls; }
|
||||
|
||||
bool HasCallback() const noexcept { return callback != nullptr; }
|
||||
double RunCallback() const { return callback(); }
|
||||
|
||||
private:
|
||||
friend class CounterFamily;
|
||||
void Set(double val) {
|
||||
// Counter has no Set(), but we can fake it.
|
||||
handle.Reset();
|
||||
handle.Increment(val);
|
||||
}
|
||||
|
||||
FamilyType* family = nullptr;
|
||||
Handle& handle;
|
||||
prometheus::Labels labels;
|
||||
bool has_callback = false;
|
||||
detail::CollectCallbackPtr callback;
|
||||
};
|
||||
|
||||
using CounterPtr = std::shared_ptr<Counter>;
|
||||
|
@ -74,15 +90,17 @@ public:
|
|||
* Returns the metrics handle for given labels, creating a new instance
|
||||
* lazily if necessary.
|
||||
*/
|
||||
CounterPtr GetOrAdd(Span<const LabelView> labels, prometheus::CollectCallbackPtr callback = nullptr);
|
||||
CounterPtr GetOrAdd(Span<const LabelView> labels, detail::CollectCallbackPtr callback = nullptr);
|
||||
|
||||
/**
|
||||
* @copydoc GetOrAdd
|
||||
*/
|
||||
CounterPtr GetOrAdd(std::initializer_list<LabelView> labels, prometheus::CollectCallbackPtr callback = nullptr);
|
||||
CounterPtr GetOrAdd(std::initializer_list<LabelView> labels, detail::CollectCallbackPtr callback = nullptr);
|
||||
|
||||
zeek_int_t MetricType() const noexcept override { return BifEnum::Telemetry::MetricType::COUNTER; }
|
||||
|
||||
void RunCallbacks() override;
|
||||
|
||||
private:
|
||||
prometheus::Family<prometheus::Counter>* family;
|
||||
std::vector<CounterPtr> counters;
|
||||
|
|
|
@ -3,26 +3,16 @@
|
|||
using namespace zeek::telemetry;
|
||||
|
||||
double Gauge::Value() const noexcept {
|
||||
if ( has_callback ) {
|
||||
// Use Collect() here instead of Value() to correctly handle metrics
|
||||
// with callbacks.
|
||||
auto metric = handle.Collect();
|
||||
return metric.gauge.value;
|
||||
}
|
||||
if ( callback )
|
||||
return callback();
|
||||
|
||||
return handle.Value();
|
||||
}
|
||||
|
||||
Gauge::Gauge(FamilyType* family, const prometheus::Labels& labels, detail::CollectCallbackPtr callback) noexcept
|
||||
: family(family), handle(family->Add(labels)), labels(labels), callback(std::move(callback)) {}
|
||||
|
||||
Gauge::Gauge(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept
|
||||
: handle(family->Add(labels)), labels(labels) {
|
||||
if ( callback ) {
|
||||
handle.AddCollectCallback(std::move(callback));
|
||||
has_callback = true;
|
||||
}
|
||||
}
|
||||
|
||||
std::shared_ptr<Gauge> GaugeFamily::GetOrAdd(Span<const LabelView> labels, prometheus::CollectCallbackPtr callback) {
|
||||
std::shared_ptr<Gauge> GaugeFamily::GetOrAdd(Span<const LabelView> labels, detail::CollectCallbackPtr callback) {
|
||||
prometheus::Labels p_labels = detail::BuildPrometheusLabels(labels);
|
||||
|
||||
auto check = [&](const std::shared_ptr<Gauge>& gauge) { return gauge->CompareLabels(p_labels); };
|
||||
|
@ -36,6 +26,13 @@ std::shared_ptr<Gauge> GaugeFamily::GetOrAdd(Span<const LabelView> labels, prome
|
|||
}
|
||||
|
||||
std::shared_ptr<Gauge> GaugeFamily::GetOrAdd(std::initializer_list<LabelView> labels,
|
||||
prometheus::CollectCallbackPtr callback) {
|
||||
detail::CollectCallbackPtr callback) {
|
||||
return GetOrAdd(Span{labels.begin(), labels.size()}, std::move(callback));
|
||||
}
|
||||
|
||||
void GaugeFamily::RunCallbacks() {
|
||||
for ( const auto& g : gauges ) {
|
||||
if ( g->HasCallback() )
|
||||
g->Set(g->RunCallback());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
|
||||
#include <prometheus/family.h>
|
||||
#include <prometheus/gauge.h>
|
||||
#include <cstdint>
|
||||
#include <unistd.h>
|
||||
#include <initializer_list>
|
||||
#include <memory>
|
||||
|
||||
|
@ -15,6 +15,10 @@
|
|||
|
||||
namespace zeek::telemetry {
|
||||
|
||||
namespace detail {
|
||||
using CollectCallbackPtr = std::function<double()>;
|
||||
}
|
||||
|
||||
/**
|
||||
* A handle to a metric that can count up and down.
|
||||
*/
|
||||
|
@ -26,7 +30,7 @@ public:
|
|||
using FamilyType = prometheus::Family<Handle>;
|
||||
|
||||
explicit Gauge(FamilyType* family, const prometheus::Labels& labels,
|
||||
prometheus::CollectCallbackPtr callback = nullptr) noexcept;
|
||||
detail::CollectCallbackPtr callback = nullptr) noexcept;
|
||||
|
||||
/**
|
||||
* Increments the value by 1.
|
||||
|
@ -57,6 +61,11 @@ public:
|
|||
*/
|
||||
void Dec(double amount) noexcept { handle.Decrement(amount); }
|
||||
|
||||
/**
|
||||
* Set the value by @p val.
|
||||
*/
|
||||
void Set(double val) noexcept { handle.Set(val); }
|
||||
|
||||
/**
|
||||
* Decrements the value by 1.
|
||||
* @return The new value.
|
||||
|
@ -73,10 +82,14 @@ public:
|
|||
|
||||
bool CompareLabels(const prometheus::Labels& lbls) const { return labels == lbls; }
|
||||
|
||||
bool HasCallback() const noexcept { return callback != nullptr; }
|
||||
double RunCallback() const { return callback(); }
|
||||
|
||||
private:
|
||||
FamilyType* family = nullptr;
|
||||
Handle& handle;
|
||||
prometheus::Labels labels;
|
||||
bool has_callback = false;
|
||||
detail::CollectCallbackPtr callback;
|
||||
};
|
||||
|
||||
using GaugePtr = std::shared_ptr<Gauge>;
|
||||
|
@ -89,18 +102,20 @@ public:
|
|||
* Returns the metrics handle for given labels, creating a new instance
|
||||
* lazily if necessary.
|
||||
*/
|
||||
GaugePtr GetOrAdd(Span<const LabelView> labels, prometheus::CollectCallbackPtr callback = nullptr);
|
||||
GaugePtr GetOrAdd(Span<const LabelView> labels, detail::CollectCallbackPtr callback = nullptr);
|
||||
|
||||
/**
|
||||
* @copydoc GetOrAdd
|
||||
*/
|
||||
GaugePtr GetOrAdd(std::initializer_list<LabelView> labels, prometheus::CollectCallbackPtr callback = nullptr);
|
||||
GaugePtr GetOrAdd(std::initializer_list<LabelView> labels, detail::CollectCallbackPtr callback = nullptr);
|
||||
|
||||
zeek_int_t MetricType() const noexcept override { return BifEnum::Telemetry::MetricType::GAUGE; }
|
||||
|
||||
GaugeFamily(prometheus::Family<prometheus::Gauge>* family, Span<const std::string_view> labels)
|
||||
: MetricFamily(labels), family(family) {}
|
||||
|
||||
void RunCallbacks() override;
|
||||
|
||||
private:
|
||||
prometheus::Family<prometheus::Gauge>* family;
|
||||
std::vector<GaugePtr> gauges;
|
||||
|
|
|
@ -66,6 +66,8 @@ public:
|
|||
|
||||
zeek_int_t MetricType() const noexcept override { return BifEnum::Telemetry::MetricType::HISTOGRAM; }
|
||||
|
||||
void RunCallbacks() override {}
|
||||
|
||||
private:
|
||||
prometheus::Family<prometheus::Histogram>* family;
|
||||
prometheus::Histogram::BucketBoundaries boundaries;
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
|
||||
// CivetServer is from the civetweb submodule in prometheus-cpp
|
||||
#include <CivetServer.h>
|
||||
#include <prometheus/collectable.h>
|
||||
#include <prometheus/exposer.h>
|
||||
#include <prometheus/registry.h>
|
||||
#include <rapidjson/document.h>
|
||||
|
@ -16,19 +17,32 @@
|
|||
|
||||
#include "zeek/3rdparty/doctest.h"
|
||||
#include "zeek/ID.h"
|
||||
#include "zeek/RunState.h"
|
||||
#include "zeek/ZeekString.h"
|
||||
#include "zeek/broker/Manager.h"
|
||||
#include "zeek/iosource/Manager.h"
|
||||
#include "zeek/telemetry/ProcessStats.h"
|
||||
#include "zeek/telemetry/Timer.h"
|
||||
#include "zeek/telemetry/consts.bif.h"
|
||||
#include "zeek/telemetry/telemetry.bif.h"
|
||||
#include "zeek/threading/formatters/detail/json.h"
|
||||
|
||||
namespace zeek::telemetry {
|
||||
|
||||
Manager::Manager() { prometheus_registry = std::make_shared<prometheus::Registry>(); }
|
||||
/**
|
||||
* Prometheus Collectable interface used to insert Zeek callback processing
|
||||
* before the Prometheus registry's collection of metric data.
|
||||
*/
|
||||
class ZeekCollectable : public prometheus::Collectable {
|
||||
public:
|
||||
std::vector<prometheus::MetricFamily> Collect() const override {
|
||||
telemetry_mgr->WaitForPrometheusCallbacks();
|
||||
return {};
|
||||
}
|
||||
};
|
||||
|
||||
Manager::Manager() : IOSource(true) { prometheus_registry = std::make_shared<prometheus::Registry>(); }
|
||||
|
||||
// This can't be defined as =default because of the use of unique_ptr with a forward-declared type
|
||||
// in Manager.h
|
||||
Manager::~Manager() {}
|
||||
|
||||
void Manager::InitPostScript() {
|
||||
|
@ -75,7 +89,9 @@ void Manager::InitPostScript() {
|
|||
|
||||
if ( ! getenv("ZEEKCTL_CHECK_CONFIG") ) {
|
||||
try {
|
||||
prometheus_exposer = std::make_unique<prometheus::Exposer>(prometheus_url, 2, callbacks);
|
||||
prometheus_exposer =
|
||||
std::make_unique<prometheus::Exposer>(prometheus_url, BifConst::Telemetry::civetweb_threads,
|
||||
callbacks);
|
||||
|
||||
// CivetWeb stores a copy of the callbacks, so we're safe to delete the pointer here
|
||||
delete callbacks;
|
||||
|
@ -84,6 +100,13 @@ void Manager::InitPostScript() {
|
|||
prometheus_url.c_str());
|
||||
}
|
||||
|
||||
// This has to be inserted before the registry below. The exposer
|
||||
// processes the collectors in order of insertion. We want to make
|
||||
// sure that the callbacks get called and the values in the metrics
|
||||
// are updated before prometheus-cpp scrapes them.
|
||||
zeek_collectable = std::make_shared<ZeekCollectable>();
|
||||
prometheus_exposer->RegisterCollectable(zeek_collectable);
|
||||
|
||||
prometheus_exposer->RegisterCollectable(prometheus_registry);
|
||||
}
|
||||
}
|
||||
|
@ -99,37 +122,32 @@ void Manager::InitPostScript() {
|
|||
return &this->current_process_stats;
|
||||
};
|
||||
rss_gauge = GaugeInstance("process", "resident_memory", {}, "Resident memory size", "bytes",
|
||||
[]() -> prometheus::ClientMetric {
|
||||
auto* s = get_stats();
|
||||
prometheus::ClientMetric metric;
|
||||
metric.gauge.value = static_cast<double>(s->rss);
|
||||
return metric;
|
||||
});
|
||||
[]() { return static_cast<double>(get_stats()->rss); });
|
||||
|
||||
vms_gauge = GaugeInstance("process", "virtual_memory", {}, "Virtual memory size", "bytes",
|
||||
[]() -> prometheus::ClientMetric {
|
||||
auto* s = get_stats();
|
||||
prometheus::ClientMetric metric;
|
||||
metric.gauge.value = static_cast<double>(s->vms);
|
||||
return metric;
|
||||
});
|
||||
[]() { return static_cast<double>(get_stats()->vms); });
|
||||
|
||||
cpu_gauge = GaugeInstance("process", "cpu", {}, "Total user and system CPU time spent", "seconds",
|
||||
[]() -> prometheus::ClientMetric {
|
||||
auto* s = get_stats();
|
||||
prometheus::ClientMetric metric;
|
||||
metric.gauge.value = s->cpu;
|
||||
return metric;
|
||||
});
|
||||
[]() { return get_stats()->cpu; });
|
||||
|
||||
fds_gauge = GaugeInstance("process", "open_fds", {}, "Number of open file descriptors", "",
|
||||
[]() -> prometheus::ClientMetric {
|
||||
auto* s = get_stats();
|
||||
prometheus::ClientMetric metric;
|
||||
metric.gauge.value = static_cast<double>(s->fds);
|
||||
return metric;
|
||||
});
|
||||
[]() { return static_cast<double>(get_stats()->fds); });
|
||||
#endif
|
||||
|
||||
iosource_mgr->RegisterFd(collector_flare.FD(), this);
|
||||
}
|
||||
|
||||
void Manager::Terminate() {
|
||||
// Notify the collector condition so that it doesn't hang waiting for
|
||||
// a collector request to complete.
|
||||
collector_cv.notify_all();
|
||||
|
||||
// Shut down the exposer first of all so we stop getting requests for
|
||||
// data. This keeps us from getting a request on another thread while
|
||||
// we're shutting down.
|
||||
prometheus_exposer.reset();
|
||||
|
||||
iosource_mgr->UnregisterFd(collector_flare.FD(), this);
|
||||
}
|
||||
|
||||
// -- collect metric stuff -----------------------------------------------------
|
||||
|
@ -453,7 +471,7 @@ CounterFamilyPtr Manager::CounterFamily(std::string_view prefix, std::string_vie
|
|||
|
||||
CounterPtr Manager::CounterInstance(std::string_view prefix, std::string_view name, Span<const LabelView> labels,
|
||||
std::string_view helptext, std::string_view unit,
|
||||
prometheus::CollectCallbackPtr callback) {
|
||||
detail::CollectCallbackPtr callback) {
|
||||
return WithLabelNames(labels, [&, this](auto labelNames) {
|
||||
auto family = CounterFamily(prefix, name, labelNames, helptext, unit);
|
||||
return family->GetOrAdd(labels, callback);
|
||||
|
@ -462,7 +480,7 @@ CounterPtr Manager::CounterInstance(std::string_view prefix, std::string_view na
|
|||
|
||||
CounterPtr Manager::CounterInstance(std::string_view prefix, std::string_view name,
|
||||
std::initializer_list<LabelView> labels, std::string_view helptext,
|
||||
std::string_view unit, prometheus::CollectCallbackPtr callback) {
|
||||
std::string_view unit, detail::CollectCallbackPtr callback) {
|
||||
auto lbl_span = Span{labels.begin(), labels.size()};
|
||||
return CounterInstance(prefix, name, lbl_span, helptext, unit, std::move(callback));
|
||||
}
|
||||
|
@ -491,8 +509,7 @@ GaugeFamilyPtr Manager::GaugeFamily(std::string_view prefix, std::string_view na
|
|||
}
|
||||
|
||||
GaugePtr Manager::GaugeInstance(std::string_view prefix, std::string_view name, Span<const LabelView> labels,
|
||||
std::string_view helptext, std::string_view unit,
|
||||
prometheus::CollectCallbackPtr callback) {
|
||||
std::string_view helptext, std::string_view unit, detail::CollectCallbackPtr callback) {
|
||||
return WithLabelNames(labels, [&, this](auto labelNames) {
|
||||
auto family = GaugeFamily(prefix, name, labelNames, helptext, unit);
|
||||
return family->GetOrAdd(labels, callback);
|
||||
|
@ -500,8 +517,7 @@ GaugePtr Manager::GaugeInstance(std::string_view prefix, std::string_view name,
|
|||
}
|
||||
|
||||
GaugePtr Manager::GaugeInstance(std::string_view prefix, std::string_view name, std::initializer_list<LabelView> labels,
|
||||
std::string_view helptext, std::string_view unit,
|
||||
prometheus::CollectCallbackPtr callback) {
|
||||
std::string_view helptext, std::string_view unit, detail::CollectCallbackPtr callback) {
|
||||
auto lbl_span = Span{labels.begin(), labels.size()};
|
||||
return GaugeInstance(prefix, name, lbl_span, helptext, unit, std::move(callback));
|
||||
}
|
||||
|
@ -545,6 +561,41 @@ HistogramPtr Manager::HistogramInstance(std::string_view prefix, std::string_vie
|
|||
return HistogramInstance(prefix, name, lbls, bounds_span, helptext, unit);
|
||||
}
|
||||
|
||||
void Manager::ProcessFd(int fd, int flags) {
|
||||
std::unique_lock<std::mutex> lk(collector_cv_mtx);
|
||||
|
||||
collector_flare.Extinguish();
|
||||
|
||||
for ( const auto& [name, f] : families )
|
||||
f->RunCallbacks();
|
||||
|
||||
collector_response_idx = collector_request_idx;
|
||||
|
||||
lk.unlock();
|
||||
collector_cv.notify_all();
|
||||
}
|
||||
|
||||
void Manager::WaitForPrometheusCallbacks() {
|
||||
std::unique_lock<std::mutex> lk(collector_cv_mtx);
|
||||
|
||||
++collector_request_idx;
|
||||
uint64_t expected_idx = collector_request_idx;
|
||||
collector_flare.Fire();
|
||||
|
||||
// It should *not* take 5 seconds to go through all of the callbacks, but
|
||||
// set this to have a timeout anyways just to avoid a deadlock.
|
||||
bool res = collector_cv.wait_for(lk,
|
||||
std::chrono::microseconds(
|
||||
static_cast<long>(BifConst::Telemetry::callback_timeout * 1000000)),
|
||||
[expected_idx]() {
|
||||
return telemetry_mgr->collector_response_idx >= expected_idx ||
|
||||
zeek::run_state::terminating;
|
||||
});
|
||||
|
||||
if ( ! res )
|
||||
fprintf(stderr, "Timeout waiting for prometheus callbacks\n");
|
||||
}
|
||||
|
||||
} // namespace zeek::telemetry
|
||||
|
||||
// -- unit tests ---------------------------------------------------------------
|
||||
|
|
|
@ -9,8 +9,10 @@
|
|||
#include <string_view>
|
||||
#include <vector>
|
||||
|
||||
#include "zeek/Flare.h"
|
||||
#include "zeek/IntrusivePtr.h"
|
||||
#include "zeek/Span.h"
|
||||
#include "zeek/iosource/IOSource.h"
|
||||
#include "zeek/telemetry/Counter.h"
|
||||
#include "zeek/telemetry/Gauge.h"
|
||||
#include "zeek/telemetry/Histogram.h"
|
||||
|
@ -29,15 +31,20 @@ class Registry;
|
|||
|
||||
namespace zeek::telemetry {
|
||||
|
||||
namespace detail {
|
||||
using CollectCallbackPtr = std::function<double()>;
|
||||
}
|
||||
|
||||
class ZeekCollectable;
|
||||
|
||||
/**
|
||||
* Manages a collection of metric families.
|
||||
*/
|
||||
class Manager final {
|
||||
class Manager final : public iosource::IOSource {
|
||||
public:
|
||||
Manager();
|
||||
|
||||
Manager(const Manager&) = delete;
|
||||
|
||||
Manager& operator=(const Manager&) = delete;
|
||||
|
||||
~Manager();
|
||||
|
@ -50,6 +57,8 @@ public:
|
|||
*/
|
||||
void InitPostScript();
|
||||
|
||||
void Terminate();
|
||||
|
||||
/**
|
||||
* @return A VectorVal containing all counter and gauge metrics and their values matching prefix and name.
|
||||
* @param prefix The prefix pattern to use for filtering. Supports globbing.
|
||||
|
@ -88,17 +97,17 @@ public:
|
|||
* @param labels Values for all label dimensions of the metric.
|
||||
* @param helptext Short explanation of the metric.
|
||||
* @param unit Unit of measurement.
|
||||
* @param callback Passing a callback method will enable asynchronous mode. The callback method will be called by
|
||||
* the metrics subsystem whenever data is requested.
|
||||
* @param callback Passing a callback method will enable asynchronous mode. The callback method will be called
|
||||
* by the metrics subsystem whenever data is requested.
|
||||
*/
|
||||
CounterPtr CounterInstance(std::string_view prefix, std::string_view name, Span<const LabelView> labels,
|
||||
std::string_view helptext, std::string_view unit = "",
|
||||
prometheus::CollectCallbackPtr callback = nullptr);
|
||||
detail::CollectCallbackPtr callback = nullptr);
|
||||
|
||||
/// @copydoc counterInstance
|
||||
CounterPtr CounterInstance(std::string_view prefix, std::string_view name, std::initializer_list<LabelView> labels,
|
||||
std::string_view helptext, std::string_view unit = "",
|
||||
prometheus::CollectCallbackPtr callback = nullptr);
|
||||
detail::CollectCallbackPtr callback = nullptr);
|
||||
|
||||
/**
|
||||
* @return A gauge metric family. Creates the family lazily if necessary.
|
||||
|
@ -124,17 +133,17 @@ public:
|
|||
* @param labels Values for all label dimensions of the metric.
|
||||
* @param helptext Short explanation of the metric.
|
||||
* @param unit Unit of measurement.
|
||||
* @param callback Passing a callback method will enable asynchronous mode. The callback method will be called by
|
||||
* the metrics subsystem whenever data is requested.
|
||||
* @param callback Passing a callback method will enable asynchronous mode. The callback method will be called
|
||||
* by the metrics subsystem whenever data is requested.
|
||||
*/
|
||||
GaugePtr GaugeInstance(std::string_view prefix, std::string_view name, Span<const LabelView> labels,
|
||||
std::string_view helptext, std::string_view unit = "",
|
||||
prometheus::CollectCallbackPtr callback = nullptr);
|
||||
detail::CollectCallbackPtr callback = nullptr);
|
||||
|
||||
/// @copydoc GaugeInstance
|
||||
GaugePtr GaugeInstance(std::string_view prefix, std::string_view name, std::initializer_list<LabelView> labels,
|
||||
std::string_view helptext, std::string_view unit = "",
|
||||
prometheus::CollectCallbackPtr callback = nullptr);
|
||||
detail::CollectCallbackPtr callback = nullptr);
|
||||
|
||||
// Forces the compiler to use the type `Span<const T>` instead of trying to
|
||||
// match parameters to a `span`.
|
||||
|
@ -212,6 +221,12 @@ public:
|
|||
*/
|
||||
std::shared_ptr<prometheus::Registry> GetRegistry() const { return prometheus_registry; }
|
||||
|
||||
// IOSource interface
|
||||
double GetNextTimeout() override { return -1.0; }
|
||||
void Process() override {}
|
||||
const char* Tag() override { return "Telemetry::Manager"; }
|
||||
void ProcessFd(int fd, int flags) override;
|
||||
|
||||
protected:
|
||||
template<class F>
|
||||
static auto WithLabelNames(Span<const LabelView> xs, F continuation) {
|
||||
|
@ -231,6 +246,15 @@ protected:
|
|||
}
|
||||
}
|
||||
|
||||
friend class ZeekCollectable;
|
||||
|
||||
/**
|
||||
* Fires the flare for prometheus-cpp callback handling and waits for it to complete.
|
||||
* This can be called from other threads to ensure the callback handling happens on
|
||||
* the main thread.
|
||||
*/
|
||||
void WaitForPrometheusCallbacks();
|
||||
|
||||
private:
|
||||
RecordValPtr GetMetricOptsRecord(const prometheus::MetricFamily& metric_family);
|
||||
void BuildClusterJson();
|
||||
|
@ -250,6 +274,14 @@ private:
|
|||
std::unique_ptr<prometheus::Exposer> prometheus_exposer;
|
||||
|
||||
std::string cluster_json;
|
||||
|
||||
std::shared_ptr<ZeekCollectable> zeek_collectable;
|
||||
zeek::detail::Flare collector_flare;
|
||||
std::condition_variable collector_cv;
|
||||
std::mutex collector_cv_mtx;
|
||||
// Only modified under collector_cv_mtx!
|
||||
uint64_t collector_request_idx = 0;
|
||||
uint64_t collector_response_idx = 0;
|
||||
};
|
||||
|
||||
} // namespace zeek::telemetry
|
||||
|
|
|
@ -22,6 +22,8 @@ public:
|
|||
|
||||
std::vector<std::string> LabelNames() const { return label_names; }
|
||||
|
||||
virtual void RunCallbacks() = 0;
|
||||
|
||||
protected:
|
||||
MetricFamily(Span<const std::string_view> labels) {
|
||||
for ( const auto& lbl : labels )
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
#include <string_view>
|
||||
|
||||
#include "zeek/Span.h"
|
||||
#include "zeek/Val.h"
|
||||
|
||||
namespace zeek::telemetry {
|
||||
|
||||
|
|
2
src/telemetry/consts.bif
Normal file
2
src/telemetry/consts.bif
Normal file
|
@ -0,0 +1,2 @@
|
|||
const Telemetry::callback_timeout: interval;
|
||||
const Telemetry::civetweb_threads: count;
|
|
@ -376,6 +376,7 @@ static void terminate_zeek() {
|
|||
input_mgr->Terminate();
|
||||
thread_mgr->Terminate();
|
||||
broker_mgr->Terminate();
|
||||
telemetry_mgr->Terminate();
|
||||
|
||||
event_mgr.Drain();
|
||||
|
||||
|
@ -716,6 +717,7 @@ SetupResult setup(int argc, char** argv, Options* zopts) {
|
|||
// when that variable is defined.
|
||||
auto early_shutdown = [] {
|
||||
broker_mgr->Terminate();
|
||||
telemetry_mgr->Terminate();
|
||||
delete iosource_mgr;
|
||||
delete telemetry_mgr;
|
||||
};
|
||||
|
|
|
@ -5304,7 +5304,7 @@ function has_module_events%(group: string%) : bool
|
|||
%}
|
||||
|
||||
## Returns true if Zeek was built with support for using Spicy analyzers (which
|
||||
# is the default).
|
||||
## is the default).
|
||||
function have_spicy%(%) : bool
|
||||
%{
|
||||
#ifdef HAVE_SPICY
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
error in <...>/from_json.zeek, line 4: from_json() requires a type argument (from_json([], 10, from_json_default_key_mapper))
|
||||
error in <...>/from_json.zeek, line 4: from_json() requires a type argument (from_json([], <internal>::#0, from_json_default_key_mapper))
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
#open XXXX-XX-XX-XX-XX-XX
|
||||
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer source
|
||||
#types time string addr port addr port string string bool string string
|
||||
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 58246 127.0.0.1 110 pop3_server_command_unknown - F zeek POP3
|
||||
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 58246 127.0.0.1 110 pop3_server_command_unknown + F zeek POP3
|
||||
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 58246 127.0.0.1 110 line_terminated_with_single_CR - F zeek CONTENTLINE
|
||||
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 58246 127.0.0.1 110 too_many_analyzer_violations - F zeek POP3
|
||||
#close XXXX-XX-XX-XX-XX-XX
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
error in <...>/option-runtime-errors.zeek, line 3: Incompatible type for set of ID 'A': got 'string', need 'count' (Option::set(A, hi, ))
|
||||
error in <...>/option-runtime-errors.zeek, line 3: Incompatible type for set of ID 'A': got 'string', need 'count' (Option::set(A, <internal>::#0, ))
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
error in <...>/option-runtime-errors.zeek, line 3: ID 'A' is not an option (Option::set(A, 6, ))
|
||||
error in <...>/option-runtime-errors.zeek, line 3: ID 'A' is not an option (Option::set(A, <internal>::#0, ))
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
error in <...>/option-runtime-errors.zeek, line 9: Could not find ID named 'B' (Option::set(B, 6, ))
|
||||
error in <...>/option-runtime-errors.zeek, line 9: Could not find ID named 'B' (Option::set(B, <internal>::#0, ))
|
||||
|
|
|
@ -7,5 +7,5 @@
|
|||
#open XXXX-XX-XX-XX-XX-XX
|
||||
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents
|
||||
#types time string addr port addr port enum string interval count count string bool bool count string count count count count set[string]
|
||||
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 172.16.238.1 49656 172.16.238.131 80 tcp spicy_ssh 9.953807 2405 2887 SF T T 0 ShAdDaFf 40 4497 30 4455 -
|
||||
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 172.16.238.1 49656 172.16.238.131 80 tcp ssh 9.953807 2405 2887 SF T T 0 ShAdDaFf 40 4497 30 4455 -
|
||||
#close XXXX-XX-XX-XX-XX-XX
|
||||
|
|
|
@ -22,5 +22,6 @@ warning in <params>, line 1: event handler never invoked: SupervisorControl::res
|
|||
warning in <params>, line 1: event handler never invoked: SupervisorControl::status_request
|
||||
warning in <params>, line 1: event handler never invoked: SupervisorControl::stop_request
|
||||
warning in <params>, line 1: event handler never invoked: spicy_analyzer_for_mime_type
|
||||
warning in <params>, line 1: event handler never invoked: spicy_analyzer_for_port
|
||||
warning in <params>, line 1: event handler never invoked: terminate_event
|
||||
warning in <params>, line 1: event handler never invoked: this_is_never_used
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
#open XXXX-XX-XX-XX-XX-XX
|
||||
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer source
|
||||
#types time string addr port addr port string string bool string string
|
||||
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 58246 127.0.0.1 110 pop3_server_command_unknown - F zeek POP3
|
||||
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 58246 127.0.0.1 110 pop3_server_command_unknown + F zeek POP3
|
||||
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 58246 127.0.0.1 110 line_terminated_with_single_CR - F zeek CONTENTLINE
|
||||
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 127.0.0.1 58246 127.0.0.1 110 too_many_analyzer_violations - F zeek POP3
|
||||
#close XXXX-XX-XX-XX-XX-XX
|
||||
|
|
|
@ -146,6 +146,7 @@ scripts/base/init-frameworks-and-bifs.zeek
|
|||
scripts/base/frameworks/files/magic/__load__.zeek
|
||||
scripts/base/frameworks/telemetry/options.zeek
|
||||
build/scripts/base/bif/__load__.zeek
|
||||
build/scripts/base/bif/consts.bif.zeek
|
||||
build/scripts/base/bif/telemetry.bif.zeek
|
||||
build/scripts/base/bif/zeekygen.bif.zeek
|
||||
build/scripts/base/bif/pcap.bif.zeek
|
||||
|
@ -191,6 +192,7 @@ scripts/base/init-frameworks-and-bifs.zeek
|
|||
build/scripts/base/bif/plugins/Zeek_NTLM.events.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_NTP.types.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_NTP.events.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_POP3.consts.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_POP3.events.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_RADIUS.events.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_RDP.events.bif.zeek
|
||||
|
|
|
@ -146,6 +146,7 @@ scripts/base/init-frameworks-and-bifs.zeek
|
|||
scripts/base/frameworks/files/magic/__load__.zeek
|
||||
scripts/base/frameworks/telemetry/options.zeek
|
||||
build/scripts/base/bif/__load__.zeek
|
||||
build/scripts/base/bif/consts.bif.zeek
|
||||
build/scripts/base/bif/telemetry.bif.zeek
|
||||
build/scripts/base/bif/zeekygen.bif.zeek
|
||||
build/scripts/base/bif/pcap.bif.zeek
|
||||
|
@ -191,6 +192,7 @@ scripts/base/init-frameworks-and-bifs.zeek
|
|||
build/scripts/base/bif/plugins/Zeek_NTLM.events.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_NTP.types.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_NTP.events.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_POP3.consts.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_POP3.events.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_RADIUS.events.bif.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_RDP.events.bif.zeek
|
||||
|
|
3
testing/btest/Baseline/opt.regress-any-leak/output
Normal file
3
testing/btest/Baseline/opt.regress-any-leak/output
Normal file
|
@ -0,0 +1,3 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
[[a=abc-1]]
|
||||
[1]
|
3
testing/btest/Baseline/opt.regress-any/output
Normal file
3
testing/btest/Baseline/opt.regress-any/output
Normal file
|
@ -0,0 +1,3 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
[a=123, b=[abc]]
|
||||
[a=123, b=1]
|
|
@ -0,0 +1 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
|
@ -385,6 +385,7 @@
|
|||
0.000000 MetaHookPost LoadFile(0, ./Zeek_NetBIOS.functions.bif.zeek, <...>/Zeek_NetBIOS.functions.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./Zeek_NoneWriter.none.bif.zeek, <...>/Zeek_NoneWriter.none.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./Zeek_PE.events.bif.zeek, <...>/Zeek_PE.events.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./Zeek_POP3.consts.bif.zeek, <...>/Zeek_POP3.consts.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./Zeek_POP3.events.bif.zeek, <...>/Zeek_POP3.events.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./Zeek_RADIUS.events.bif.zeek, <...>/Zeek_RADIUS.events.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./Zeek_RDP.events.bif.zeek, <...>/Zeek_RDP.events.bif.zeek) -> -1
|
||||
|
@ -464,6 +465,7 @@
|
|||
0.000000 MetaHookPost LoadFile(0, ./comm.bif.zeek, <...>/comm.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./communityid.bif.zeek, <...>/communityid.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./const.bif.zeek, <...>/const.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./consts.bif.zeek, <...>/consts.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./contents, <...>/contents.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./control, <...>/control.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./data.bif.zeek, <...>/data.bif.zeek) -> -1
|
||||
|
@ -679,6 +681,7 @@
|
|||
0.000000 MetaHookPost LoadFileExtended(0, ./Zeek_NetBIOS.functions.bif.zeek, <...>/Zeek_NetBIOS.functions.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./Zeek_NoneWriter.none.bif.zeek, <...>/Zeek_NoneWriter.none.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./Zeek_PE.events.bif.zeek, <...>/Zeek_PE.events.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./Zeek_POP3.consts.bif.zeek, <...>/Zeek_POP3.consts.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./Zeek_POP3.events.bif.zeek, <...>/Zeek_POP3.events.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./Zeek_RADIUS.events.bif.zeek, <...>/Zeek_RADIUS.events.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./Zeek_RDP.events.bif.zeek, <...>/Zeek_RDP.events.bif.zeek) -> (-1, <no content>)
|
||||
|
@ -758,6 +761,7 @@
|
|||
0.000000 MetaHookPost LoadFileExtended(0, ./comm.bif.zeek, <...>/comm.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./communityid.bif.zeek, <...>/communityid.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./const.bif.zeek, <...>/const.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./consts.bif.zeek, <...>/consts.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./contents, <...>/contents.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./control, <...>/control.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./data.bif.zeek, <...>/data.bif.zeek) -> (-1, <no content>)
|
||||
|
@ -1305,6 +1309,7 @@
|
|||
0.000000 MetaHookPre LoadFile(0, ./Zeek_NetBIOS.functions.bif.zeek, <...>/Zeek_NetBIOS.functions.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./Zeek_NoneWriter.none.bif.zeek, <...>/Zeek_NoneWriter.none.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./Zeek_PE.events.bif.zeek, <...>/Zeek_PE.events.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./Zeek_POP3.consts.bif.zeek, <...>/Zeek_POP3.consts.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./Zeek_POP3.events.bif.zeek, <...>/Zeek_POP3.events.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./Zeek_RADIUS.events.bif.zeek, <...>/Zeek_RADIUS.events.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./Zeek_RDP.events.bif.zeek, <...>/Zeek_RDP.events.bif.zeek)
|
||||
|
@ -1384,6 +1389,7 @@
|
|||
0.000000 MetaHookPre LoadFile(0, ./comm.bif.zeek, <...>/comm.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./communityid.bif.zeek, <...>/communityid.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./const.bif.zeek, <...>/const.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./consts.bif.zeek, <...>/consts.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./contents, <...>/contents.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./control, <...>/control.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./data.bif.zeek, <...>/data.bif.zeek)
|
||||
|
@ -1599,6 +1605,7 @@
|
|||
0.000000 MetaHookPre LoadFileExtended(0, ./Zeek_NetBIOS.functions.bif.zeek, <...>/Zeek_NetBIOS.functions.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./Zeek_NoneWriter.none.bif.zeek, <...>/Zeek_NoneWriter.none.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./Zeek_PE.events.bif.zeek, <...>/Zeek_PE.events.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./Zeek_POP3.consts.bif.zeek, <...>/Zeek_POP3.consts.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./Zeek_POP3.events.bif.zeek, <...>/Zeek_POP3.events.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./Zeek_RADIUS.events.bif.zeek, <...>/Zeek_RADIUS.events.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./Zeek_RDP.events.bif.zeek, <...>/Zeek_RDP.events.bif.zeek)
|
||||
|
@ -1678,6 +1685,7 @@
|
|||
0.000000 MetaHookPre LoadFileExtended(0, ./comm.bif.zeek, <...>/comm.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./communityid.bif.zeek, <...>/communityid.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./const.bif.zeek, <...>/const.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./consts.bif.zeek, <...>/consts.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./contents, <...>/contents.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./control, <...>/control.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./data.bif.zeek, <...>/data.bif.zeek)
|
||||
|
@ -2224,6 +2232,7 @@
|
|||
0.000000 | HookLoadFile ./Zeek_NetBIOS.functions.bif.zeek <...>/Zeek_NetBIOS.functions.bif.zeek
|
||||
0.000000 | HookLoadFile ./Zeek_NoneWriter.none.bif.zeek <...>/Zeek_NoneWriter.none.bif.zeek
|
||||
0.000000 | HookLoadFile ./Zeek_PE.events.bif.zeek <...>/Zeek_PE.events.bif.zeek
|
||||
0.000000 | HookLoadFile ./Zeek_POP3.consts.bif.zeek <...>/Zeek_POP3.consts.bif.zeek
|
||||
0.000000 | HookLoadFile ./Zeek_POP3.events.bif.zeek <...>/Zeek_POP3.events.bif.zeek
|
||||
0.000000 | HookLoadFile ./Zeek_RADIUS.events.bif.zeek <...>/Zeek_RADIUS.events.bif.zeek
|
||||
0.000000 | HookLoadFile ./Zeek_RDP.events.bif.zeek <...>/Zeek_RDP.events.bif.zeek
|
||||
|
@ -2305,6 +2314,7 @@
|
|||
0.000000 | HookLoadFile ./comm.bif.zeek <...>/comm.bif.zeek
|
||||
0.000000 | HookLoadFile ./communityid.bif.zeek <...>/communityid.bif.zeek
|
||||
0.000000 | HookLoadFile ./const.bif.zeek <...>/const.bif.zeek
|
||||
0.000000 | HookLoadFile ./consts.bif.zeek <...>/consts.bif.zeek
|
||||
0.000000 | HookLoadFile ./contents <...>/contents.zeek
|
||||
0.000000 | HookLoadFile ./control <...>/control.zeek
|
||||
0.000000 | HookLoadFile ./data.bif.zeek <...>/data.bif.zeek
|
||||
|
@ -2518,6 +2528,7 @@
|
|||
0.000000 | HookLoadFileExtended ./Zeek_NetBIOS.functions.bif.zeek <...>/Zeek_NetBIOS.functions.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./Zeek_NoneWriter.none.bif.zeek <...>/Zeek_NoneWriter.none.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./Zeek_PE.events.bif.zeek <...>/Zeek_PE.events.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./Zeek_POP3.consts.bif.zeek <...>/Zeek_POP3.consts.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./Zeek_POP3.events.bif.zeek <...>/Zeek_POP3.events.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./Zeek_RADIUS.events.bif.zeek <...>/Zeek_RADIUS.events.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./Zeek_RDP.events.bif.zeek <...>/Zeek_RDP.events.bif.zeek
|
||||
|
@ -2599,6 +2610,7 @@
|
|||
0.000000 | HookLoadFileExtended ./comm.bif.zeek <...>/comm.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./communityid.bif.zeek <...>/communityid.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./const.bif.zeek <...>/const.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./consts.bif.zeek <...>/consts.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./contents <...>/contents.zeek
|
||||
0.000000 | HookLoadFileExtended ./control <...>/control.zeek
|
||||
0.000000 | HookLoadFileExtended ./data.bif.zeek <...>/data.bif.zeek
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
Input::EVENT_NEW, aaa
|
||||
Input::EVENT_NEW, bbb
|
||||
Input::EVENT_NEW, final
|
|
@ -0,0 +1,4 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
Input::EVENT_NEW, aaa
|
||||
Input::EVENT_NEW, bbb
|
||||
Input::EVENT_NEW, final
|
|
@ -0,0 +1,4 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
Input::EVENT_NEW, 24612, binary start\x00\x00\x00\x00, \x00\x00\x00\x00\x00binary done
|
||||
Input::EVENT_NEW, 3, ccc, ccc
|
||||
Input::EVENT_NEW, 5, final, final
|
|
@ -0,0 +1,4 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
Input::EVENT_NEW, aaa-bbb-ccc
|
||||
Input::EVENT_NEW, aaa-bbb-ccc
|
||||
Input::EVENT_NEW, final
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue