Mirror of https://github.com/zeek/zeek.git, synced 2025-10-02 06:38:20 +00:00
Compare commits
148 commits
Commits by SHA1:

43f4ff255d, 79a51c715a, 19346b93ad, 9bda7493ec, 66a801bd2c, 6e4d3f0e56, db5ab72d0e, 8f877f9d58,
a0d35d6e28, 59a1c74ac5, 356685d82d, d90c0d3730, 181214ed78, 4021a0c654, b76a75d86e, 737b7d0add,
a233788a69, 1610fe9eaf, 94700130ed, c700efc3c8, 1b5ac2d2e5, 05da1c5a52, 5f07b3a858, 98eb2a10de,
c2874bf818, 83ea862c11, 11cf9e99f2, 76c94e84ac, 37e7b57664, c8b42fe3c7, bdcb1c8a44, ceb798b42a,
ec18da8baa, e712461719, bc8dc65bd6, 3e5060018a, 9f8e27118e, 89376095dc, 3e8af6497e, 5051cce720,
c30b835a14, a041080e3f, fc3001c76a, e2b2c79306, ed32ee73fa, eed9858bc4, ed081212ae, ec04c925a0,
de8127f3cd, b5774f2de9, 7c8a7680ba, 26b50908e1, c2f2388f18, d745d746bc, 5fbb6b4599, 7c463b5f92,
e7f694bcbb, f54416eae4, 68bfe8d1c0, cf97ed6ac1, 35cd891d6e, f300ddb9fe, fa5a7c4a5b, 56b596a3e3,
91067b32cc, 43ab74b70f, 887d92e26c, b1fec3284e, 5ce0f2edb6, d5c3cdf33a, 7ed52733d2, 056b70bd2d,
f697670668, 826d5e6fb7, 1c3be97fe9, 107c0da15d, e3845060dc, 34ef830b9c, 3ebe867193, 300b7a11ac,
f5fefd17df, 3281aa6284, bcfd47c28d, 10d5ca5948, f693f22192, c86f9267ff, dfbeb3e71f, fabb4023c9,
9eb3ada8c8, 7a73f81792, ea44c30272, c988bd2e4d, 5579494d48, 121170a5de, 0e4f2a2bab, 270429bfea,
815001f2aa, 88c37d0be8, 40db8463df, fb51e3a88f, 5a0e2bf771, 95e7c5a63e, 024304bddf, 2cc6c735d3,
3bf8bfaac6, 89b9f9a456, 8de8fb8fae, 595cdf8b55, 74b832fa39, 15be682f63, 8f9c5f79c6, 382b4b5473,
6f65b88f1b, cfe47f40a4, 0fd6672dde, e7ab18b343, 8a92b150a5, dd4597865a, 056bbe04ea, f6b8864584,
d1f6e91988, 6bbaef3e09, 55d36fc2cd, f8fbeca504, 72ff343f17, b76096a9ee, b9e4669632, 5974613cae,
3a44bda957, 51262d02c7, b46aeefbab, a4b746e5e8, 746ae4d2cc, a65a339aa8, 8014c4b8c3, d9dc121e9a,
5a56ff92d2, b13dfa3b16, d17a1f9822, 5cdddd92d5, b8d11f4688, 91b23a6e2e, a8c56c1f25, 5f6df68463,
ac95484382, 962b03a431, 92a685df50, 1bf439cd58
334 changed files with 8056 additions and 4107 deletions
.cirrus.yml (84 lines changed)

```diff
@@ -10,7 +10,7 @@ btest_jobs: &BTEST_JOBS 4
 btest_retries: &BTEST_RETRIES 2
 memory: &MEMORY 16GB
 
-config: &CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror
+config: &CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror -D FETCHCONTENT_FULLY_DISCONNECTED:BOOL=ON
 no_spicy_config: &NO_SPICY_CONFIG --build-type=release --disable-broker-tests --disable-spicy --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror
 static_config: &STATIC_CONFIG --build-type=release --disable-broker-tests --enable-static-broker --enable-static-binpac --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror
 binary_config: &BINARY_CONFIG --prefix=$CIRRUS_WORKING_DIR/install --libdir=$CIRRUS_WORKING_DIR/install/lib --binary-package --enable-static-broker --enable-static-binpac --disable-broker-tests --build-type=Release --ccache --enable-werror
@@ -35,8 +35,7 @@ macos_environment: &MACOS_ENVIRONMENT
 
 freebsd_resources_template: &FREEBSD_RESOURCES_TEMPLATE
   cpu: 8
-  # Not allowed to request less than 8GB for an 8 CPU FreeBSD VM.
-  memory: 8GB
+  memory: *MEMORY
   # For greediness, see https://medium.com/cirruslabs/introducing-greedy-container-instances-29aad06dc2b4
   greedy: true
 
@@ -67,6 +66,12 @@ skip_task_on_pr: &SKIP_TASK_ON_PR
   skip: >
     ($CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ '.*fullci.*')
 
+zam_skip_task_on_pr: &ZAM_SKIP_TASK_ON_PR
+  # Skip this task on PRs if it does not have the fullci or zamci label,
+  # it continues to run for direct pushes to master/release.
+  skip: >
+    ($CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ '.*fullci.*' && $CIRRUS_PR_LABELS !=~ '.*zamci.*')
+
 benchmark_only_if_template: &BENCHMARK_ONLY_IF_TEMPLATE
   # only_if condition for cron-triggered benchmarking tests.
   # These currently do not run for release/.*
@@ -166,17 +171,17 @@ env:
 # Linux EOL timelines: https://linuxlifecycle.com/
 # Fedora (~13 months): https://fedoraproject.org/wiki/Fedora_Release_Life_Cycle
 
-fedora40_task:
+fedora42_task:
   container:
-    # Fedora 40 EOL: Around May 2025
-    dockerfile: ci/fedora-40/Dockerfile
+    # Fedora 42 EOL: Around May 2026
+    dockerfile: ci/fedora-42/Dockerfile
     << : *RESOURCES_TEMPLATE
   << : *CI_TEMPLATE
 
-fedora39_task:
+fedora41_task:
   container:
-    # Fedora 39 EOL: Around Nov 2024
-    dockerfile: ci/fedora-39/Dockerfile
+    # Fedora 41 EOL: Around Nov 2025
+    dockerfile: ci/fedora-41/Dockerfile
     << : *RESOURCES_TEMPLATE
   << : *CI_TEMPLATE
   << : *SKIP_TASK_ON_PR
@@ -302,25 +307,6 @@ ubuntu22_spicy_task:
   benchmark_script: ./ci/benchmark.sh
   << : *BENCHMARK_ONLY_IF_TEMPLATE
 
-ubuntu22_spicy_head_task:
-  container:
-    # Ubuntu 22.04 EOL: April 2027
-    dockerfile: ci/ubuntu-22.04/Dockerfile
-    << : *RESOURCES_TEMPLATE
-  << : *CI_TEMPLATE
-  env:
-    ZEEK_CI_CREATE_ARTIFACT: 1
-    # Pull auxil/spicy to the latest head version. May or may not build.
-    ZEEK_CI_PREBUILD_COMMAND: 'cd auxil/spicy && git fetch && git reset --hard origin/main && git submodule update --init --recursive'
-  spicy_install_analyzers_script: ./ci/spicy-install-analyzers.sh
-  upload_binary_artifacts:
-    path: build.tgz
-  benchmark_script: ./ci/benchmark.sh
-  # Don't run this job on release branches. It tests against spicy HEAD, which
-  # will frequently require other fixes that won't be in a release branch.
-  skip: $CIRRUS_BRANCH =~ 'release/.*'
-  << : *BENCHMARK_ONLY_IF_TEMPLATE
-
 ubuntu20_task:
   container:
     # Ubuntu 20.04 EOL: April 2025
@@ -341,12 +327,12 @@ alpine_task:
 # Cirrus only supports the following macos runner currently, selecting
 # anything else automatically upgrades to this one.
 #
-#   ghcr.io/cirruslabs/macos-runner:sonoma
+#   ghcr.io/cirruslabs/macos-runner:sequoia
 #
 # See also: https://cirrus-ci.org/guide/macOS/
-macos_sonoma_task:
+macos_sequoia_task:
   macos_instance:
-    image: ghcr.io/cirruslabs/macos-runner:sonoma
+    image: ghcr.io/cirruslabs/macos-runner:sequoia
   prepare_script: ./ci/macos/prepare.sh
   << : *CI_TEMPLATE
   << : *MACOS_ENVIRONMENT
@@ -355,7 +341,7 @@ macos_sonoma_task:
 freebsd14_task:
   freebsd_instance:
     # FreeBSD 14 EOL: Nov 30 2028
-    image_family: freebsd-14-0
+    image_family: freebsd-14-2
   << : *FREEBSD_RESOURCES_TEMPLATE
 
   prepare_script: ./ci/freebsd/prepare.sh
@@ -365,7 +351,7 @@ freebsd14_task:
 freebsd13_task:
   freebsd_instance:
     # FreeBSD 13 EOL: January 31, 2026
-    image_family: freebsd-13-3
+    image_family: freebsd-13-4
   << : *FREEBSD_RESOURCES_TEMPLATE
 
   prepare_script: ./ci/freebsd/prepare.sh
@@ -389,6 +375,21 @@ asan_sanitizer_task:
     # Use absolute paths for coverage files.
     CCACHE_BASEDIR:
 
+# ASAN task executing btests with zam alternative.
+asan_sanitizer_zam_task:
+  container:
+    dockerfile: ci/ubuntu-24.04/Dockerfile
+    << : *RESOURCES_TEMPLATE
+
+  << : *CI_TEMPLATE
+  env:
+    ZEEK_CI_CONFIGURE_FLAGS: *ASAN_SANITIZER_CONFIG
+    ASAN_OPTIONS: detect_leaks=1:detect_odr_violation=0
+    ZEEK_CI_SKIP_UNIT_TESTS: 1
+    ZEEK_CI_SKIP_EXTERNAL_BTESTS: 1
+    ZEEK_CI_BTEST_EXTRA_ARGS: -a zam
+  << : *ZAM_SKIP_TASK_ON_PR
+
 ubsan_sanitizer_task:
   container:
     # Just uses a recent/common distro to run undefined behavior checks.
@@ -406,6 +407,23 @@ ubsan_sanitizer_task:
     ZEEK_TAILORED_UB_CHECKS: 1
     UBSAN_OPTIONS: print_stacktrace=1
 
+ubsan_sanitizer_zam_task:
+  container:
+    dockerfile: ci/ubuntu-24.04/Dockerfile
+    << : *RESOURCES_TEMPLATE
+
+  << : *CI_TEMPLATE
+  env:
+    CC: clang-18
+    CXX: clang++-18
+    ZEEK_CI_CONFIGURE_FLAGS: *UBSAN_SANITIZER_CONFIG
+    ZEEK_TAILORED_UB_CHECKS: 1
+    UBSAN_OPTIONS: print_stacktrace=1
+    ZEEK_CI_SKIP_UNIT_TESTS: 1
+    ZEEK_CI_SKIP_EXTERNAL_BTESTS: 1
+    ZEEK_CI_BTEST_EXTRA_ARGS: -a zam
+  << : *ZAM_SKIP_TASK_ON_PR
+
 tsan_sanitizer_task:
   container:
     # Just uses a recent/common distro to run memory error/leak checks.
```
.github/workflows/generate-docs.yml (vendored, 2 lines changed)

```diff
@@ -17,7 +17,7 @@ jobs:
     permissions:
       contents: write # for Git to git push
     if: github.repository == 'zeek/zeek'
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
 
     steps:
       # We only perform a push if the action was triggered via a schedule
```
```diff
@@ -32,4 +32,4 @@ repos:
     rev: v1.16.21
     hooks:
       - id: typos
-        exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES)$'
+        exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES|scripts/base/protocols/ssl/mozilla-ca-list.zeek)$'
```
```diff
@@ -30,6 +30,7 @@ extend-ignore-re = [
     "\"BaR\"",
     "\"xFoObar\"",
     "\"FoO\"",
+    "Steve Smoot",
 ]
 
 extend-ignore-identifiers-re = [
@@ -64,6 +65,8 @@ have_2nd = "have_2nd"
 ot1 = "ot1"
 ot2 = "ot2"
 uses_seh = "uses_seh"
+ect0 = "ect0"
+ect1 = "ect1"
 
 [default.extend-words]
 caf = "caf"
```
CHANGES (897 lines changed)

@@ -1,3 +1,900 @@

7.0.9 | 2025-07-21 10:23:17 -0700

  * Return weird if a log line is over a configurable size limit (Tim Wojtulewicz, Corelight)

7.0.8-9 | 2025-07-17 08:46:49 -0700

  * CI: Remove spicy-head task (Tim Wojtulewicz, Corelight)

  * [Spicy] Let `zeek::protocol_handle_close()` send a TCP EOF. (Benjamin Bannier, Corelight)

    (cherry picked from commit ce6c7a6cd1b3eb7f7b3c12772763edd9881b430e)

  * Remove libzmq5 from Docker images (Tim Wojtulewicz, Corelight)

    This was accidentally added in 356685d82deea868f4340169b9a6c8d2e2109bed and
    doesn't need to be in our official 7.0 images.

  * CI: Force opensuse-tumbleweed image to rebuild (Tim Wojtulewicz, Corelight)

7.0.8-5 | 2025-07-14 14:15:46 -0700

  * fix for error in ZAM's constant propagation logic (Vern Paxson, Corelight)

    (cherry picked from commit 869bd181b20fc9840472e35e87249a946177273b)

7.0.8-4 | 2025-07-14 14:12:46 -0700

  * btest/logging: Fly-by cleanup (Arne Welzel, Corelight)

    (cherry picked from commit f4357485d2006813fba768aa83e9e1bf8e8bb236)

  * GH-4562: logging/Ascii: Fix abort() for non-existing postrotation functions (Arne Welzel, Corelight)

    When looking up the postprocessor function from shadow files, id::find_func()
    would abort() if the function wasn't available instead of falling back
    to the default postprocessor.

    Fix by using id::find() and checking the type explicitly, also adding a
    strict type check while at it.

    This issue was tickled by loading the json-streaming-logs package,
    Zeek creating shadow files containing its custom postprocessor function,
    then restarting Zeek without the package loaded.

    (cherry picked from commit f4357485d2006813fba768aa83e9e1bf8e8bb236)
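For background, a rotation postprocessor is a plain script-level function attached to a log filter, and it is this function's name that ends up recorded in the writer's shadow files. A minimal sketch, with a hypothetical function body and filter choice:

```zeek
# Sketch of a custom rotation postprocessor; the logic here is hypothetical.
function my_postproc(info: Log::RotationInfo): bool
	{
	# E.g., compress or ship the rotated file named by info$fname.
	return T;
	}

event zeek_init()
	{
	local f = Log::get_filter(Conn::LOG, "default");
	f$postprocessor = my_postproc;
	Log::add_filter(Conn::LOG, f);
	}
```

If Zeek later restarts without the script defining `my_postproc`, the shadow file still names it, which is exactly the lookup the fix above makes fall back gracefully instead of aborting.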
7.0.8-3 | 2025-07-14 14:11:30 -0700

  * docker: Add `net-tools` and `procps` dependencies (Edoardo Mich)

    Add `net-tools` and `procps` to the final.Dockerfile, to avoid the warning in
    zeekctl commands like "failed to find local IP addresses [...]" and the
    error in the `zeekctl top` command, caused respectively by the missing
    `ifconfig` and `top` commands.

    (cherry picked from commit 8189716adcd16e017c4ea7bcd2358f89bd6cbcc5)

7.0.8-2 | 2025-07-14 14:09:51 -0700

  * Update ZeekJS to v0.18.0 (Tim Wojtulewicz, Corelight)

    This is primarily to bring in 26c8c3684c46dce2f00b191ed009b1ea9bfe9159.

7.0.8-1 | 2025-07-14 13:56:17 -0700

  * GH-4522: smtp: Fix last_reply column in smtp.log for BDAT LAST (Arne Welzel, Corelight)

    The response to BDAT LAST was never recognized, resulting in the
    BDAT LAST commands not being logged in a timely fashion and receiving
    the wrong status.

    This likely doesn't handle complex pipeline scenarios, but it fixes
    the wrong behavior of smtp_reply() not handling simple BDAT command
    responses.

    Thanks @cccs-jsjm for the report!

    (cherry picked from commit f5063bfcd409a88420d16be3ea2d9706fedc7f3e)
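At the script level these server responses arrive via the `smtp_reply` event; a small sketch for observing BDAT status codes (illustrative only, not part of the fix):

```zeek
# Print the server's reply to each BDAT command; purely illustrative.
event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
    msg: string, cont_resp: bool)
	{
	if ( cmd == "BDAT" )
		print fmt("%s BDAT reply: %d %s", c$uid, code, msg);
	}
```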
7.0.8 | 2025-05-19 14:00:30 -0700

  * Update CHANGES, VERSION, and NEWS for 7.0.8 (Tim Wojtulewicz, Corelight)

7.0.7-7 | 2025-05-19 11:17:50 -0700

  * external/subdir-btest.cfg: Set OPENSSL_ENABLE_SHA1_SIGNATURES=1 (Arne Welzel, Corelight)

    We already do something similar for OPENSSL_ENABLE_MD5_VERIFY=1

    (cherry picked from commit 280e7acc6e99a33fa79f1c1b36cf0edffbdca72d)

  * btest/x509_verify: Drop OpenSSL 1.0 hack (Arne Welzel, Corelight)

    We do not have a distro in CI anymore that ships OpenSSL 1.0,
    drop the hack.

    (cherry picked from commit 280e7acc6e99a33fa79f1c1b36cf0edffbdca72d)

  * GH-4035: testing/btest: Use OPENSSL_ENABLE_SHA1_SIGNATURES (Arne Welzel, Corelight)

    This reverts the call to update-crypto-policies in the Fedora 41 image
    and instead sets OPENSSL_ENABLE_SHA1_SIGNATURES in the individual tests.
    This allows RHEL 10 or Fedora 41 users to run the tests in question
    without needing to fiddle with system settings.

    (cherry picked from commit 280e7acc6e99a33fa79f1c1b36cf0edffbdca72d)

7.0.7-6 | 2025-05-19 09:22:22 -0700

  * Update paraglob submodule for GCC 15.1 build fix (Tim Wojtulewicz, Corelight)

7.0.7-5 | 2025-05-19 09:20:24 -0700

  * probabilistic/BitVector: Add include <cstdint> (Arne Welzel, Corelight)

    (cherry picked from commit 7583651bec7b4a87d77d756bb3ae963f7b69b1cd)

  * CI: Drop fedora-39 and fedora-40 (Arne Welzel, Corelight)

    (cherry picked from commit 7583651bec7b4a87d77d756bb3ae963f7b69b1cd)

  * CI: Add fedora-41 and fedora-42 (Arne Welzel, Corelight)

    (cherry picked from commit 7583651bec7b4a87d77d756bb3ae963f7b69b1cd)

7.0.7-4 | 2025-05-19 09:17:52 -0700

  * Remove unnecessary check for bind library. (Johanna Amann, Corelight)

    This has not been required since we switched to c-ares and breaks builds
    on Fedora Rawhide.

    (cherry picked from commit 37be65dfd04906270111c44533b3aadc5f421784)

7.0.7-3 | 2025-05-19 09:15:54 -0700

  * fix for ZAM optimization when an aggregate is modified inside of a loop (Vern Paxson, Corelight)

    (cherry picked from commit 2255fa23b8a3f7bde1345f2847764412c90487c8)

7.0.7-2 | 2025-05-19 09:09:07 -0700

  * Bump `auxil/spicy` to v1.11.5 (Benjamin Bannier, Corelight)

7.0.7 | 2025-05-08 14:46:40 -0700

  * Release 7.0.7.

7.0.6-14 | 2025-05-08 14:46:21 -0700

  * Bump auxil/spicy to spicy-1.11.4 (Benjamin Bannier, Corelight)

  * Add fix to support CMake 4.0, plus update Spicy to version that supports it (Tim Wojtulewicz, Corelight)

  * CI: Use brew version of python3 on macOS (Tim Wojtulewicz, Corelight)

7.0.6-8 | 2025-05-06 10:09:01 -0700

  * Update quic baselines due to service ordering (Tim Wojtulewicz, Corelight)

7.0.6-7 | 2025-05-06 09:54:32 -0700

  * GH-1841: ldap: Replace if with switch on bool (Arne Welzel, Corelight)

    The change from a2a535d0c91da67c7389a4aedec6a0c8a6da6613 used
    zeek/spicy#1841, but Zeek 7.0 does not have that functionality
    yet. Replace with switch ( bool ).

  * ldap: Clean up from code review (Arne Welzel, Corelight)

    (cherry picked from commit a2a535d0c91da67c7389a4aedec6a0c8a6da6613)

  * ldap: Add Sicily Authentication constants (Arne Welzel, Corelight)

    (cherry picked from commit a2a535d0c91da67c7389a4aedec6a0c8a6da6613)

  * GH-4275: ldap: Only switch into MS_KRB5 mode if responseToken exists (Arne Welzel, Corelight)

    (cherry picked from commit a2a535d0c91da67c7389a4aedec6a0c8a6da6613)

7.0.6-4 | 2025-05-05 12:56:17 -0700

  * QUIC: Extract reset_crypto() function (Arne Welzel, Corelight)

    (cherry picked from commit 50ac8d1468603c710e109f1c050b3966dd91deda)

  * QUIC: Rename ConnectionIDInfo to Context (Arne Welzel, Corelight)

    Let's just call it what it is, given that it contains more than just
    connection IDs.

    (cherry picked from commit 50ac8d1468603c710e109f1c050b3966dd91deda)

  * QUIC: Switch initial_destination_conn_id to optional (Arne Welzel, Corelight)

    (cherry picked from commit 50ac8d1468603c710e109f1c050b3966dd91deda)

  * QUIC: Use initial destination conn_id for decryption (Arne Welzel, Corelight)

    Ensure the client side also uses the initial destination connection ID
    for decryption purposes instead of the one from the current long header
    packet. PCAP from local WiFi hotspot.

    (cherry picked from commit 50ac8d1468603c710e109f1c050b3966dd91deda)

  * QUIC: Handle CRYPTO frames across multiple INITIAL packets (Arne Welzel, Corelight)

    Instead of sending the accumulated CRYPTO frames after processing an
    INITIAL packet, add logic to determine the total length of the TLS
    Client or Server Hello (by peeking into the first 4 bytes). Once all
    CRYPTO frames have arrived, flush the reassembled data to the TLS
    analyzer at once.

    (cherry picked from commit 50ac8d1468603c710e109f1c050b3966dd91deda)

  * QUIC: Do not consume EncryptedLongPacketPayload (Arne Welzel, Corelight)

    The payload is already consumed within the InitialPacket unit. Consuming
    it again caused UDP datagrams with multiple packets to ignore
    the remaining packets in the same UDP datagram. The baseline changes
    showing I being followed by a new H indicate that the INITIAL packet
    was followed by a HANDSHAKE packet, but previously Zeek discarded
    these.

    (cherry picked from commit 50ac8d1468603c710e109f1c050b3966dd91deda)

  * QUIC: Fix ACK frame parsing (Arne Welzel, Corelight)

    (cherry picked from commit 50ac8d1468603c710e109f1c050b3966dd91deda)

7.0.6-3 | 2025-05-05 12:54:30 -0700

  * broker/main: Adapt enum values to agree with comm.bif (Arne Welzel, Corelight)

    Logic to detect this error already existed, but due to enum identifiers
    not having a value set, it never triggered before.

    (cherry picked from commit 6bc36e8cf87593200c121caab8eccdaf94e2f841)

7.0.6 | 2025-03-20 12:24:03 -0700

  * Release 7.0.6.

7.0.5-14 | 2025-03-19 13:04:31 -0700

  * Update CHANGES, VERSION, and NEWS for 7.0.6 release (Tim Wojtulewicz, Corelight)

  * Update zeekctl submodule to fix a couple btests (Tim Wojtulewicz, Corelight)

7.0.5-12 | 2025-03-19 10:43:12 -0700

  * Update zeekjs to v0.16.0 (Tim Wojtulewicz, Corelight)

7.0.5-10 | 2025-03-18 17:59:01 -0700

  * Update mozilla-ca-list.zeek and ct-list.zeek to NSS 3.109 (Tim Wojtulewicz, Corelight)

  * Update core/vntag-in-vlan baseline to remove ip_proto field for 7.0 (Tim Wojtulewicz, Corelight)

7.0.5-7 | 2025-03-18 16:21:31 -0700

  * CI: Unconditionally upgrade pip on macOS (Tim Wojtulewicz, Corelight)

    (cherry picked from commit e8d91c8227f95eda002a3cb2675cb64863871ebc)

7.0.5-6 | 2025-03-18 16:20:53 -0700

  * ci/init-external-repo.sh: Use regex to match macos cirrus task (Arne Welzel, Corelight)

    (cherry picked from commit 43f108bb71bca8cec7faeb8d1aeae5c5920fcee6)

  * CI: Change macOS runner to Sequoia (Tim Wojtulewicz, Corelight)

    (cherry picked from commit 43f108bb71bca8cec7faeb8d1aeae5c5920fcee6)

  * Update mozilla-ca-list.zeek and ct-list.zeek to NSS 3.109 (Tim Wojtulewicz, Corelight)

  * CI: Update freebsd to 13.4 and 14.2 (Tim Wojtulewicz, Corelight)

7.0.5-4 | 2025-03-18 16:17:54 -0700

  * Add analyzer registration from VLAN to VNTAG (Tim Wojtulewicz, Corelight)

    (cherry picked from commit cb5e3d0054ac9acdc13606de35b79f0da06d6a21)

7.0.5-3 | 2025-03-18 16:16:08 -0700

  * GH-2311: scan.l: Fix @load-plugin scripts loading (Arne Welzel, Corelight)

    For a plugin loaded via @load-plugin, create a YY_BUFFER_STATE holding
    the required loads for the implicitly loaded files. In loaded scripts,
    this generated file will show up with a path of the shared object file
    of the plugin, with the __preload__.zeek and __load__.zeek files loaded
    by it.

    (cherry picked from commit a3a08fa0f3d78144d21176111457ab8bdd3c9a74)

  * scan.l: Extract switch_to() from load_files() (Arne Welzel, Corelight)

    (cherry picked from commit a3a08fa0f3d78144d21176111457ab8bdd3c9a74)

  * ScannedFile: Allow skipping canonicalization (Arne Welzel, Corelight)

    (cherry picked from commit a3a08fa0f3d78144d21176111457ab8bdd3c9a74)

7.0.5-2 | 2025-03-18 16:14:21 -0700

  * QUIC/decrypt_crypto: Rename all_data to data (Arne Welzel, Corelight)

    (cherry picked from commit 44304973fb4ea3ffc94f13feb8592952675202f1)

  * GH-4201: QUIC: Confirm before forwarding data to SSL (Arne Welzel, Corelight)

    (cherry picked from commit 44304973fb4ea3ffc94f13feb8592952675202f1)

  * GH-4198: QUIC: Parse all QUIC packets in a UDP datagram (Arne Welzel, Corelight)

    A UDP datagram may contain multiple QUIC packets, but the parser so far
    handled only the very first packet, ignoring any subsequent packets.

    (cherry picked from commit 44304973fb4ea3ffc94f13feb8592952675202f1)

  * QUIC: Only slurp till packet end, not till &eod (Arne Welzel, Corelight)

    This doesn't change behavior, but avoids slurping in more data than
    needed. A UDP packet can contain multiple QUIC packets and we'd read
    all following ones instead of just the one we're interested in.

    (cherry picked from commit 44304973fb4ea3ffc94f13feb8592952675202f1)

7.0.5-1 | 2025-03-18 16:12:32 -0700

  * fix for ZAM optimization of assigning a record field to result of "in" operation (Vern Paxson, Corelight)

    (cherry picked from commit 991bc9644dd9b77f09fff992658f10500dce77e1)

7.0.5 | 2024-12-16 11:12:33 -0700

  * Update CHANGES, VERSION, and NEWS for 7.0.5 release (Tim Wojtulewicz, Corelight)

7.0.4-10 | 2024-12-16 10:21:46 -0700

  * QUIC/decrypt_crypto: Actually check if decryption was successful (Arne Welzel, Corelight)

    ...and bail if it wasn't.

    PCAP was produced using OSS-Fuzz input from issue 383379789.

  * QUIC/decrypt_crypto: Limit payload_length to 10k (Arne Welzel, Corelight)

    Given we dynamically allocate memory for decryption, employ a limit
    that is unlikely to be hit, but allows for large payloads produced
    by the fuzzer or jumbo frames.

  * QUIC/decrypt_crypto: Fix decrypting into too small stack buffer (Arne Welzel, Corelight)

    A QUIC initial packet larger than 1500 bytes could lead to crashes
    due to the usage of a fixed size stack buffer for decryption.

    Allocate the necessary memory dynamically on the heap instead.

7.0.4-5 | 2024-12-13 12:25:43 -0700

  * fix for memory management associated with ZAM table iteration (Vern Paxson, Corelight)

    (cherry picked from commit 805e9db58840671c866c85461ad88198eeeec967)

7.0.4-4 | 2024-12-12 13:12:30 -0700

  * Fix ZAM's implementation of Analyzer::name() BiF (Christian Kreibich, Corelight)

    (cherry picked from commit e100a8e698d1dba0fc339eae800d13b298e55d46)

7.0.4-3 | 2024-12-12 13:04:44 -0700

  * GH-4052: More robust memory management for ZAM execution - fixes #4052 (Vern Paxson, Corelight)

    (cherry picked from commit c3b30b187e44de593d0ec685dc313e6aa10ff5e5)

7.0.4-2 | 2024-12-12 12:44:36 -0700

  * Bump zeekjs to v0.14.0 (Arne Welzel, Corelight)

    79b0c21 version: 0.14.0
    b75b384 docker: Use Fedora 41 packages
    d65cbc8 Minor header cleanup
    4fd7e8b scripts: Remove __preload__.zeek and types.zeek
    93a8113 CreateEnvironment: Unset kOwnsProcessState and kOwnsInspector
    3b1f5d3 Instance::Init(): Replace parameters with Options struct

    (cherry picked from commit aac640ebffdd58b1e25e42eee538b7c8669b1dd4)

  * Fix documentation for `have_spicy` [skip CI] (Benjamin Bannier, Corelight)

    The continued line was incorrectly marked up (as a plain comment instead
    of a Zeekygen comment), which caused only half of the docstring to be
    rendered by Zeekygen.

    (cherry picked from commit 4a96d34af6fe68bad6afe12dffff6bd795e6b4b1)

7.0.4 | 2024-11-19 12:31:05 -0700

  * Update CHANGES, VERSION, and NEWS for 7.0.4 release (Tim Wojtulewicz, Corelight)

7.0.3-27 | 2024-11-19 11:19:00 -0700

  * Bump zeekjs to 0.13.2 (Arne Welzel, Corelight)

    (cherry picked from commit 6e916efe8d9470cdca7b4f4933c44b8c1cab66b0)

7.0.3-26 | 2024-11-19 10:56:06 -0700

  * Merge remote-tracking branch 'origin/topic/timw/speed-up-zam-ci-testing' (Tim Wojtulewicz, Corelight)

    * origin/topic/timw/speed-up-zam-ci-testing:
      CI: Use test.sh script for running ZAM tests, but disable parts of it

    (cherry picked from commit d9a74680e08553b34cb164847f61b0ea00ad1f5f)

7.0.3-24 | 2024-11-19 09:32:54 -0700

  * account for spaces encoding to plus signs in sqli regex detection (Cooper Grill)

    (cherry picked from commit 5200b84fb3ce6c19e9d114b3d8fb0d964639446d)
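The point of this change is that URIs may encode a space as `+` as well as `%20`; a hedged illustration of matching both encodings (this toy pattern is not the one the detect-sqli script actually uses):

```zeek
event zeek_init()
	{
	# '+' and '%20' both decode to a space in query strings, so a SQLi
	# pattern must accept either; this pattern is only an illustration.
	local uri = "/item.php?id=1'+OR+'1'='1";
	if ( /['"](\+|%20| )*(OR|AND)/ in uri )
		print "possible SQL injection";
	}
```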
7.0.3-23 | 2024-11-14 19:06:56 -0700

  * btest: Skip core.script-args under TSAN (Arne Welzel, Corelight)

    (cherry picked from commit 159f40a4bff10e7000cb5f5c8a08c6d2b236baef)

  * Disable core.expr-execption btest under ZAM to fix CI builds (Tim Wojtulewicz, Corelight)

  * Fix ubsan warning with ZAM and mmdb btest (Tim Wojtulewicz)

7.0.3-19 | 2024-11-14 12:15:54 -0700

  * ci: Add asan and ubsan sanitizer tasks for ZAM (Arne Welzel, Corelight)

    (cherry picked from commit 8945b2b186e633f1f7c3a07f9e343da278c037ec)

7.0.3-18 | 2024-11-14 12:13:59 -0700

  * policy/community-id: Populate conn$community_id in new_connection() (Arne Welzel, Corelight)

    This wasn't possible before #3028 was fixed, but now it's safe to set
    the value in new_connection() and allow other users access to the
    field much earlier. We do not have to deal with connection_flipped()
    because the community-id hash is symmetric.

    (cherry picked from commit d3579c1f34fa679ac05df6f1f2cfbe11fc77b328)

  * Update zeekjs submodule to latest tagged version (Tim Wojtulewicz, Corelight)

    This picks up the changes to support Node.js v22.11.0.

7.0.3-16 | 2024-11-14 11:57:54 -0700

  * GH-4006: Fix nullptr deref in Spicy accept/decline input (Evan Typanski, Corelight)

    (cherry picked from commit 1d38c310719088ca0e9610ec8458fe4cf8e4a318)

7.0.3-15 | 2024-11-14 11:56:06 -0700

  * Fix potential nullptr deref in Spicy plugin runtime (Benjamin Bannier, Corelight)

    If we added a file but the other side of the connection had already run
    into a protocol violation and shut down, we could previously have
    dereferenced a null cookie. This patch fixes the code so it now throws
    in such scenarios.

    (cherry picked from commit 2e8d6e86e75bc7b0c7be67ab9c38738a1318f6ff)

  * Assume no Spicy cookie in most places (Benjamin Bannier, Corelight)

    We would previously assert that it was available, which could have led
    to aborts since, when the analyzer for either side of a connection shuts
    down, the connection cookie could get cleared and become nil. This patch
    reworks the code slightly so we now never assume it is available. We do
    this by either throwing or by making the whole operation requesting the
    cookie a noop.

    (cherry picked from commit 2e8d6e86e75bc7b0c7be67ab9c38738a1318f6ff)

7.0.3-14 | 2024-11-14 11:52:34 -0700

  * Fix up minor warnings in touched files (Evan Typanski, Corelight)

    (cherry picked from commit 36af0591a6f2c7270c68deaee4c4d733fa4086b1)

  * Fix Clang 19 deprecation failure (Evan Typanski, Corelight)

    Clang 19 with libc++ started failing to compile because the default
    implementation of `std::char_traits` was removed, making uses of
    `std::char_traits<unsigned char>` invalid. This was used more for
    convenience before, but it should be roughly the same behavior with
    `char`.

    (cherry picked from commit 985f4f7c725ae1a9f85dbc112e5bc340a34a034b)

7.0.3-12 | 2024-11-14 11:33:09 -0700

  * GH-3978: Bump zeekjs to 0.12.1 (Arne Welzel, Corelight)

    7ec0953 ci: Bump actions/upload-artifact
    8e5914d ci/docker: Bump distros, update some OBS repos
    129b737 Fix crash during shutdown

    (cherry picked from commit d74b073852b748aca7793f557f0a5378cb30ca19)

7.0.3-11 | 2024-11-14 11:31:35 -0700

  * GH-3962: Prevent non-Modbus on port 502 to be reported as Modbus (Emmanuele Zambon)

    This commit prevents most non-Modbus TCP traffic on port 502 from being
    reported as Modbus in conn.log as well as in modbus.log.
    To do so, we have introduced two &enforce checks in the Modbus
    protocol definition that check that some specific fields of the
    (supposedly) Modbus header are compatible with values specified in
    the specs.

    (cherry picked from commit 4763282f36d13808b58948cc378a7df00201c9f5)

7.0.3-10 | 2024-11-14 11:30:00 -0700

  * GH-3957: input/Raw: Rework GetLine() (Arne Welzel, Corelight)

    (cherry picked from commit 2a23e9fc1962419e41133689c2a682455d24e35e)

  * GH-215: POP3: Rework unbounded pending command fix (Arne Welzel, Corelight)

    (cherry picked from commit 2a23e9fc1962419e41133689c2a682455d24e35e)

7.0.3-9 | 2024-11-14 10:21:55 -0700

  * import of GH-4022 BTest additions (Vern Paxson, Corelight)

    ZAM baseline update

  * fix for setting object locations to avoid use-after-free situation (Vern Paxson, Corelight)

  * fixes for script optimization of coerce-to-any expressions (Vern Paxson, Corelight)

  * porting of GH-4022 (Vern Paxson, Corelight)

  * porting of GH-4016 (Vern Paxson, Corelight)

  * porting of GH-4013 (Vern Paxson, Corelight)

  * fixed access to uninitialized memory in ZAM's "cat" built-in (Vern Paxson, Corelight)

7.0.3-1 | 2024-10-18 17:15:02 -0700

  * Merge remote-tracking branch 'origin/topic/bbannier/fix-docs-ci-again' (Christian Kreibich, Corelight)

    * origin/topic/bbannier/fix-docs-ci-again:
      Fix installation of Python packages in generate docs CI job again

    (cherry picked from commit c28442a9a178b735e3fe1b5f5938f922a5aa7a66)

7.0.3 | 2024-10-04 15:42:14 -0700

  * Update CHANGES, VERSION, and NEWS for 7.0.3 release (Christian Kreibich, Corelight)

7.0.2-5 | 2024-10-04 10:46:01 -0700

  * Merge remote-tracking branch 'security/topic/awelzel/215-pop3-mail-null-deref' (Christian Kreibich, Corelight)

    * security/topic/awelzel/215-pop3-mail-null-deref:
      POP3: Rework unbounded pending command fix

    (cherry picked from commit 7fea32c6edc5d4d14646366f87c9208c8c9cf555)

7.0.2-4 | 2024-10-04 10:28:13 -0700

  * Update docs submodule [nomail] [skip ci] (Christian Kreibich, Corelight)

7.0.2-3 | 2024-10-04 09:54:48 -0700

  * Bump auxil/spicy to latest release (Benjamin Bannier, Corelight)

  * Install procps in OpenSuse Leap images (Benjamin Bannier, Corelight)

    (cherry picked from commit a27066e3fc58d70401359887fcf34bd0bb83d433)

7.0.2 | 2024-09-23 12:01:55 -0700

  * pop3: Remove unused headers (Arne Welzel, Corelight)

    (cherry picked from commit 702fb031a4ea2b00364d6a7321384a45551ce3a2)

  * pop3: Prevent unbounded state growth (Arne Welzel, Corelight)

    The cmds list may grow unbounded due to the POP3 analyzer being in
    multiLine mode after seeing `AUTH` in a Redis connection, but never
    a `.` terminator. This can easily be provoked by the Redis ping
    command.

    This adds two heuristics: 1) Forcefully process the oldest commands in
    the cmds list and cap it at max_pending_commands. 2) Start raising
    analyzer violations if the client has been using more than
    max_unknown_client_commands commands (default 10).

    (cherry picked from commit 702fb031a4ea2b00364d6a7321384a45551ce3a2)

  * btest/pop3: Add somewhat more elaborate testing (Arne Welzel, Corelight)

    PCAP taken from here: https://tranalyzer.com/tutorial/pop and reference
    added to Traces/README.

    (cherry picked from commit 702fb031a4ea2b00364d6a7321384a45551ce3a2)

7.0.1-6 | 2024-09-23 10:01:55 -0700

  * Remove core.negative-time btest (Tim Wojtulewicz, Corelight)

    This test is failing with libpcap 1.10.5; for more information
    see https://github.com/zeek/zeek/issues/3921

    (cherry picked from commit 899f7297d7b8c83ef2640e7cf40d3f230e42cc6b)

  * Update broker submodule (Arne Welzel, Corelight)

    ...to pin prometheus to 1.2.4

    (cherry picked from commit f24bc1ee882b3b266ca9dc325a1b5584addb155e)

  * telemetry: Move callbacks to Zeek (Arne Welzel, Corelight)

    Now that we run callbacks on the main loop, we can move callback support
    for Counter and Gauge instances directly into Zeek and don't need to patch
    prometheus-cpp anymore.

    (cherry picked from commit f24bc1ee882b3b266ca9dc325a1b5584addb155e)

  * auxil/prometheus-cpp: Pin to 1.2.4 (Arne Welzel, Corelight)

    (cherry picked from commit f24bc1ee882b3b266ca9dc325a1b5584addb155e)

  * btest/ldap: Add regression test for #3919 (Arne Welzel, Corelight)

    This works as expected in master, it's just that we forgot to backport
    PR #3845 to 7.0.1. Add the PCAP from Martin anyhow.

    (cherry picked from commit a339cfa4c007762c6fbb16a35576220851fe4a82)

  * GH-3853: ldap: Recognize SASL+SPNEGO+NTLMSSP (Arne Welzel, Corelight)

    The ctu-sme-11-win7ad-1-ldap-tcp-50041.pcap file was harvested
    from the CTU-SME-11 (Experiment-VM-Microsoft-Windows7AD-1) dataset
    at https://zenodo.org/records/7958259 (DOI 10.5281/zenodo.7958258).

    (cherry picked from commit 152bbbd680e6c329255dc28d57cd506e7d2c09ff)

  * Bump auxil/spicy to latest release (Benjamin Bannier, Corelight)

7.0.1 | 2024-09-03 13:04:23 -0700

  * Update CHANGES, VERSION, and NEWS for 7.0.1 release (Tim Wojtulewicz, Corelight)

  * Update zeek-aux submodule to pick up zeek-archiver permissions fix (Tim Wojtulewicz, Corelight)

7.0.0-14 | 2024-09-03 09:02:19 -0700

  * Bump auxil/spicy to latest release (Benjamin Bannier, Corelight)

7.0.0-11 | 2024-08-30 12:38:59 -0700

  * Spicy: Register well-known ports through an event handler. (Robin Sommer, Corelight)

    This avoids the earlier problem of not tracking ports correctly in
    scriptland, while still supporting `port` in EVT files and `%port` in
    Spicy files.

    As it turns out, we are already following the same approach for file
    analyzers' MIME types, so I'm applying the same pattern: it's one
    event per port, without further customization points. That leaves the
    patch pretty small after all while fixing the original issue.

    (cherry picked from commit a2079bcda6e40180b888240a281c12cc0ca735be)

  * Revert "Remove deprecated port/ports fields for spicy analyzers" (Robin Sommer, Corelight)

    This reverts commit 15d404dd191a723960e4efd956eec22739d3f1c2.

    (cherry picked from commit a2079bcda6e40180b888240a281c12cc0ca735be)

7.0.0-9 | 2024-08-30 11:47:39 -0700

  * ldap: Promote uint8 to uint64 before shifting (Arne Welzel, Corelight)

    (cherry picked from commit 97fa7cdc0a49869ee6605fac9cfc15f11d8c855b)

  * ldap: Add heuristic for wrap tokens (Arne Welzel, Corelight)

    Instead of dissecting the GSSAPI handshake, add another heuristic
    into MaybeEncrypted to check for the WRAP token identifier.

    After this change, the pcap on the following ticket is processed
    nicely: https://gitlab.com/wireshark/migration-test/-/issues/9398

    (cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)

  * ldap: Ignore ec/rrc for sealed wrap tokens (Arne Welzel, Corelight)

    It shouldn't matter for the encrypted payload that we'll
    just consume and ignore.

    (cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)

  * ldap: Add LDAP sample with SASL-SRP mechanism (Arne Welzel, Corelight)

    This is what @dopheide-esnet actually saw. Produced with a custom
    cyrus-sasl and openldap build :-(

    (cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)

  * ldap: Reintroduce encryption after SASL heuristic (Arne Welzel, Corelight)

    @dopheide-esnet provided sample captures where SASL SRP is used as
    a SASL mechanism and the follow-up LDAP messages are encrypted. It's
    not clear how to determine whether encryption will or will not happen,
    so re-add a heuristic to determine this based on the first byte of
    the first message *after* the successful bindResponse handshake. If
    that byte is 0x30, assume cleartext.

    I haven't been able to produce such pcaps, unfortunately, but the
    cleartext path is tested via the existing sasl-ntlm.pcap.

    (cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)

  * ldap: Fix assuming GSS-SPNEGO for all bindResponses (Arne Welzel, Corelight)

    In retrospect that's an obvious bug.

    (cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)

  * ldap: Implement extended request/response and StartTLS support (Arne Welzel, Corelight)

    PCAP was produced with a local OpenLDAP server configured to support StartTLS.

    This puts the Zeek calls into a separate ldap_zeek.spicy file/module
    to separate it from LDAP.

    (cherry picked from commit 6a6a5c3d0d60a1d4d32ba2173c035023c29fbf1d)

  * ldap: Remove MessageWrapper with magic 0x30 searching (Arne Welzel, Corelight)

    This unit implements a heuristic to search for the 0x30 sequence
    byte if Message couldn't readily be parsed. Remove it with the
    idea of explicit and predictable support for SASL mechanisms.

    (cherry picked from commit 2ea3a651bd83b0dfa15924417e4667241531b57b)

  * ldap: Harden parsing a bit (Arne Welzel, Corelight)

    ASN1Message(True) may go off parsing arbitrary input data as
    "something ASN.1". This could be GBs of octet strings or just very
    long sequences. Avoid this by open-coding some expected top-level types.

    This also tries to avoid some of the &parse-from usages that result
    in unnecessary copies of data.

    Adds a locally generated PCAP with addRequest/addResponse that we
    don't currently handle.

    (cherry picked from commit 2ea3a651bd83b0dfa15924417e4667241531b57b)

  * ldap: Handle integrity-only KRB wrap tokens (Arne Welzel, Corelight)

    Mostly staring at the PCAPs and opened a few RFCs. For now, only if the
    MS_KRB5 OID is used and accepted in a bind response, start stripping
    KRB5 wrap tokens for both client and server traffic.

    Would probably be nice to forward the GSS-API data to the analyzer...

    (cherry picked from commit 2ea3a651bd83b0dfa15924417e4667241531b57b)

  * http: fix password capture when enabled (Pierre Lalet)

    The current implementation would only log, if the password contains a
    colon, the part before the first colon (e.g., the password
    `password:password` would be logged as `password`).

    (cherry picked from commit c27e18631c5d9c6f04c230bd421c9750a1f02342)

  * Analyzer: Do not add child analyzers when finished (Arne Welzel, Corelight)

    Depending on an analyzer's implementation, its Done() method may
    attempt to access analyzer or connection state when executing.
    When this happens in the destructor of the parent analyzer during
    the process of destructing a connection, this state may have been
    deleted, resulting in use-after-free crashes or worse memory
    corruption.

    The following cases have been observed in the wild where this happens:

    * PIA matching during Done() for undelivered TCP data enables a Spicy
      based analyzer which in turn attempts to raise an analyzer violation
      during Done()->EndOfData().

    * Spicy analyzers attaching new analyzers during their Done() processing
      which in turn attempt to use TCP() (to call FindChild()) during Done()
      while the analyzer tree / connection is being destructed.

    The second scenario was previously found to happen in the HTTP analyzer
    and fixed with 6ef9423f3cff13e6c73f97eb6a3a27d6f64cc320.

    Plug these scenarios by short-circuiting AddChildAnalyzer() if the analyzer
    or connection have finished or are being finished.

    (cherry picked from commit 45b33bf5c17d5e8cf6c777a9bd57e4a803dfad19)

  * TCP_Reassembler: Fix IsOrig() position in Match() call (Arne Welzel, Corelight)

    Found during a debug session with @rsmmr. Undelivered TCP data
    would only be matched for the responder, and eol set to IsOrig().

    (cherry picked from commit 4a4cbf25765f387f0aa20277afd133918292b9c4)

  * Process metric callbacks from the main-loop thread (Tim Wojtulewicz, Corelight)

    This avoids the callbacks being processed on the worker thread
    spawned by Civetweb. It fixes data race issues with lookups involving
    global variables, amongst other threading issues.

    (cherry picked from commit 3c3853dc7da9aad94a9b2d5a143cc7bd9476ea7a)

  * CI: Use 16GB of memory for FreeBSD builds (Tim Wojtulewicz, Corelight)

    (cherry picked from commit 9d9cc51e9dd93668cd332aa1aef283c9dc23a677)

7.0.0 | 2024-07-31 09:37:03 -0700

  * Release 7.0.0.

7.0.0-rc4.4 | 2024-07-31 09:36:51 -0700

  * Allowlist a name for typos check (Benjamin Bannier, Corelight)

  * Bump Spicy to latest release (Benjamin Bannier, Corelight)

7.0.0-rc4 | 2024-07-26 10:12:34 -0700

  * Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight)

    This in particular pulls in a fix for zeek/spicy#1808.

    (cherry picked from commit 4c0c7581c835b4dcd5339a4b34c2b82fcfc40dc3)

7.0.0-rc3 | 2024-07-25 10:52:29 -0700

  * Generate docs for 7.0.0-rc3 (Tim Wojtulewicz)

  * Bump zeek-testing-cluster to reflect deprecation of prometheus.zeek (Christian Kreibich, Corelight)

    (cherry picked from commit 146cf99ff62d729705c155b44199a674911ade09)

  * Update 7.0 NEWS with blurb about multi-PDU parsing causing increased load [nomail] [skip ci] (Tim Wojtulewicz, Corelight)

    (cherry picked from commit bd208f4c54f66074315479071c810d792e69f96b)

  * Fix handling of zero-length SMB2 error responses (Tim Wojtulewicz, Corelight)

    (cherry picked from commit bd208f4c54f66074315479071c810d792e69f96b)

  * Update Mozilla CA list and CT list (Johanna Amann, Corelight)

    (cherry picked from commit cb88f6316c7341da7a2af397932a145be3a0cc29)

  * Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight)

    (cherry picked from commit da7c3d91385195a7a4ba957e46743bc52a9d4ecb)

7.0.0-rc2.7 | 2024-07-24 17:00:51 -0700

  * Add contributors to 7.0.0 NEWS entry (Christian Kreibich, Corelight)

  * telemetry: Deprecate prometheus.zeek policy script (Arne Welzel, Corelight)

  * Update broker submodule [nomail] (Tim Wojtulewicz, Corelight)

7.0.0-rc2 | 2024-07-18 14:31:49 -0700

  * Bump zeek-testing-cluster to pull in tee SIGPIPE fix (Christian Kreibich, Corelight)

    (cherry picked from commit b51a46f94d4012119fd27d5e46328c70af7270a2)

  * CI: Set FETCH_CONTENT_FULLY_DISCONNECTED flag for configure (Tim Wojtulewicz, Corelight)

  * Update broker and cmake submodules [nomail] (Tim Wojtulewicz, Corelight)

  * Fix warning about grealpath when running 'make dist' on Linux (Tim Wojtulewicz, Corelight)

    (cherry picked from commit e4716b6c912f86cf6b2afd6979c38667c45add95)

7.0.0-rc1 | 2024-07-11 12:21:02 -0700

  * Updating submodule(s) [nomail] (Tim Wojtulewicz, Corelight)

7.0.0-dev.467 | 2024-07-11 12:14:52 -0700

  * Update the scripts.base.frameworks.telemetry.internal-metrics test (Christian Kreibich, Corelight)
```diff
@@ -18,6 +18,22 @@ if (WIN32)
     set(CMAKE_TOOLCHAIN_FILE ${_toolchain} CACHE STRING "Vcpkg toolchain file")
 endif ()
 
+if (APPLE AND CMAKE_VERSION VERSION_GREATER_EQUAL 4.0.0 AND NOT CMAKE_OSX_SYSROOT)
+    # Spicy needs having CMAKE_OSX_SYSROOT point to the macOS SDK
+    # path, but starting with CMake 4.0 CMAKE_OSX_SYSROOT is not set
+    # automatically anymore. So we follow the guidance from the CMake 4.0
+    # release notes here:
+    #
+    #   Builds targeting macOS no longer choose any SDK or pass an "-isysroot"
+    #   flag to the compiler by default. [...] users must now specify
+    #   "-DCMAKE_OSX_SYSROOT=macosx" when configuring their build.
+    #
+    # Note that this needs to happen before the project() call below, meaning
+    # we cannot rely on the corresponding code inside the Spicy CMake
+    # configuration.
+    set(CMAKE_OSX_SYSROOT "macosx")
+endif ()
+
 project(Zeek C CXX)
 
 # We want to set ENABLE_DEBUG to ON by default if the build type is Debug.
@@ -784,9 +800,6 @@ find_package(FLEX REQUIRED)
 find_package(BISON 2.5 REQUIRED)
 find_package(PCAP REQUIRED)
 find_package(OpenSSL REQUIRED)
-if (NOT MSVC)
-    find_package(BIND REQUIRED)
-endif ()
 find_package(ZLIB REQUIRED)
 
 if (NOT BINARY_PACKAGING_MODE)
```
Makefile (2 lines changed)

```diff
@@ -9,7 +9,7 @@ BUILD=build
 REPO=$$(cd $(CURDIR) && basename $$(git config --get remote.origin.url | sed 's/^[^:]*://g'))
 VERSION_FULL=$(REPO)-$$(cd $(CURDIR) && cat VERSION)
 GITDIR=$$(test -f .git && echo $$(cut -d" " -f2 .git) || echo .git)
-REALPATH=$$($$(realpath --relative-to=$(pwd) . >/dev/null 2>&1) && echo 'realpath' || echo 'grealpath')
+REALPATH=$$($$(realpath --relative-to=$(shell pwd) . >/dev/null 2>&1) && echo 'realpath' || echo 'grealpath')
 
 all: configured
 	$(MAKE) -C $(BUILD) $@
```
NEWS (214 lines changed)

@@ -3,9 +3,213 @@ This document summarizes the most important changes in the current Zeek

release. For an exhaustive list of changes, see the ``CHANGES`` file
(note that submodules, such as Broker, come with their own ``CHANGES``.)

Zeek 7.0.9
==========

We would like to thank @cccs-jsjm, @edoardomich, and the Canadian Cyber Defence Collective
for their contributions to this release.

This release fixes the following security issue:

- Very large log records can cause Zeek to overflow memory and potentially crash. Due to
  the possibility of building these log records with packets from remote hosts, this is a
  DoS risk. The fix adds additional length checking when serializing log data for writing
  to logging streams. This can be controlled via a new ``Log::max_log_record_size``
  redefinable constant, and reports a new ``log_record_too_large`` weird if the limitation
  is reached for any individual log entry. There is also a new
  ``log-writer-discarded-writes`` metric that tracks when this limitation is reached.
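Since the new cap is an ordinary redefinable constant, a site can adjust it from local policy; a minimal sketch (the value shown is illustrative, not the shipped default):

```zeek
# Raise the per-record serialization limit; 32 MB here is only an example.
redef Log::max_log_record_size = 32 * 1024 * 1024;
```

Records that still exceed the cap surface as ``log_record_too_large`` entries in weird.log and count toward the ``log-writer-discarded-writes`` metric.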
This release fixes the following bugs:

- The response to BDAT ``LAST`` was never recognized by the SMTP analyzer, resulting in
  the BDAT ``LAST`` commands not being logged in a timely fashion and receiving the wrong
  status. Zeek now correctly reports these commands.

- The Docker images for Zeek 7.0 releases now include the ``net-tools`` (for ``iproute2``)
  package to silence a warning from ``zeekctl``. They also now include the ``procps``
  package (for ``top``) to ensure the ``zeekctl top`` command works correctly.

- The ZeekJS submodule was updated to v0.18.0. This version fixes a compilation error with
  debug builds and GCC 15.1, as well as adding future support for Node v24.

Zeek 7.0.8
==========

We would like to thank Ivan Navi (@i2z1) and Seth Grover (@mmguero) for their contributions
to this release.

This release fixes the following bugs:

- GCC 15.1 failed to build both Zeek and the integrated Paraglob library.

- The requirement for the BIND library to be present for the build was removed. This
  library is not needed since we migrated to C-Ares for DNS lookups.

- The Spicy submodule was updated to v1.11.5. This version brings a fix for a compilation
  failure when iterating over a byte value.

Zeek 7.0.7
==========

This release fixes the following bugs:

- The LDAP analyzer now handles GSSAPI-signed traffic correctly if the response token is
  missing. This fixes some cases of handling of traffic authenticated by GSS-SPNEGO.

- The QUIC analyzer gained the following fixes:

  - Traffic where ClientHello/ServerHello messages are fragmented over multiple packets
    should now be parsed correctly.

  - The wrong connection ID could be used for decryption if the client started using a
    different one later in the INITIAL packet.

  - ACK ranges should now be parsed correctly.

  - Parsing of INITIAL packets could consume the remainder of a UDP datagram, even if
    there were possibly other packet types in that datagram.

Zeek 7.0.6
==========

This release fixes the following bugs:

- ZAM now handles assignment of record fields correctly when assigning to the result of an
  ``in`` operation.

- The QUIC analyzer was not handling all of the packets in a multi-packet UDP datagram,
  which led it to miss data and produce incomplete logging. This has been resolved.

- The QUIC analyzer was not confirming the protocol early enough, which could give the
  ``service`` field for a connection a strange ordering (possibly confirming ssl before
  quic, for example). The QUIC analyzer now confirms the protocol before forwarding to
  the next analyzer.

- The ``@load-plugin`` script directive had some deficiencies when it came to loading
  scripts generated by BIF code, which could potentially lead to errors with plugins using
  those directives.
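For reference, ``@load-plugin`` activates a dynamic plugin from script-land by its qualified name, implicitly pulling in the plugin's ``__preload__.zeek`` and ``__load__.zeek`` scripts. A sketch, with a hypothetical plugin name:

```zeek
# Explicitly activate a dynamic plugin; Demo::Rot13 is a hypothetical name.
@load-plugin Demo::Rot13
```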
|
||||
|
||||
- VLAN packets containing VNTAG headers were not being handled correctly. Zeek was already
|
||||
handling VNTAG -> VLAN, but not the other way around.
|
||||
|
||||
- The ZeekJS submodule was upgraded to v0.16.0, which includes fixes for the following:
|
||||
|
||||
- Signal handling in Zeek was broken when using ZeekJS because we weren't properly
|
||||
keeping ZeekJS alive internally. A new internal timer was added that keeps the ZeekJS
|
||||
processing loop running.
|
||||
|
||||
- The ZeekJS javascript execution is now offloaded to a dedicated thread to avoid memory
|
||||
contention issues that were preventing a number of Zeek events to fail to execute
|
||||
properly.
|
||||
|
||||
- The mozilla-ca-list.zeek and ct-list.zeek files were updated to be based on NSS 3.109.
|
||||
|
||||
Zeek 7.0.5
==========

This release fixes the following security issues:

- Large QUIC packets can cause Zeek to overflow memory and potentially
  crash. Due to the possibility of receiving these packets from remote hosts,
  this is a DoS risk. The fix limits the payload length to 10000 bytes and
  reports an error for those cases, and also changes the memory allocation to
  not use a fixed-size buffer for all packets.

This release fixes the following bugs:

- The ZAM script optimization feature gained fixes for some minor memory
  leaks.

- The ZeekJS submodule was updated to v0.14.0. In certain environments, ZeekJS
  would fail to start a debugging thread due to limited stack size, producing
  spurious log messages. This was fixed by not starting the debugging thread by
  default.

Zeek 7.0.4
==========

This release fixes the following bugs:

- The community-id-logging.zeek policy script was changed to set
  ``c$conn$community_id`` during ``new_connection()`` rather than
  ``connection_state_remove()``, allowing other scripts to reuse its value
  earlier.

- The input framework will no longer get stuck and use 100% of the CPU when
  encountering lines not immediately terminated by a newline.

- The Modbus analyzer added some additional protocol checks and should no longer
  over-match on traffic that's not specifically on port 502.

- ZeekJS was updated to version v0.13.2, which brings support for newer versions
  of Node.js and a fix for a segfault when running under Alpine.

- A minor bug was fixed in the detect-sqli policy script to handle spaces being
  encoded as plus signs.

Zeek 7.0.3
==========

This release fixes the following security issue:

- Adding to the POP3 hardening in 7.0.2, the parser now simply discards excess
  pending commands, rather than attempting to process them. Further, invalid
  server responses no longer result in command completion. Processing
  out-of-order commands or finishing commands based on invalid server responses
  could result in inconsistent analyzer state, potentially triggering null
  pointer dereferences for crafted traffic.

This release ships with Spicy 1.11.3, a bugfix release. Please refer to its
release notes for details.

Zeek 7.0.2
==========

This release fixes the following security issues:

- The POP3 parser has been hardened to avoid unbounded state growth in the
  face of one-sided traffic capture or when enabled for non-POP3 traffic.
  Concretely, the Redis protocol's AUTH mechanism enables the POP3 analyzer
  for such connections through DPD.

This release fixes the following bugs:

- Support for SASL+SPNEGO+NTLMSSP was added to the LDAP analyzer.

- Telemetry callbacks are now handled via Zeek instead of depending on the
  prometheus-cpp library to handle them.

Zeek 7.0.1
==========

This release fixes the following bugs:

- HTTP passwords with colon characters in them are now correctly logged.

- The LDAP analyzer now supports handling of non-sealed GSS-API WRAP tokens.

- Heuristics for parsing SASL-encrypted and -signed LDAP traffic have been made
  more strict and predictable. Please provide input if this results in less
  visibility in your environment.

- StartTLS support was added to the LDAP analyzer. The SSL analyzer is enabled
  for connections where client and server negotiate to TLS through the extended
  request/response mechanism.

- zeek-archiver now specifies less strict permissions for the directories and
  files it creates, to play more nicely with the user's umask setting.

Zeek 7.0.0
==========

We would like to thank the following people for their contributions to this
release: Christopher Knill (cknill), Jan Grashöfer (J-Gras), Martin van
Hensbergen (mvhensbergen), Matti Bispham (mbispham), Mike Dopheide
(dopheide-esnet), Oleksandr Pastushkov (opastushkov), Peter Cullen (pbcullen),
Steve Smoot (stevesmoot), Tanner Kvarfordt (Kardbord), Victor Dvornikov
(lydiym).

Breaking Changes
----------------

@@ -20,7 +224,7 @@ Breaking Changes
 All of the metrics-related script-level options, type, and methods have been
 moved to the Telemetry framework:
-* Option ``Broker::metrics_port` is now ``Telemetry::metrics_port``
+* Option ``Broker::metrics_port`` is now ``Telemetry::metrics_port``
 * Option ``Broker::metrics_export_endpoint_name`` is now ``Telemetry::metrics_endpoint_name``

 The following options have been removed:
@@ -85,7 +289,8 @@ New Functionality
   environment variable configures the addition.

 - SMB2 packets containing multiple PDUs now correctly parse all of the headers,
-  instead of just the first one and ignoring the rest.
+  instead of just the first one and ignoring the rest. This may cause increased
+  CPU load on SMB2-heavy networks.

 - The new built-in function ``lookup_connection_analyzer_id()`` retrieves the
   numeric identifier of an analyzer associated with a connection. This enables
@@ -167,6 +372,11 @@ Deprecated Functionality
 - The ``--disable-archiver`` configure flag no longer does anything and will be
   removed in 7.1. zeek-archiver has moved into the zeek-aux repository.

+- The policy/frameworks/telemetry/prometheus.zeek script has been deprecated
+  and will be removed with Zeek 7.1. Setting the ``metrics_port`` field on a
+  ``Cluster::Node`` implies listening on that port and exposing telemetry
+  in Prometheus format.
+
 Zeek 6.2.0
 ==========

VERSION
@@ -1 +1 @@
-7.0.0-dev.467
+7.0.9

@@ -1 +1 @@
-Subproject commit fada26ae504981f7f5524bf2a5c82ae49acd556d
+Subproject commit a80bf420aa6f55b4eb959ae89c184522a096a119

@@ -1 +1 @@
-Subproject commit 45ce017874aac9ffabac0ddc4d016f1747804234
+Subproject commit 4505c4323283b56ea59935210e105da26ab7bb0b

@@ -1 +1 @@
-Subproject commit 2fec7205d1a9cb4829b86c943d599696d53de85c
+Subproject commit ad99e21f4706193670c42b36c9824dc997f4c475

@@ -1 +1 @@
-Subproject commit 6581b1855a5ea8cc102c66b4ac6a431fc67484a0
+Subproject commit 597ec897fb13f9995e87c8748486f359558415de

@@ -1 +1 @@
-Subproject commit 8a66cd60fb29a1237b5070854cb194f43a3f7a30
+Subproject commit e850412ab5dea10ee2ebb98e42527d80fcf9a7ed

@@ -1 +1 @@
-Subproject commit 39c0ee1e1742bb28dff57632ee4620f905b892e7
+Subproject commit 9419b9a4242a4dc3860511d827f395971ae58ca0

@@ -1 +1 @@
-Subproject commit 230f53c1596ee88289e96397f0810ca60ed897e3
+Subproject commit 1b7071e294fde14230c5908a2f0b05228d9d695c

@@ -1,14 +1,16 @@
-FROM fedora:40
+FROM fedora:41

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20240617
+ENV DOCKERFILE_VERSION 20250203

 RUN dnf -y install \
     bison \
     ccache \
     cmake \
+    cppzmq-devel \
     diffutils \
     findutils \
     flex \
     gcc \
     gcc-c++ \
@@ -20,12 +22,14 @@ RUN dnf -y install \
     openssl \
     openssl-devel \
+    procps-ng \
     python3 \
     python3-devel \
     python3-pip\
     sqlite \
     swig \
     which \
     zlib-devel \
+    crypto-policies-scripts \
     && dnf clean all && rm -rf /var/cache/dnf

 RUN pip3 install websockets junit2html

@@ -1,8 +1,8 @@
-FROM fedora:39
+FROM fedora:42

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20231208
+ENV DOCKERFILE_VERSION 20250508

 RUN dnf -y install \
     bison \
@@ -11,6 +11,7 @@ RUN dnf -y install \
     diffutils \
     findutils \
     flex \
+    gawk \
     gcc \
     gcc-c++ \
     git \
@@ -28,6 +29,7 @@ RUN dnf -y install \
     swig \
     which \
     zlib-devel \
+    crypto-policies-scripts \
     && dnf clean all && rm -rf /var/cache/dnf

 RUN pip3 install websockets junit2html

@@ -51,9 +51,9 @@ if [[ -n "${CIRRUS_CI}" ]] && [[ "${CIRRUS_REPO_OWNER}" == "zeek" ]] && [[ ! -d

     banner "Trying to clone zeek-testing-private git repo"
     echo "${ZEEK_TESTING_PRIVATE_SSH_KEY}" >cirrus_key.b64
-    if [ "${CIRRUS_TASK_NAME}" == "macos_ventura" -o "${CIRRUS_TASK_NAME}" == "macos_sonoma" ]; then
-        # The base64 command provided with macOS Ventura/Sonoma requires an argument
-        # to pass the input filename
+    if [[ "${CIRRUS_TASK_NAME}" =~ ^macos_ ]]; then
+        # The base64 command provided with macOS requires an argument
+        # to pass the input filename, while -i elsewhere is "ignore garbage".
         base64 -d -i cirrus_key.b64 >cirrus_key
     else
         base64 -d cirrus_key.b64 >cirrus_key

@@ -7,10 +7,9 @@ set -x

 brew update
 brew upgrade cmake
-brew install openssl@3 swig bison flex ccache libmaxminddb
+brew install openssl@3 python@3 swig bison flex ccache libmaxminddb

-if [ $(sw_vers -productVersion | cut -d '.' -f 1) -lt 14 ]; then
-    python3 -m pip install --upgrade pip
-fi
+which python3
+python3 --version

 python3 -m pip install --user --break-system-packages websockets

@@ -21,6 +21,7 @@ RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.5
     libpcap-devel \
     make \
     openssh \
+    procps \
     python311 \
     python311-devel \
     python311-pip \

@@ -21,6 +21,7 @@ RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.6
     libpcap-devel \
     make \
     openssh \
+    procps \
     python312 \
     python312-devel \
     python312-pip \

@@ -2,7 +2,7 @@ FROM opensuse/tumbleweed

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20230801
+ENV DOCKERFILE_VERSION 20250714

 # Remove the repo-openh264 repository, it caused intermittent issues
 # and we should not be needing any packages from it.

ci/test.sh
@@ -40,6 +40,11 @@ function banner {
 }

 function run_unit_tests {
+    if [[ ${ZEEK_CI_SKIP_UNIT_TESTS} -eq 1 ]]; then
+        printf "Skipping unit tests as requested by task configuration\n\n"
+        return 0
+    fi
+
     banner "Running unit tests"

     pushd build
@@ -60,7 +65,7 @@ function run_btests {
     pushd testing/btest

     ZEEK_PROFILER_FILE=$(pwd)/.tmp/script-coverage/XXXXXX \
-        ${BTEST} -z ${ZEEK_CI_BTEST_RETRIES} -d -A -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} || result=1
+        ${BTEST} -z ${ZEEK_CI_BTEST_RETRIES} -d -A -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} ${ZEEK_CI_BTEST_EXTRA_ARGS} || result=1
     make coverage
     prep_artifacts
     popd
@@ -68,11 +73,16 @@ function run_btests {
 }

 function run_external_btests {
+    if [[ ${ZEEK_CI_SKIP_EXTERNAL_BTESTS} -eq 1 ]]; then
+        printf "Skipping external tests as requested by task configuration\n\n"
+        return 0
+    fi
+
     local zeek_testing_pid=""
     local zeek_testing_pid_private=""
     pushd testing/external/zeek-testing
     ZEEK_PROFILER_FILE=$(pwd)/.tmp/script-coverage/XXXXXX \
-        ${BTEST} -d -A -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} >btest.out 2>&1 &
+        ${BTEST} -d -A -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} ${ZEEK_CI_BTEST_EXTRA_ARGS} >btest.out 2>&1 &
     zeek_testing_pid=$!
     popd

cmake
@@ -1 +1 @@
-Subproject commit 690483f76c149ffa8e035b612b406b0964f9886f
+Subproject commit 621d098e6dcc52ad355cb2a196a7aa1a7b1a676f

doc
@@ -1 +1 @@
-Subproject commit f65820ff0faf2887799fe691a443b5db39eeed54
+Subproject commit 2c5816ea62920979ff7cf92f42455e7a6827dd2f

@@ -26,6 +26,8 @@ RUN apt-get -q update \
     libssl3 \
     libuv1 \
     libz1 \
+    net-tools \
+    procps \
     python3-minimal \
     python3-git \
     python3-semantic-version \

@@ -167,24 +167,26 @@ export {
         PEER_INVALID = 3,
         ## Remote peer not listening.
         PEER_UNAVAILABLE = 4,
+        ## Remote peer disconnected during the handshake.
+        PEER_DISCONNECT_DURING_HANDSHAKE = 5,
         ## A peering request timed out.
-        PEER_TIMEOUT = 5,
+        PEER_TIMEOUT = 6,
         ## Master with given name already exists.
-        MASTER_EXISTS = 6,
+        MASTER_EXISTS = 7,
         ## Master with given name does not exist.
-        NO_SUCH_MASTER = 7,
+        NO_SUCH_MASTER = 8,
         ## The given data store key does not exist.
-        NO_SUCH_KEY = 8,
+        NO_SUCH_KEY = 9,
         ## The store operation timed out.
-        REQUEST_TIMEOUT = 9,
+        REQUEST_TIMEOUT = 10,
         ## The operation expected a different type than provided.
-        TYPE_CLASH = 10,
+        TYPE_CLASH = 11,
         ## The data value cannot be used to carry out the desired operation.
-        INVALID_DATA = 11,
+        INVALID_DATA = 12,
         ## The storage backend failed to execute the operation.
-        BACKEND_FAILURE = 12,
+        BACKEND_FAILURE = 13,
         ## The storage backend failed to execute the operation.
-        STALE_DATA = 13,
+        STALE_DATA = 14,
         ## Catch-all for a CAF-level problem.
         CAF_ERROR = 100
     };

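Script-level code that matches on the enum names rather than on the numeric values is
unaffected by this renumbering, though both peers must run matching versions. A minimal
sketch of reacting to the newly inserted error code (the handler body is illustrative
only)::

    # Broker::error is the stock event that carries these codes.
    event Broker::error(code: Broker::ErrorCode, msg: string)
        {
        if ( code == Broker::PEER_DISCONNECT_DURING_HANDSHAKE )
            print fmt("peering failed during handshake: %s", msg);
        }
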
|
@ -47,12 +47,18 @@ export {
|
|||
|
||||
# Marked with &is_used to suppress complaints when there aren't any
|
||||
# Spicy file analyzers loaded, and hence this event can't be generated.
|
||||
# The attribute is only supported for Zeek 5.0 and higher.
|
||||
event spicy_analyzer_for_mime_type(a: Files::Tag, mt: string) &is_used
|
||||
{
|
||||
Files::register_for_mime_type(a, mt);
|
||||
}
|
||||
|
||||
# Marked with &is_used to suppress complaints when there aren't any
|
||||
# Spicy protocol analyzers loaded, and hence this event can't be generated.
|
||||
event spicy_analyzer_for_port(a: Analyzer::Tag, p: port) &is_used
|
||||
{
|
||||
Analyzer::register_for_port(a, p);
|
||||
}
|
||||
|
||||
function enable_protocol_analyzer(tag: Analyzer::Tag) : bool
|
||||
{
|
||||
return Spicy::__toggle_analyzer(tag, T);
|
||||
|
|
|
@@ -1,3 +1 @@
 @load ./main
-
-@load base/frameworks/cluster

@@ -5,10 +5,28 @@
 ##! enabled by setting :zeek:see:`Telemetry::metrics_port`.

 @load base/misc/version
+@load base/frameworks/cluster

 @load base/frameworks/telemetry/options

 module Telemetry;

+# In a cluster configuration, open the port number for metrics
+# from the cluster node configuration for exporting data to
+# Prometheus.
+#
+# The manager node will also provide a ``/services.json`` endpoint
+# for the HTTP Service Discovery system in Prometheus to use for
+# configuration. This endpoint will include information for all of
+# the other nodes in the cluster.
+@if ( Cluster::is_enabled() )
+redef Telemetry::metrics_endpoint_name = Cluster::node;
+
+@if ( Cluster::local_node_metrics_port() != 0/unknown )
+redef Telemetry::metrics_port = Cluster::local_node_metrics_port();
+@endif
+@endif
+
 export {
     ## Alias for a vector of label values.
     type labels_vector: vector of string;

@@ -2901,8 +2901,37 @@ export {
         auth_flavors: vector of auth_flavor_t &optional; ##< Returned authentication flavors
     };

-} # end export
+}
+
+module Log;
+
+export {
+    ## Maximum size of a message that can be sent to a remote logger or logged
+    ## locally. If this limit is met, report a ``log_line_too_large`` weird and drop
+    ## the log entry. This isn't necessarily the full size of a line that might be
+    ## written to a log, but a general representation of the size as the log record is
+    ## serialized for writing. The size of end result from serialization might be
+    ## higher than this limit, but it prevents runaway-sized log entries from causing
+    ## problems.
+    const max_log_record_size = 1024*1024*64 &redef;
+}
+
+module POP3;
+
+export {
+    ## How many commands a POP3 client may have pending
+    ## before Zeek forcefully removes the oldest.
+    ##
+    ## Setting this value to 0 removes the limit.
+    const max_pending_commands = 10 &redef;
+
+    ## How many invalid commands a POP3 client may use
+    ## before Zeek starts raising analyzer violations.
+    ##
+    ## Setting this value to 0 removes the limit.
+    const max_unknown_client_commands = 10 &redef;
+
+} # end export

 module Threading;

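All three new constants are ``&redef``-able. A minimal tuning sketch for a site's
local.zeek; the values here are illustrative, not recommendations::

    # Allow more pending POP3 commands before the oldest is dropped, and
    # lower the log-record ceiling from the 64 MB default to 16 MB.
    redef POP3::max_pending_commands = 50;
    redef POP3::max_unknown_client_commands = 20;
    redef Log::max_log_record_size = 16 * 1024 * 1024;
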
@@ -5883,6 +5912,13 @@ export {

     type MetricVector : vector of Metric;
     type HistogramMetricVector : vector of HistogramMetric;

+    ## Maximum amount of time for CivetWeb HTTP threads to
+    ## wait for metric callbacks to complete on the IO loop.
+    const callback_timeout: interval = 5sec &redef;
+
+    ## Number of CivetWeb threads to use.
+    const civetweb_threads: count = 2 &redef;
 }

 module GLOBAL;

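On a standalone (non-cluster) node the metrics port still has to be set explicitly. A
minimal sketch, with an arbitrarily chosen port number::

    # Expose Prometheus-format telemetry on TCP port 9911 and give the
    # CivetWeb threads more time to collect callback results.
    redef Telemetry::metrics_port = 9911/tcp;
    redef Telemetry::callback_timeout = 10sec;
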
@@ -19,6 +19,7 @@ event zeek_init() &priority=20
     PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_VLAN, 0x8035, PacketAnalyzer::ANALYZER_ARP);
     PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_VLAN, 0x8100, PacketAnalyzer::ANALYZER_VLAN);
     PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_VLAN, 0x8864, PacketAnalyzer::ANALYZER_PPPOE);
+    PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_VLAN, 0x8926, PacketAnalyzer::ANALYZER_VNTAG);

     PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_VLAN, SNAP_FORWARDING_KEY,
                                              PacketAnalyzer::ANALYZER_SNAP);

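The same registration API lets a site forward further ethertypes seen inside a VLAN
header. A hypothetical sketch only; 0x88A8 (802.1ad service tags) is used here purely as
an example and may already be covered by the default configuration::

    event zeek_init() &priority=20
        {
        # Forward 802.1ad S-tags found inside a VLAN header back into
        # the VLAN analyzer for another round of parsing.
        PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_VLAN, 0x88A8, PacketAnalyzer::ANALYZER_VLAN);
        }
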
@@ -338,8 +338,8 @@ event http_header(c: connection, is_orig: bool, name: string, value: string) &pr
     if ( /^[bB][aA][sS][iI][cC] / in value )
         {
         local userpass = decode_base64_conn(c$id, sub(value, /[bB][aA][sS][iI][cC][[:blank:]]+/, ""));
-        local up = split_string(userpass, /:/);
-        if ( |up| >= 2 )
+        local up = split_string1(userpass, /:/);
+        if ( |up| == 2 )
             {
             c$http$username = up[0];
             if ( c$http$capture_password )

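The switch to ``split_string1`` is what preserves colons inside the password: it splits
only at the first separator. A small sketch of the difference, with expected output in
the comments::

    event zeek_init()
        {
        local userpass = "alice:s3:cr3t";
        print split_string(userpass, /:/);   # [alice, s3, cr3t] - password mangled
        print split_string1(userpass, /:/);  # [alice, s3:cr3t]  - password intact
        }
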
@@ -26,6 +26,8 @@ export {

     const BIND_SIMPLE = "bind simple";
     const BIND_SASL = "bind SASL";
+    const BIND_SICILY_NEGOTIATE = "sicily_negotiate";
+    const BIND_SICILY_RESPONSE = "sicily_response";

     const RESULT_CODES = { [ LDAP::ResultCode_SUCCESS ] = "success", [
         LDAP::ResultCode_OPERATIONS_ERROR ] = "operations error", [

@@ -120,4 +122,11 @@ export {
         "searching", [ LDAP::SearchDerefAlias_DEREF_FINDING_BASE ] =
         "finding", [ LDAP::SearchDerefAlias_DEREF_ALWAYS ] = "always", }
         &default="unknown";
+
+    const EXTENDED_REQUESTS = {
+        # StartTLS, https://datatracker.ietf.org/doc/html/rfc4511#section-4.14.1
+        [ "1.3.6.1.4.1.1466.20037" ] = "StartTLS",
+        # whoami, https://datatracker.ietf.org/doc/html/rfc4532#section-2
+        [ "1.3.6.1.4.1.4203.1.11.3" ] = "whoami",
+    } &default="unknown" &redef;
 }

@@ -258,6 +258,9 @@ event LDAP::message(c: connection,
         }

         m$object = object;
+
+        if ( opcode == LDAP::ProtocolOpcode_EXTENDED_REQUEST )
+            m$object += fmt(" (%s)", EXTENDED_REQUESTS[object]);
     }

     if ( argument != "" ) {
@@ -369,13 +372,23 @@ event LDAP::bind_request(c: connection,
     if ( m?$opcode )
         Reporter::conn_weird("LDAP_bind_opcode_already_set", c, m$opcode, "LDAP");

-    if (authType == LDAP::BindAuthType_BIND_AUTH_SIMPLE) {
+    switch ( authType ) {
+    case LDAP::BindAuthType_BIND_AUTH_SIMPLE:
         m$opcode = BIND_SIMPLE;
-    } else if (authType == LDAP::BindAuthType_BIND_AUTH_SASL) {
+        break;
+    case LDAP::BindAuthType_BIND_AUTH_SASL:
         m$opcode = BIND_SASL;
-    } else {
+        break;
+    case LDAP::BindAuthType_SICILY_NEGOTIATE:
+        m$opcode = BIND_SICILY_NEGOTIATE;
+        break;
+    case LDAP::BindAuthType_SICILY_RESPONSE:
+        m$opcode = BIND_SICILY_RESPONSE;
+        break;
+    default:
         Reporter::conn_weird("LDAP_unknown_auth_type", c, cat(authType), "LDAP");
         m$opcode = cat(authType);
+        break;
     }
 }

@@ -98,3 +98,44 @@ global LDAP::search_result_entry: event (
     message_id: int,
     object_name: string
 );
+
+## Event generated for each ExtendedRequest in LDAP messages.
+##
+## c: The connection.
+##
+## message_id: The messageID element.
+##
+## request_name: The name of the extended request.
+##
+## request_value: The value of the extended request (empty if missing).
+global LDAP::extended_request: event (
+    c: connection,
+    message_id: int,
+    request_name: string,
+    request_value: string
+);
+
+## Event generated for each ExtendedResponse in LDAP messages.
+##
+## c: The connection.
+##
+## message_id: The messageID element.
+##
+## result: The result code of the response.
+##
+## response_name: The name of the extended response (empty if missing).
+##
+## response_value: The value of the extended response (empty if missing).
+global LDAP::extended_response: event (
+    c: connection,
+    message_id: int,
+    result: LDAP::ResultCode,
+    response_name: string,
+    response_value: string
+);
+
+## Event generated when a plaintext LDAP connection switched to TLS.
+##
+## c: The connection.
+##
+global LDAP::starttls: event(c: connection);

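A minimal sketch of consuming these new events from a site script; the event signatures
are as declared above, and the print statements are placeholders::

    event LDAP::starttls(c: connection)
        {
        print fmt("%s: LDAP connection upgraded to TLS", c$uid);
        }

    event LDAP::extended_request(c: connection, message_id: int,
                                 request_name: string, request_value: string)
        {
        # "1.3.6.1.4.1.1466.20037" is the StartTLS OID from the table above.
        if ( request_name == "1.3.6.1.4.1.1466.20037" )
            print fmt("%s: StartTLS requested (message %d)", c$uid, message_id);
        }
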
@@ -80,6 +80,7 @@ export {
         trans_mail_from_seen: bool &default=F;
         trans_rcpt_to_seen: bool &default=F;
         invalid_transactions: count &default=0;
+        bdat_last_observed: bool &default=F;
         analyzer_id: count &optional;
     };

@@ -261,6 +262,7 @@ event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &
             # the last BDAT command.
             c$smtp_state$trans_mail_from_seen = F;
             c$smtp_state$trans_rcpt_to_seen = F;
+            c$smtp_state$bdat_last_observed = T;
             }
         }
     else if ( upper_command == "." )
@@ -284,10 +286,11 @@ event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
 event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
                  msg: string, cont_resp: bool) &priority=-5
     {
-    if ( cmd == "." )
+    if ( cmd == "." || (!cont_resp && cmd == "BDAT" && c$smtp_state$bdat_last_observed ) )
         {
         # Track the number of messages seen in this session.
         ++c$smtp_state$messages_transferred;
+        c$smtp_state$bdat_last_observed = F;
         smtp_message(c);
         c$smtp = new_smtp_log(c);
         }

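With BDAT completions now ticking the same counter as ``.``-terminated DATA, per-session
accounting keeps working for both transfer styles. A sketch of reading it, illustrative
only::

    event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
                     msg: string, cont_resp: bool) &priority=-10
        {
        if ( c?$smtp_state )
            print fmt("%s: %d message(s) transferred so far",
                      c$uid, c$smtp_state$messages_transferred);
        }
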
@@ -1,42 +1,49 @@
 #
 # Do not edit this file. This file is automatically generated by gen-ct-list.pl
-# File generated at Fri Feb 23 11:37:01 2024
+# File generated at Tue Mar 18 13:51:59 2025
 # File generated from https://www.gstatic.com/ct/log_list/v3/log_list.json
-# Source file generated at: 2024-02-22T12:56:21Z
-# Source file version: 32.9
+# Source file generated at: 2025-03-18T12:55:24Z
+# Source file version: 51.46
 #

 @load base/protocols/ssl
 module SSL;
 redef ct_logs += {
["\xee\xcd\xd0\x64\xd5\xdb\x1a\xce\xc5\x5c\xb7\x9d\xb4\xcd\x13\xa2\x32\x87\x46\x7c\xbc\xec\xde\xc3\x51\x48\x59\x46\x71\x1f\xb5\x9b"] = CTInfo($description="Google 'Argon2024' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x1d\xb9\x6c\xa9\xcb\x69\x94\xc5\x5c\xe6\xb6\xa6\x03\xbb\xd2\xb8\xdc\x54\x43\x17\x28\x99\x0c\x06\x01\x50\x1d\x9d\x64\xc0\x59\x46\x2b\xdc\xc8\x03\x1d\x05\xb4\x2d\xa8\x09\xf7\x99\x41\xed\x04\xfb\xe5\x57\xba\x26\x04\xf6\x11\x52\xce\x14\x65\x3b\x2f\x76\x2b\xc0"),
["\x4e\x75\xa3\x27\x5c\x9a\x10\xc3\x38\x5b\x6c\xd4\xdf\x3f\x52\xeb\x1d\xf0\xe0\x8e\x1b\x8d\x69\xc0\xb1\xfa\x64\xb1\x62\x9a\x39\xdf"] = CTInfo($description="Google 'Argon2025h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x20\x82\xa1\xf9\x67\x68\xa8\xe4\xdb\x94\x98\xe2\xe1\x68\x87\xe4\x09\x6d\x20\x35\x33\x38\x3c\xaf\x14\xaa\xd7\x08\x18\xf0\xfd\x16\x9b\xd3\xff\x7c\x27\x82\xd4\x87\xb7\x4e\x24\x46\x3b\xfb\xae\xbe\xc8\x23\x52\x20\x2b\xaa\x44\x05\xfe\x54\xf9\xd5\xf1\x1d\x45\x9a"),
["\x12\xf1\x4e\x34\xbd\x53\x72\x4c\x84\x06\x19\xc3\x8f\x3f\x7a\x13\xf8\xe7\xb5\x62\x87\x88\x9c\x6d\x30\x05\x84\xeb\xe5\x86\x26\x3a"] = CTInfo($description="Google 'Argon2025h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xaf\xe4\xf3\x94\x2c\xdf\xa6\x27\xb5\xfe\xb2\x61\x83\x19\xc8\x21\x3a\x23\xa8\xa9\x3d\x54\xaf\xbc\x31\x9a\x1c\xd3\xc1\xe3\xb6\xc2\xf3\x0f\xc7\xb9\xca\x3b\x1d\x79\x65\x61\x22\x25\x82\x56\x4e\x98\xe8\xaa\x26\x29\x36\x1e\x28\x60\x6f\xeb\x15\x6e\xf7\x7c\xd0\xba"),
["\x76\xff\x88\x3f\x0a\xb6\xfb\x95\x51\xc2\x61\xcc\xf5\x87\xba\x34\xb4\xa4\xcd\xbb\x29\xdc\x68\x42\x0a\x9f\xe6\x67\x4c\x5a\x3a\x74"] = CTInfo($description="Google 'Xenon2024' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xb9\x60\xe0\x34\x1e\x35\xe4\x65\x00\x93\x4f\x90\x09\xbd\x5a\xec\x44\xdd\x8c\x0f\xce\xed\x11\x3e\x2a\x59\x46\x9a\x31\xb6\xc7\x99\xf7\xdc\xef\x3d\xcd\x8f\x86\xc2\x35\xa5\x3e\xdc\x29\xba\xbb\xf2\x54\xe2\xa8\x0c\x83\x08\x51\x06\xde\x21\x6d\x36\x50\x8e\x38\x4d"),
["\x0e\x57\x94\xbc\xf3\xae\xa9\x3e\x33\x1b\x2c\x99\x07\xb3\xf7\x90\xdf\x9b\xc2\x3d\x71\x32\x25\xdd\x21\xa9\x25\xac\x61\xc5\x4e\x21"] = CTInfo($description="Google 'Argon2026h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x07\xfc\x1e\xe8\x63\x8e\xff\x1c\x31\x8a\xfc\xb8\x1e\x19\x2b\x60\x50\x00\x3e\x8e\x9e\xda\x77\x37\xe3\xa5\xa8\xda\x8d\x94\xf8\x6b\xe8\x3d\x64\x8f\x27\x3f\x75\xb3\xfc\x6b\x12\xf0\x37\x06\x4f\x64\x58\x75\x14\x5d\x56\x52\xe6\x6a\x2b\x14\x4c\xec\x81\xd1\xea\x3e"),
["\xd7\x6d\x7d\x10\xd1\xa7\xf5\x77\xc2\xc7\xe9\x5f\xd7\x00\xbf\xf9\x82\xc9\x33\x5a\x65\xe1\xd0\xb3\x01\x73\x17\xc0\xc8\xc5\x69\x77"] = CTInfo($description="Google 'Argon2026h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/us1/argon2026h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2a\x3a\x67\x8b\xfe\xba\x0c\x86\x2b\x4a\x51\x8a\xe9\x17\xfe\x7b\xa1\x76\x73\xfd\xbc\x65\x4b\xc3\x27\xbf\x4d\xf3\x5f\xa0\xca\x29\x80\x11\x20\x32\x78\xd6\x7e\xf9\x34\x60\x8c\x75\xa0\xf5\x35\x50\x9c\xa1\xd3\x49\x4d\x13\xd5\x3b\x6a\x0e\xea\x45\x9d\x24\x13\x22"),
["\xcf\x11\x56\xee\xd5\x2e\x7c\xaf\xf3\x87\x5b\xd9\x69\x2e\x9b\xe9\x1a\x71\x67\x4a\xb0\x17\xec\xac\x01\xd2\x5b\x77\xce\xcc\x3b\x08"] = CTInfo($description="Google 'Xenon2025h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x82\xe2\xce\x90\x40\x3f\x81\x0e\xdf\xea\xe1\x20\x2b\x5e\x2e\x30\x54\x46\x81\xb9\x58\xed\xaf\xbd\xff\x36\xa7\x9e\x0b\x5f\x6a\x6b\x91\xa5\xc1\x98\xe1\xf2\xcd\xeb\x17\x20\x70\xca\x2a\x12\xe6\x54\x78\x50\xdc\xff\x6d\xfd\x1c\xa7\xb6\x3a\x1f\xf9\x26\xa9\x1b\xbd"),
["\xdd\xdc\xca\x34\x95\xd7\xe1\x16\x05\xe7\x95\x32\xfa\xc7\x9f\xf8\x3d\x1c\x50\xdf\xdb\x00\x3a\x14\x12\x76\x0a\x2c\xac\xbb\xc8\x2a"] = CTInfo($description="Google 'Xenon2025h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x6b\xe0\xaf\xed\x06\x7c\x3d\xef\xd9\x0e\xe4\x58\x4b\x04\xd8\x2a\x47\x99\x90\x89\x7a\xb9\x36\xa5\x75\xc8\x04\xb8\xcb\xe2\xaa\x2b\xb5\x68\x9d\x88\x29\xa2\xa5\xcf\xce\x2b\x9a\x15\x9b\xa0\x3e\x9d\x94\x1c\xb2\xb7\x4a\xf2\x51\xec\x40\xed\x62\x47\xa4\x03\x49\x86"),
["\xda\xb6\xbf\x6b\x3f\xb5\xb6\x22\x9f\x9b\xc2\xbb\x5c\x6b\xe8\x70\x91\x71\x6c\xbb\x51\x84\x85\x34\xbd\xa4\x3d\x30\x48\xd7\xfb\xab"] = CTInfo($description="Cloudflare 'Nimbus2024' Log", $operator="Cloudflare", $url="https://ct.cloudflare.com/logs/nimbus2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x77\xb1\x9b\x7b\x8f\xe6\x8b\x35\xfe\x3a\x92\x29\x2d\xac\x8a\x8d\x51\x8a\x25\xfc\x93\xb6\xd7\xa0\x8b\x29\x37\x71\x1d\x33\xca\xcc\x33\xea\x28\xb9\x1f\xe2\xac\xc3\xa9\x5d\xdd\x97\xbe\xf6\x9e\x94\x25\xdd\x36\x81\xd1\xeb\x5d\x29\xc3\x2b\x44\xf1\x5b\xca\x15\x48"),
["\x96\x97\x64\xbf\x55\x58\x97\xad\xf7\x43\x87\x68\x37\x08\x42\x77\xe9\xf0\x3a\xd5\xf6\xa4\xf3\x36\x6e\x46\xa4\x3f\x0f\xca\xa9\xc6"] = CTInfo($description="Google 'Xenon2026h1' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x3a\x1f\xc8\xbb\xce\xd5\x90\x47\x34\xca\xca\x01\x04\x27\x21\x1c\xe2\x29\x3d\x92\xbb\x91\x45\xc7\x5a\x3e\xa5\xd4\xf2\x12\xe6\xe8\xe6\x43\xba\xf3\x7b\xc2\x38\xaf\xfc\x23\x8a\x05\x56\xeb\x03\x0a\x30\xcc\x63\x6c\xd9\x3c\xbe\xf5\x7b\x94\xba\x94\xd3\xbf\x88\x4c"),
["\xd8\x09\x55\x3b\x94\x4f\x7a\xff\xc8\x16\x19\x6f\x94\x4f\x85\xab\xb0\xf8\xfc\x5e\x87\x55\x26\x0f\x15\xd1\x2e\x72\xbb\x45\x4b\x14"] = CTInfo($description="Google 'Xenon2026h2' log", $operator="Google", $url="https://ct.googleapis.com/logs/eu1/xenon2026h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe5\x77\x78\x95\x71\x28\xb3\x95\xc9\xa5\xcc\x7a\x4c\xe8\x32\x03\x96\x7b\xfc\x2e\x1d\xb9\xa4\xdb\x43\xa0\xbd\x69\x72\xf9\x45\xba\x9a\xc3\xe9\x96\xd5\x70\xe7\x0d\x7e\xc9\x95\x15\x27\x8a\x72\x30\x65\x86\x43\x53\xdc\x11\x44\x18\x49\x98\x25\x68\xa7\x3c\x05\xbf"),
["\xcc\xfb\x0f\x6a\x85\x71\x09\x65\xfe\x95\x9b\x53\xce\xe9\xb2\x7c\x22\xe9\x85\x5c\x0d\x97\x8d\xb6\xa9\x7e\x54\xc0\xfe\x4c\x0d\xb0"] = CTInfo($description="Cloudflare 'Nimbus2025'", $operator="Cloudflare", $url="https://ct.cloudflare.com/logs/nimbus2025/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x1a\x80\x1a\x15\x19\x19\x23\x79\xb4\xfa\xa0\x79\x8e\x8d\xd5\xc1\xdc\xc2\xb5\x96\x92\x7e\x94\xe0\xc3\x7e\x14\x7c\x0a\x0d\x2d\x46\xa8\x9d\x1b\xb1\x41\x65\x0c\x5f\x98\xc4\x5a\x17\x79\x81\x5b\x4a\x14\x41\xec\xaf\xa9\x5d\x0e\xab\x12\x19\x71\xcd\x43\xef\xbb\x97"),
["\x48\xb0\xe3\x6b\xda\xa6\x47\x34\x0f\xe5\x6a\x02\xfa\x9d\x30\xeb\x1c\x52\x01\xcb\x56\xdd\x2c\x81\xd9\xbb\xbf\xab\x39\xd8\x84\x73"] = CTInfo($description="DigiCert Yeti2024 Log", $operator="DigiCert", $url="https://yeti2024.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x57\xb8\xc1\x6f\x30\xa4\x7f\x2e\xe4\xf0\xd0\xd9\x60\x62\x13\x95\xe3\x7a\xe3\x4e\x53\xc3\xb3\xb8\x73\x85\xc1\x18\x0d\x23\x0e\x58\x84\xd2\x78\xef\x9b\xb3\x1e\x2c\x1a\xde\xc1\x8f\x81\x1b\x19\x44\x58\xb7\x00\x77\x60\x20\x1a\x72\xd8\x82\xde\xae\x9e\xb1\xc6\x4b"),
["\xcb\x38\xf7\x15\x89\x7c\x84\xa1\x44\x5f\x5b\xc1\xdd\xfb\xc9\x6e\xf2\x9a\x59\xcd\x47\x0a\x69\x05\x85\xb0\xcb\x14\xc3\x14\x58\xe7"] = CTInfo($description="Cloudflare 'Nimbus2026'", $operator="Cloudflare", $url="https://ct.cloudflare.com/logs/nimbus2026/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xd8\x5c\x61\x4f\xac\x6a\xd2\x20\x80\x4e\x8a\x42\xf6\x04\xad\x4b\xd4\xb1\x1c\x79\x8e\x29\x32\xde\x69\x53\x59\xeb\xad\x78\xf3\xc0\x2a\xf2\xd0\x11\x5d\x05\x7e\xeb\xe8\xc1\xd3\xdf\x37\xbf\x91\x64\x46\x6e\x0e\x27\x13\xea\xbb\x6f\x46\x27\x58\x86\xef\x40\x21\xa3"),
["\x7d\x59\x1e\x12\xe1\x78\x2a\x7b\x1c\x61\x67\x7c\x5e\xfd\xf8\xd0\x87\x5c\x14\xa0\x4e\x95\x9e\xb9\x03\x2f\xd9\x0e\x8c\x2e\x79\xb8"] = CTInfo($description="DigiCert Yeti2025 Log", $operator="DigiCert", $url="https://yeti2025.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xdf\x95\x00\x5e\x10\xc1\x01\xf7\x37\xe3\x10\x74\xd1\xff\xb2\xca\x90\xed\x32\x99\x5f\x0c\x39\xfe\xa1\xd1\x13\x11\xac\xd1\xb3\x73\x93\x20\xc2\x13\x3c\x4c\xb5\x7a\x52\x86\x86\x3d\xe3\x95\x24\x7c\xd8\x91\x98\x48\x3b\xf0\xf0\xdf\x21\xf1\xb0\x81\x5a\x59\x25\x43"),
["\x73\xd9\x9e\x89\x1b\x4c\x96\x78\xa0\x20\x7d\x47\x9d\xe6\xb2\xc6\x1c\xd0\x51\x5e\x71\x19\x2a\x8c\x6b\x80\x10\x7a\xc1\x77\x72\xb5"] = CTInfo($description="DigiCert Nessie2024 Log", $operator="DigiCert", $url="https://nessie2024.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2d\xfc\xa2\x7b\x36\xbf\x56\x91\xe9\xfe\x3f\xe8\x3d\xfc\xc3\xa7\xe0\x61\x52\xea\x2c\xe9\x05\xa3\x9f\x27\x17\x81\x05\x70\x6b\x81\x61\x44\x8a\xf8\x3b\x10\x80\x42\xed\x03\x2f\x00\x50\x21\xfc\x41\x54\x84\xa3\x54\xd5\x2e\xb2\x7a\x16\x4b\x2a\x1f\x2b\x66\x04\x2b"),
["\xe6\xd2\x31\x63\x40\x77\x8c\xc1\x10\x41\x06\xd7\x71\xb9\xce\xc1\xd2\x40\xf6\x96\x84\x86\xfb\xba\x87\x32\x1d\xfd\x1e\x37\x8e\x50"] = CTInfo($description="DigiCert Nessie2025 Log", $operator="DigiCert", $url="https://nessie2025.ct.digicert.com/log/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xf2\xf0\xf0\xa7\x8b\x81\x2e\x09\x39\x3b\x9f\x42\xda\x38\x44\x5f\xb4\xcc\xed\x36\xbb\xd8\x43\x7f\x16\x49\x57\x87\x04\x7f\xa5\x01\x34\xf7\xe8\x68\x3f\xb7\x78\x1f\x60\x66\x2d\x67\x9a\x75\x80\xb7\x53\xa7\x85\xd5\xbc\xab\x47\x06\x55\xdb\xb5\xdf\x88\xa1\x6f\x38"),
["\x73\x20\x22\x0f\x08\x16\x8a\xf9\xf3\xc4\xa6\x8b\x0a\xb2\x6a\x9a\x4a\x00\xee\xf5\x77\x85\x8a\x08\x4d\x05\x00\xd4\xa5\x42\x44\x59"] = CTInfo($description="DigiCert 'Wyvern2025h1' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\xcb\x80\x61\x86\x1b\x1f\xb5\xab\x2b\x20\x76\x59\x83\x66\x0e\xce\xae\xb8\x6f\x3b\x88\x02\xeb\x43\xf4\x87\x90\xcb\x8b\xda\xac\x0e\x19\x50\xe0\xf9\x24\x0e\xab\x26\x93\x8c\x3f\x9e\x0d\x96\x58\x44\x9d\x3b\x8a\x80\xc5\xc8\xbe\xe1\x89\x46\x6b\x48\x4c\xd6\x09"),
["\xed\x3c\x4b\xd6\xe8\x06\xc2\xa4\xa2\x00\x57\xdb\xcb\x24\xe2\x38\x01\xdf\x51\x2f\xed\xc4\x86\xc5\x70\x0f\x20\xdd\xb7\x3e\x3f\xe0"] = CTInfo($description="DigiCert 'Wyvern2025h2' Log", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe0\xdb\x41\xef\xe4\x04\xbd\xcb\x6b\x2e\x4c\xcc\xf1\x6c\xde\x41\x58\x7f\xfe\x94\xf6\x7a\xf6\x60\xed\x8b\x76\x72\xa3\xa2\x1c\x31\x13\x32\x35\xa1\xf2\x08\xd2\x68\xc5\x34\xa7\x56\x08\x1c\x63\xde\x95\xe2\x81\x69\x97\x8d\x1e\xa8\xb7\x66\x51\x25\x75\x4d\x78\x2e"),
["\x64\x11\xc4\x6c\xa4\x12\xec\xa7\x89\x1c\xa2\x02\x2e\x00\xbc\xab\x4f\x28\x07\xd4\x1e\x35\x27\xab\xea\xfe\xd5\x03\xc9\x7d\xcd\xf0"] = CTInfo($description="DigiCert 'Wyvern2026h1'", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xec\xbc\x34\x39\xe2\x9a\x8d\xb7\x99\x7a\x91\xf1\x05\x72\x52\xda\x93\x89\x5d\x3a\x07\x8b\x99\xed\x80\xa5\x16\xda\x73\x21\x20\xeb\x86\x96\x87\xc5\xc6\xd9\x17\xba\x6e\xb9\x4c\x13\x58\xd5\xd1\x83\xf8\x7a\xdf\x1e\x07\xbc\x15\xcd\xc0\x4a\xcd\x2a\x31\x71\x07\x55"),
["\xc2\x31\x7e\x57\x45\x19\xa3\x45\xee\x7f\x38\xde\xb2\x90\x41\xeb\xc7\xc2\x21\x5a\x22\xbf\x7f\xd5\xb5\xad\x76\x9a\xd9\x0e\x52\xcd"] = CTInfo($description="DigiCert 'Wyvern2026h2'", $operator="DigiCert", $url="https://wyvern.ct.digicert.com/2026h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x7a\x73\xdb\x4a\xf2\xde\x4f\xec\xe1\x14\x1b\xbe\xa6\xa9\x3c\x21\xb8\x45\x12\xcd\x7a\x88\x26\x91\x20\x56\xf5\x49\x32\xc3\x75\x6c\xcb\xe9\x7c\x13\x75\x35\x9c\x6c\xec\xf1\x31\x3c\xc1\xde\x9b\x8c\x13\x92\xb7\xad\x3d\x0f\xa1\x9c\x8f\x48\xce\x74\x27\x18\x23\x99"),
["\xde\x85\x81\xd7\x50\x24\x7c\x6b\xcd\xcb\xaf\x56\x37\xc5\xe7\x81\xc6\x4c\xe4\x6e\xd6\x17\x63\x9f\x8f\x34\xa7\x26\xc9\xe2\xbd\x37"] = CTInfo($description="DigiCert 'Sphinx2025h1' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xe3\x2f\x1f\x4d\x89\x05\x75\x29\x78\xbb\x22\x3d\x07\x62\x51\x14\x70\x94\xe7\x3c\xea\xf5\xee\xae\xa6\x48\x9a\x86\x52\x4e\x9e\x5c\xe3\x95\x97\x28\xbb\x52\x4b\x2a\xfd\xc8\xc9\x89\x4e\x45\x31\x17\xd3\x8d\xf2\xe7\xce\x18\x11\x58\x98\x2c\x60\x6f\x58\x20\x36\x6e"),
["\xa4\x42\xc5\x06\x49\x60\x61\x54\x8f\x0f\xd4\xea\x9c\xfb\x7a\x2d\x26\x45\x4d\x87\xa9\x7f\x2f\xdf\x45\x59\xf6\x27\x4f\x3a\x84\x54"] = CTInfo($description="DigiCert 'Sphinx2025h2' Log", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x41\x8c\x50\x13\x54\xb1\x19\x05\xb7\x7f\x4a\x20\x6e\xa3\x75\x63\xca\x34\xf4\xcc\x74\xea\x32\x3b\xb6\x8b\x03\x14\xa8\x52\x7f\x32\x87\x5e\x59\x9e\x0f\xab\x18\x9e\x29\x6c\xb5\x72\x77\x1a\x27\x54\x85\x5d\xc1\x7b\x24\xa8\x34\xe3\xcd\x88\xce\xd4\x50\x1b\xbe\x69"),
["\x49\x9c\x9b\x69\xde\x1d\x7c\xec\xfc\x36\xde\xcd\x87\x64\xa6\xb8\x5b\xaf\x0a\x87\x80\x19\xd1\x55\x52\xfb\xe9\xeb\x29\xdd\xf8\xc3"] = CTInfo($description="DigiCert 'Sphinx2026h1'", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xab\x84\xbe\xf8\x3c\x87\xa2\x42\x25\x9a\x66\x9c\xae\x2b\x52\xe7\x5a\xf9\x21\x1b\x19\x03\xa5\x07\xe2\x46\x0b\x1f\x8a\x5e\x7c\x6c\xae\xff\x19\x77\x86\xe8\x7b\xfc\xee\x6b\x36\x4f\xf2\xbc\xc3\x9e\x05\x02\x9a\x08\x01\xb5\x49\x23\x35\xc4\xd3\x50\x2b\x51\xe9\xf4"),
["\x94\x4e\x43\x87\xfa\xec\xc1\xef\x81\xf3\x19\x24\x26\xa8\x18\x65\x01\xc7\xd3\x5f\x38\x02\x01\x3f\x72\x67\x7d\x55\x37\x2e\x19\xd8"] = CTInfo($description="DigiCert 'Sphinx2026h2'", $operator="DigiCert", $url="https://sphinx.ct.digicert.com/2026h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xaa\xe0\xf4\x26\x44\x50\x4f\xfd\xa2\x9e\xe6\x80\xe0\x70\xb5\xb1\xce\x94\xa5\xf8\x97\x81\x44\x55\x42\x64\x1c\x22\x79\xa7\x64\x59\xd3\x89\x93\x21\x66\xfb\x09\x81\x60\x1f\x62\x55\x34\x38\x8c\xa4\x38\x2e\xac\x95\x0c\xeb\xed\x4f\x64\xbc\x45\x42\xf7\x06\x7a\xcd"),
["\x55\x81\xd4\xc2\x16\x90\x36\x01\x4a\xea\x0b\x9b\x57\x3c\x53\xf0\xc0\xe4\x38\x78\x70\x25\x08\x17\x2f\xa3\xaa\x1d\x07\x13\xd3\x0c"] = CTInfo($description="Sectigo 'Sabre' CT log", $operator="Sectigo", $url="https://sabre.ct.comodo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xf2\x6f\xd2\x89\x0f\x3f\xc5\xf8\x87\x1e\xab\x65\xb3\xd9\xbb\x17\x23\x8c\x06\x0e\x09\x55\x96\x3d\x0a\x08\xa2\xc5\x71\xb3\xd1\xa9\x2f\x28\x3e\x83\x10\xbf\x12\xd0\x44\x66\x15\xef\x54\xe1\x98\x80\xd0\xce\x24\x6d\x3e\x67\x9a\xe9\x37\x23\xce\x52\x93\x86\xda\x80"),
["\xa2\xe2\xbf\xd6\x1e\xde\x2f\x2f\x07\xa0\xd6\x4e\x6d\x37\xa7\xdc\x65\x43\xb0\xc6\xb5\x2e\xa2\xda\xb7\x8a\xf8\x9a\x6d\xf5\x17\xd8"] = CTInfo($description="Sectigo 'Sabre2024h1'", $operator="Sectigo", $url="https://sabre2024h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x2c\x01\xf6\xce\x31\xbc\xaa\x14\x61\x51\xfe\x6b\x7a\x87\xae\xa6\xd3\x9b\xc7\x87\x2d\x0a\x5a\xc8\x4f\xb5\x54\xdc\xc9\x93\xa0\x00\xee\xca\x1c\xb9\xa7\xb6\x7b\x47\x3b\xe5\x4f\xaa\x6c\x16\x1c\x70\x2e\xc8\xec\x53\x5a\x4c\x21\x4c\x7e\x27\x0b\x13\x14\x5e\xfc\x85"),
["\x19\x98\x10\x71\x09\xf0\xd6\x52\x2e\x30\x80\xd2\x9e\x3f\x64\xbb\x83\x6e\x28\xcc\xf9\x0f\x52\x8e\xee\xdf\xce\x4a\x3f\x16\xb4\xca"] = CTInfo($description="Sectigo 'Sabre2024h2'", $operator="Sectigo", $url="https://sabre2024h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x7a\x10\x4c\x8a\xe7\x22\x7b\x6d\x2a\xba\x8e\xfa\x6b\x4a\x81\xd5\x85\xae\x03\xef\xff\x4b\xfc\x4d\x53\x3d\xb7\x8c\xbb\x75\x09\xc9\xea\x16\x7e\xc1\x77\x16\xd2\xc2\x45\x74\x6d\x8d\xc4\xe1\x88\x37\xdf\xd4\xf3\x60\x65\xfc\xa0\x75\xf0\x20\x66\x8e\x4a\xcc\x19\xda"),
["\xe0\x92\xb3\xfc\x0c\x1d\xc8\xe7\x68\x36\x1f\xde\x61\xb9\x96\x4d\x0a\x52\x78\x19\x8a\x72\xd6\x72\xc4\xb0\x4d\xa5\x6d\x6f\x54\x04"] = CTInfo($description="Sectigo 'Sabre2025h1'", $operator="Sectigo", $url="https://sabre2025h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x7e\x2f\x39\xf1\xe8\x23\x8e\xb3\x32\x04\xaf\x4d\x57\xf6\xdb\xc5\x74\xa4\x7a\x6d\x3b\x07\x51\x0c\x5a\xfb\x80\x30\x05\xc6\x5a\x0c\xc4\x76\xd6\x06\xa8\x57\x4d\xfb\xdf\xe4\x82\x90\xc2\x41\xae\x70\xb3\x31\xa2\xe3\xfa\x3d\x5f\x2c\x5d\x04\xcd\xb4\x9d\x55\xab\x41"),
["\x1a\x04\xff\x49\xd0\x54\x1d\x40\xaf\xf6\xa0\xc3\xbf\xf1\xd8\xc4\x67\x2f\x4e\xec\xee\x23\x40\x68\x98\x6b\x17\x40\x2e\xdc\x89\x7d"] = CTInfo($description="Sectigo 'Sabre2025h2'", $operator="Sectigo", $url="https://sabre2025h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x85\x13\x11\x2d\x7b\xf3\x93\x81\xe4\xb9\x7c\xd9\x64\x3b\xe7\xb5\x83\x99\x66\x79\x59\x47\x6a\x42\x5e\xd6\xbd\x63\x2e\xb7\x91\x4b\xae\xbc\x56\xc4\xc5\x6e\x09\xa0\xd7\x64\x1a\xc8\xc1\xaf\x89\x8b\xf5\x58\xd8\xba\xeb\x7b\x83\x52\xe9\xf4\xe0\xa5\xcd\xcd\x92\xcc"),
["\x6f\x53\x76\xac\x31\xf0\x31\x19\xd8\x99\x00\xa4\x51\x15\xff\x77\x15\x1c\x11\xd9\x02\xc1\x00\x29\x06\x8d\xb2\x08\x9a\x37\xd9\x13"] = CTInfo($description="Sectigo 'Mammoth' CT log", $operator="Sectigo", $url="https://mammoth.ct.comodo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xef\xe4\x7d\x74\x2e\x15\x15\xb6\xe9\xbb\x23\x8b\xfb\x2c\xb5\xe1\xc7\x80\x98\x47\xfb\x40\x69\x68\xfc\x49\xad\x61\x4e\x83\x47\x3c\x1a\xb7\x8d\xdf\xff\x7b\x30\xb4\xba\xff\x2f\xcb\xa0\x14\xe3\xad\xd5\x85\x3f\x44\x59\x8c\x8c\x60\x8b\xd7\xb8\xb1\xbf\xae\x8c\x67"),
["\x29\xd0\x3a\x1b\xb6\x74\xaa\x71\x1c\xd3\x03\x5b\x65\x57\xc1\x4f\x8a\xa7\x8b\x4f\xe8\x38\x94\x49\xec\xa4\x53\xf9\x44\xbd\x24\x68"] = CTInfo($description="Sectigo 'Mammoth2024h1'", $operator="Sectigo", $url="https://mammoth2024h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa4\x59\x90\xf3\x71\x24\x24\xf7\xc3\x55\x27\x56\x9c\xa3\x59\x1e\xf7\xb7\x9f\xce\xab\x4e\x19\x66\x4d\xd0\x8a\xfa\x9d\x62\xa4\x24\xf0\x3b\x20\xe4\x1d\x14\x67\xc8\xfc\xe4\x37\xf2\x4b\x38\x54\x5a\xcf\x9f\x6b\x07\x90\xd0\x0e\x7e\x3d\x4c\x87\xb2\xe8\x3f\x07\xcc"),
["\x50\x85\x01\x58\xdc\xb6\x05\x95\xc0\x0e\x92\xa8\x11\x02\xec\xcd\xfe\x3f\x6b\x78\x58\x42\x9f\x57\x98\x35\x38\xc9\xda\x52\x50\x63"] = CTInfo($description="Sectigo 'Mammoth2024h1b'", $operator="Sectigo", $url="https://mammoth2024h1b.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa3\xd5\x07\x28\x7a\x04\x34\xae\xca\xbe\x80\x79\x4f\x3e\xf6\x41\xf4\x24\x04\xe1\xd6\x36\x5a\x1a\x09\xf2\xd1\xba\x84\x17\xae\x1e\xa1\x7c\x00\x1d\x54\x73\x90\x75\x21\xa8\xd1\xda\x5e\x10\xe1\x8c\xec\xb2\x8a\x8c\xc8\xe7\xdd\xcd\xe2\x07\xf0\x4e\x16\x02\x57\x37"),
["\xdf\xe1\x56\xeb\xaa\x05\xaf\xb5\x9c\x0f\x86\x71\x8d\xa8\xc0\x32\x4e\xae\x56\xd9\x6e\xa7\xf5\xa5\x6a\x01\xd1\xc1\x3b\xbe\x52\x5c"] = CTInfo($description="Sectigo 'Mammoth2024h2'", $operator="Sectigo", $url="https://mammoth2024h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x85\x66\x22\x24\x6e\xbe\x52\x62\x0a\xa0\xaf\xc3\x25\x1a\x36\x2e\xa7\x60\x89\xa2\x65\xbf\xa4\x5f\xbd\x85\x6a\x94\x05\x81\x35\x90\x54\x31\x95\xe7\x11\x9e\xa3\x2e\x0f\x85\xef\xa7\x88\x57\x8b\x63\x1a\x81\xc1\x41\x9d\x7d\xec\x01\x3a\xdb\xb9\xc1\x27\xf4\x65\x1e"),
["\x13\x4a\xdf\x1a\xb5\x98\x42\x09\x78\x0c\x6f\xef\x4c\x7a\x91\xa4\x16\xb7\x23\x49\xce\x58\x57\x6a\xdf\xae\xda\xa7\xc2\xab\xe0\x22"] = CTInfo($description="Sectigo 'Mammoth2025h1'", $operator="Sectigo", $url="https://mammoth2025h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x13\x3c\x41\xb5\x30\x7d\x2e\x4a\xa1\xa8\x6b\xd2\xc5\x57\x6b\x98\xfe\x7e\xef\xd5\x21\xe2\xba\x5d\xb0\xba\x85\x11\x6e\x94\xe0\x3d\xa8\x8e\x6d\x56\x8d\x44\x02\x9e\xb0\x83\xcc\x54\xdf\x9b\x4e\x72\x62\x4b\x3c\x0c\x32\xdd\x86\xfb\xeb\x3e\x66\xcd\x77\x58\x5b\xe5"),
["\xaf\x18\x1a\x28\xd6\x8c\xa3\xe0\xa9\x8a\x4c\x9c\x67\xab\x09\xf8\xbb\xbc\x22\xba\xae\xbc\xb1\x38\xa3\xa1\x9d\xd3\xf9\xb6\x03\x0d"] = CTInfo($description="Sectigo 'Mammoth2025h2'", $operator="Sectigo", $url="https://mammoth2025h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x88\xe2\xc7\xb3\xd7\x37\xa3\x91\xd7\xb3\xc5\xda\x07\x51\x04\x2b\x81\xed\xc2\x44\x3b\x75\xa0\xe6\x65\xe1\x4a\xba\x1b\xb1\x9c\xa9\x2a\x84\x31\x29\xae\x1d\x8b\xf1\x33\x9f\x12\x2e\x90\xb1\x15\x67\x66\xa0\x7c\x0b\x5b\x62\x7f\x6c\x9a\x6a\x30\x9b\x68\x02\x16\x6f"),
["\x3b\x53\x77\x75\x3e\x2d\xb9\x80\x4e\x8b\x30\x5b\x06\xfe\x40\x3b\x67\xd8\x4f\xc3\xf4\xc7\xbd\x00\x0d\x2d\x72\x6f\xe1\xfa\xd4\x17"] = CTInfo($description="Let's Encrypt 'Oak2024H1' log", $operator="Let's Encrypt", $url="https://oak.ct.letsencrypt.org/2024h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x56\x43\xd7\x7e\x7b\xd4\x72\xb7\xba\xa9\x51\xbd\x36\x93\xb7\xe9\xb5\x92\x0f\xea\x5e\xb7\x45\xa3\x92\xfd\xc9\xa5\x3c\x80\xac\x1a\x20\xef\x25\x2f\xb8\xe1\x20\xf7\xa8\x3a\x2e\x07\x8d\xe6\xeb\xa4\xe2\x7d\x24\x63\x9f\x46\xbf\x94\x73\x52\x8d\x96\xae\xa9\x26\xfd"),
["\x3f\x17\x4b\x4f\xd7\x22\x47\x58\x94\x1d\x65\x1c\x84\xbe\x0d\x12\xed\x90\x37\x7f\x1f\x85\x6a\xeb\xc1\xbf\x28\x85\xec\xf8\x64\x6e"] = CTInfo($description="Let's Encrypt 'Oak2024H2' log", $operator="Let's Encrypt", $url="https://oak.ct.letsencrypt.org/2024h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xd7\x73\xd6\x53\x47\xe9\xf3\xc9\xd5\x7c\x16\xc2\xd6\x8f\x70\x65\xfa\xf2\x51\x36\xa9\x13\x80\x2f\xed\xf9\x94\xd3\x5a\x8b\xe8\x4f\x33\xcf\xc3\xd3\x89\xd4\x5f\x5a\x66\x89\xba\x20\x1f\x71\xcb\xca\xbb\x9f\x9f\xf3\x5c\x2d\x1e\xa3\x81\x59\xaf\x92\xb3\x6d\x30\x68"),
["\x25\x2f\x94\xc2\x2b\x29\xe9\x6e\x9f\x41\x1a\x72\x07\x2b\x69\x5c\x5b\x52\xff\x97\xa9\x0d\x25\x40\xbb\xfc\xdc\x51\xec\x4d\xee\x0b"] = CTInfo($description="Sectigo 'Mammoth2026h1'", $operator="Sectigo", $url="https://mammoth2026h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x9e\xcb\x0c\x8a\x51\xcc\x8a\xe2\x0b\xce\x85\xe6\xaf\x4d\x31\xdb\x1b\x6a\x4c\xfd\xb0\x79\x6b\x99\x97\xc0\x5d\xfb\x6e\x45\x50\x1d\x62\xaa\xc6\x9f\x9b\x6b\x05\x3d\xa2\xab\x2b\x5d\x88\x9b\x50\x28\xe2\x9e\x58\xa5\xa5\xfa\xf9\xe3\xfa\x15\x25\xe3\x14\x13\x32\xc4"),
["\x94\xb1\xc1\x8a\xb0\xd0\x57\xc4\x7b\xe0\xac\x04\x0e\x1f\x2c\xbc\x8d\xc3\x75\x72\x7b\xc9\x51\xf2\x0a\x52\x61\x26\x86\x3b\xa7\x3c"] = CTInfo($description="Sectigo 'Mammoth2026h2'", $operator="Sectigo", $url="https://mammoth2026h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xec\x83\x61\xf2\xd7\xb4\xbb\xe4\xe4\x3b\xeb\xc8\x63\x75\x98\xcf\x61\x90\x63\x14\x3d\x5f\x22\xdf\x74\xba\x50\xa7\x58\x9b\x69\x7d\xe6\x63\x89\x6d\xd9\xd7\x51\x84\x3f\xf8\x02\xd8\xc8\xff\xc2\x97\x71\xe5\x7e\x27\xf5\x72\xb1\x8f\x24\x27\x57\x0a\x0d\x74\xc0\xb6"),
["\x56\x6c\xd5\xa3\x76\xbe\x83\xdf\xe3\x42\xb6\x75\xc4\x9c\x23\x24\x98\xa7\x69\xba\xc3\x82\xcb\xab\x49\xa3\x87\x7d\x9a\xb3\x2d\x01"] = CTInfo($description="Sectigo 'Sabre2026h1'", $operator="Sectigo", $url="https://sabre2026h1.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x84\x26\xbc\x36\xbd\xd8\x8d\x3c\x87\x9e\xe0\x10\xaf\xcd\x94\xd9\xd7\xb9\x51\x80\x34\x7e\xf7\x58\x5c\x73\xea\xeb\x09\x93\xb8\x10\x7b\x90\x9c\x7d\xc7\xcd\x96\x43\xed\x53\x6e\x95\x21\x46\x67\x51\xf0\xde\xb6\xc9\x9e\xaa\xe2\x80\x6d\xce\x25\x81\x34\xd7\x6a\x60"),
["\x1f\x56\xd1\xab\x94\x70\x4a\x41\xdd\x3f\xea\xfd\xf4\x69\x93\x55\x30\x2c\x14\x31\xbf\xe6\x13\x46\x08\x9f\xff\xae\x79\x5d\xcc\x2f"] = CTInfo($description="Sectigo 'Sabre2026h2'", $operator="Sectigo", $url="https://sabre2026h2.ct.sectigo.com/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xce\x35\xca\xec\x39\x07\x82\xda\x77\x27\x86\xe4\xf2\x7e\xc5\xdc\x38\xf2\x9b\xa9\xab\x8c\xa7\xc0\xed\x83\x1e\x3e\x6a\x1b\xc0\xf0\x95\x56\xba\x32\x33\x4c\x75\x7c\x09\x07\xe9\xe1\x3e\x65\x35\x63\xf0\x49\xbe\x72\xd1\xaa\x9d\xaf\x7d\x08\xc4\xb4\x8d\x59\x3d\x73"),
["\xa2\xe3\x0a\xe4\x45\xef\xbd\xad\x9b\x7e\x38\xed\x47\x67\x77\x53\xd7\x82\x5b\x84\x94\xd7\x2b\x5e\x1b\x2c\xc4\xb9\x50\xa4\x47\xe7"] = CTInfo($description="Let's Encrypt 'Oak2025h1'", $operator="Let's Encrypt", $url="https://oak.ct.letsencrypt.org/2025h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x29\xe0\x69\x53\xd7\xa3\x9c\x26\x88\x65\xe5\xf7\xf4\x4b\x1d\x17\x9b\xc3\xbd\xff\x04\x2d\x31\xdd\x2c\xfc\x62\x92\x5e\x32\xe0\x48\x91\x38\x84\x1f\x4b\x87\xab\x72\x99\xcc\x1d\xf8\x7c\xf9\x3c\x58\x54\x5b\x37\x10\xb1\xab\xd8\x83\xfb\x84\xf1\x95\x3f\x2e\x2f\x1c"),
["\x0d\xe1\xf2\x30\x2b\xd3\x0d\xc1\x40\x62\x12\x09\xea\x55\x2e\xfc\x47\x74\x7c\xb1\xd7\xe9\x30\xef\x0e\x42\x1e\xb4\x7e\x4e\xaa\x34"] = CTInfo($description="Let's Encrypt 'Oak2025h2'", $operator="Let's Encrypt", $url="https://oak.ct.letsencrypt.org/2025h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xb5\x76\x30\x07\xad\xc6\xc8\xd2\xe4\x4b\xd2\xf5\xbe\xa2\x8d\x9c\xfd\x74\xfa\x3a\xd6\xfa\x59\x5d\xb6\x1c\x60\xd3\xdd\x1f\x63\x87\x86\xe3\x45\xe0\xd5\x1b\xc0\x35\x6a\xab\x27\x91\x95\xc9\xd7\x3d\xbb\xc1\xf7\x71\x86\x69\xf4\xb3\x5f\x90\x09\xaa\xae\xbd\x8d\xa9"),
["\x87\x4f\xb5\x0d\xc0\x29\xd9\x93\x1d\xe5\x73\xe9\xf2\x89\x9e\x8e\x45\x33\xb3\x92\xd3\x8b\x0a\x46\x25\x74\xbf\x0f\xee\xb2\xfc\x1e"] = CTInfo($description="Trust Asia Log2024-2", $operator="TrustAsia", $url="https://ct2024.trustasia.com/log2024/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\x64\xe2\x79\x81\x3f\x61\xd7\xec\xc6\xf8\x65\x28\x1d\xa0\xb4\x66\x33\xc3\x25\xd5\x0a\x95\x78\x9c\x8f\xfe\xa4\x2a\xd8\x8f\x7e\x72\xe0\xfe\xa8\x7f\xf8\xb1\x2d\x85\xc0\x8e\x12\x74\x0d\x2f\x8c\xab\xd7\x7f\x7a\x1e\xd9\x84\x33\x39\xe8\xfd\x89\x5f\x96\x48\x08"),
["\x19\x86\xd4\xc7\x28\xaa\x6f\xfe\xba\x03\x6f\x78\x2a\x4d\x01\x91\xaa\xce\x2d\x72\x31\x0f\xae\xce\x5d\x70\x41\x2d\x25\x4c\xc7\xd4"] = CTInfo($description="Let's Encrypt 'Oak2026h1'", $operator="Let's Encrypt", $url="https://oak.ct.letsencrypt.org/2026h1/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x99\xd4\x61\x70\x22\xfa\x77\x93\x0d\xb3\xc7\x80\x96\x22\x51\xbf\x25\x79\xb1\x01\x42\xe9\x41\x7b\x8b\x0c\xc7\xb2\x65\x5a\x89\xf4\xfa\xe2\x02\x46\xd4\x8a\xc7\xcc\x10\x07\x11\x27\x45\x48\x90\x23\x40\xde\x7a\x4d\x89\x32\xfb\xd7\x0a\xeb\x5e\x8c\xa2\xf1\xf6\x49"),
["\xac\xab\x30\x70\x6c\xeb\xec\x84\x31\xf4\x13\xd2\xf4\x91\x5f\x11\x1e\x42\x24\x43\xb1\xf2\xa6\x8c\x4f\x3c\x2b\x3b\xa7\x1e\x02\xc3"] = CTInfo($description="Let's Encrypt 'Oak2026h2'", $operator="Let's Encrypt", $url="https://oak.ct.letsencrypt.org/2026h2/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x6a\x70\x9d\xb3\x96\xe3\xec\x85\x36\x95\xc3\x4f\x9c\x8b\xd9\x7c\xc9\xd5\x91\x29\xae\xeb\xd6\x87\xdc\x04\xbc\x3b\xf6\x34\x0f\xf6\xdb\x08\xf7\x52\xa9\x88\xef\xbb\x3f\x59\xd6\xd4\xf6\xf4\xfc\x5c\xa9\x8c\x5f\xfb\x0d\x60\xe4\x2c\x0f\x16\xec\x2a\xb2\x6d\xeb\x15"),
["\x28\xe2\x81\x38\xfd\x83\x21\x45\xe9\xa9\xd6\xaa\x75\x37\x6d\x83\x77\xa8\x85\x12\xb3\xc0\x7f\x72\x41\x48\x21\xdc\xbd\xe9\x8c\x66"] = CTInfo($description="TrustAsia Log2025a", $operator="TrustAsia", $url="https://ct2025-a.trustasia.com/log2025a/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x70\xe5\xb1\xa4\x09\x79\x2b\x9d\xf8\xa3\xa0\xdf\x18\xef\x95\x5d\x03\x6c\x7b\xa1\x91\xa9\xb8\x80\x7d\xec\x5c\x02\x08\xe2\x6e\x2f\x7c\x32\x70\xbd\x96\x84\x5f\xa6\x62\xe9\x65\xb5\x7c\x90\x58\xba\x22\xd5\xf9\xf5\x69\x54\xb7\xa8\x94\x4e\x32\x09\xae\x26\x11\x4d"),
["\x28\x2c\x8b\xdd\x81\x0f\xf9\x09\x12\x0a\xce\x16\xd6\xe0\xec\x20\x1b\xea\x82\xa3\xa4\xaf\x19\xd9\xef\xfb\x59\xe8\x3f\xdc\x42\x68"] = CTInfo($description="TrustAsia Log2025b", $operator="TrustAsia", $url="https://ct2025-b.trustasia.com/log2025b/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xaa\xa0\x8b\xdb\x67\x14\x5d\x97\x89\x1d\x08\x8d\x06\xd7\xc1\x94\x8e\xb0\xfa\x4c\x46\xd5\x53\x08\x78\x2b\x04\x53\x6c\xf3\xde\xb1\xd1\x53\x40\xda\x90\x57\xe6\x1a\x9e\x3c\xc7\x03\xb8\xbd\x2f\xa9\xcf\xe8\x7b\x5e\xe1\x4b\x60\xe5\x38\x43\x60\x97\xc1\x5b\x2f\x65"),
["\x74\xdb\x9d\x58\xf7\xd4\x7e\x9d\xfd\x78\x7a\x16\x2a\x99\x1c\x18\xcf\x69\x8d\xa7\xc7\x29\x91\x8c\x9a\x18\xb0\x45\x0d\xba\x44\xbc"] = CTInfo($description="TrustAsia 'log2026a'", $operator="TrustAsia", $url="https://ct2026-a.trustasia.com/log2026a/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\xa7\x4e\x7a\xc9\xa6\x07\xf9\xff\x74\xec\x98\xcb\x49\xe1\x00\x24\xb3\x59\x2e\x83\xfd\xc0\x70\x35\x33\x4c\x63\xca\x74\x83\xc0\x3c\x5b\x53\x40\x7c\x31\x1f\x35\xa4\x5f\x0f\xe4\xee\x4f\x89\x17\xe8\x5b\x2e\xc5\xac\x00\x05\xc9\x76\x37\x45\x97\x03\x15\xff\x60\x59"),
["\x25\xb7\xef\xde\xa1\x13\x01\x93\xed\x93\x07\x97\x70\xaa\x32\x2a\x26\x62\x0d\xe3\x5a\xc8\xaa\x7c\x75\x19\x7d\xe0\xb1\xa9\xe0\x65"] = CTInfo($description="TrustAsia 'log2026b'", $operator="TrustAsia", $url="https://ct2026-b.trustasia.com/log2026b/", $maximum_merge_delay=86400, $key="\x30\x59\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07\x03\x42\x00\x04\x0f\x12\x8c\xa9\xe6\xe3\xec\x62\xee\xdf\x58\xc8\x50\xe6\x26\x70\x76\x10\xb7\x04\x39\xb3\xa7\xf8\x4c\x73\x3b\xc3\x38\x5a\x12\x00\x4c\xe0\xda\x0e\x16\x8a\x45\x32\x0a\x31\xaa\x22\xc7\x9d\x7d\x05\x53\xc7\x9e\x94\xea\x9b\x57\x46\xbf\x4f\xa4\x7e\xfb\xdf\xfa\x85"),
};
File diff suppressed because one or more lines are too long
@@ -20,6 +20,12 @@ export {

hook Notice::notice(n: Notice::Info)
	{
	if ( CommunityID::Notice::enabled && n?$conn )
		n$community_id = community_id_v1(n$conn$id, CommunityID::seed, CommunityID::do_base64);
	if ( CommunityID::Notice::enabled && n?$conn && n$conn?$conn )
		{
		local info = n$conn$conn;
		# This is set during new_connection(), so it should
		# always be there, but better safe than sorry.
		if ( info?$community_id )
			n$community_id = info$community_id;
		}
	}
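For context, a minimal usage sketch in Zeek script (a hedged example, assuming the options referenced in the hunk above, CommunityID::Notice::enabled, CommunityID::seed and CommunityID::do_base64, are redef-able; their defaults are not shown in this diff):

	# Hypothetical local.zeek snippet enabling Community ID hashes on notices.
	redef CommunityID::Notice::enabled = T;
	# These mirror the parameters passed to community_id_v1() above.
	redef CommunityID::seed = 0;
	redef CommunityID::do_base64 = T;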
@@ -1,19 +1,2 @@

##! In a cluster configuration, open the port number for metrics
##! from the cluster node configuration for exporting data to
##! Prometheus.
##!
##! The manager node will also provide a ``/services.json`` endpoint
##! for the HTTP Service Discovery system in Prometheus to use for
##! configuration. This endpoint will include information for all of
##! the other nodes in the cluster.
@load base/frameworks/cluster

@if ( Cluster::is_enabled() )

redef Telemetry::metrics_endpoint_name = Cluster::node;

@if ( Cluster::local_node_metrics_port() != 0/unknown )
redef Telemetry::metrics_port = Cluster::local_node_metrics_port();
@endif

@endif
@deprecated "Remove in v7.1: Cluster nodes now implicitly listen on metrics port if set in cluster-layout."
@load base/frameworks/telemetry
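A hedged sketch of the replacement workflow in Zeek script (the metrics_port field name is inferred from the local.zeek comment further below; the node layout and port value are made up for illustration):

	# Hypothetical cluster layout entry: with the deprecation above, a node
	# starts listening on its metrics port implicitly, no extra @load needed.
	redef Cluster::nodes += {
		["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=10001/tcp,
		                $manager="manager", $metrics_port=9911/tcp],
	};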
@@ -17,7 +17,10 @@ export {
	};
}

event connection_state_remove(c: connection)
module Conn;

event new_connection(c: connection) &priority=5
	{
	Conn::set_conn(c, F); # likely first to access :-/
	c$conn$community_id = community_id_v1(c$id, CommunityID::seed, CommunityID::do_base64);
	}
@ -46,11 +46,11 @@ export {
## Regular expression used to match URI-based SQL injections.
const match_sql_injection_uri =
/[\?&][^[:blank:]\x00-\x1f\|]+?=[\-[:alnum:]%]+([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x1f]|\/\*.*?\*\/|\)?;)+.*?([hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x1f]|\/\*.*?\*\/)+/
| /[\?&][^[:blank:]\x00-\x1f\|]+?=[\-0-9%]+([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x1f]|\/\*.*?\*\/|\)?;)+([xX]?[oO][rR]|[nN]?[aA][nN][dD])([[:blank:]\x00-\x1f]|\/\*.*?\*\/)+['"]?(([^a-zA-Z&]+)?=|[eE][xX][iI][sS][tT][sS])/
| /[\?&][^[:blank:]\x00-\x1f]+?=[\-0-9%]*([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/
| /[\?&][^[:blank:]\x00-\x1f\|]+?=([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x1f]|\/\*.*?\*\/|;)*([xX]?[oO][rR]|[nN]?[aA][nN][dD]|[hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[rR][eE][gG][eE][xX][pP]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x1f]|\/\*.*?\*\/|[\[(])+[a-zA-Z&]{2,}/
| /[\?&][^[:blank:]\x00-\x1f]+?=[^\.]*?([cC][hH][aA][rR]|[aA][sS][cC][iI][iI]|[sS][uU][bB][sS][tT][rR][iI][nN][gG]|[tT][rR][uU][nN][cC][aA][tT][eE]|[vV][eE][rR][sS][iI][oO][nN]|[lL][eE][nN][gG][tT][hH])\(/
/[\?&][^[:blank:]\x00-\x1f\|\+]+?=[\-[:alnum:]%]+([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/|\)?;)+.*?([hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)+/
| /[\?&][^[:blank:]\x00-\x1f\|\+]+?=[\-0-9%]+([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/|\)?;)+([xX]?[oO][rR]|[nN]?[aA][nN][dD])([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)+['"]?(([^a-zA-Z&]+)?=|[eE][xX][iI][sS][tT][sS])/
| /[\?&][^[:blank:]\x00-\x1f\+]+?=[\-0-9%]*([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x1f]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/
| /[\?&][^[:blank:]\x00-\x1f\|\+]+?=([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/|;)*([xX]?[oO][rR]|[nN]?[aA][nN][dD]|[hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[rR][eE][gG][eE][xX][pP]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x1f\+]|\/\*.*?\*\/|[\[(])+[a-zA-Z&]{2,}/
| /[\?&][^[:blank:]\x00-\x1f\+]+?=[^\.]*?([cC][hH][aA][rR]|[aA][sS][cC][iI][iI]|[sS][uU][bB][sS][tT][rR][iI][nN][gG]|[tT][rR][uU][nN][cC][aA][tT][eE]|[vV][eE][rR][sS][iI][oO][nN]|[lL][eE][nN][gG][tT][hH])\(/
| /\/\*![[:digit:]]{5}.*?\*\// &redef;
## A hook that can be used to prevent specific requests from being counted
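The substantive change in the new patterns (an observation derived from the regexes themselves, not stated in the commit): \+ was added to the blank-character classes, so URIs that URL-encode spaces as plus signs, e.g. /page?id=1'+UNION+SELECT+..., now match as well.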
@@ -94,10 +94,6 @@ redef digest_salt = "Please change this value.";
# telemetry_histogram.log.
@load frameworks/telemetry/log

# Enable Prometheus metrics scraping in the cluster: each Zeek node will listen
# on the metrics port defined in its Cluster::nodes entry.
# @load frameworks/telemetry/prometheus

# Uncomment the following line to enable detection of the heartbleed attack. Enabling
# this might impact performance a bit.
# @load policy/protocols/ssl/heartbleed
@@ -201,6 +201,9 @@ public:

	bool PermitWeird(const char* name, uint64_t threshold, uint64_t rate, double duration);

	// Returns true once Done() is called.
	bool IsFinished() { return finished; }

private:
	friend class session::detail::Timer;
@@ -2,8 +2,6 @@

#include "zeek/DFA.h"

#include "zeek/zeek-config.h"

#include "zeek/Desc.h"
#include "zeek/EquivClass.h"
#include "zeek/Hash.h"
@@ -265,9 +263,9 @@ DFA_State_Cache::~DFA_State_Cache() {
DFA_State* DFA_State_Cache::Lookup(const NFA_state_list& nfas, DigestStr* digest) {
	// We assume that state ID's don't exceed 10 digits, plus
	// we allow one more character for the delimiter.
	auto id_tag_buf = std::make_unique<u_char[]>(nfas.length() * 11 + 1);
	auto id_tag_buf = std::make_unique<char[]>(nfas.length() * 11 + 1);
	auto id_tag = id_tag_buf.get();
	u_char* p = id_tag;
	char* p = id_tag;

	for ( int i = 0; i < nfas.length(); ++i ) {
		NFA_State* n = nfas[i];

@@ -287,7 +285,7 @@ DFA_State* DFA_State_Cache::Lookup(const NFA_state_list& nfas, DigestStr* digest
	// HashKey because the data is copied into the key.
	hash128_t hash;
	KeyedHash::Hash128(id_tag, p - id_tag, &hash);
	*digest = DigestStr(reinterpret_cast<const unsigned char*>(hash), 16);
	*digest = DigestStr(reinterpret_cast<const char*>(hash), 16);

	auto entry = states.find(*digest);
	if ( entry == states.end() ) {
@@ -2,7 +2,7 @@

#pragma once

#include <sys/types.h> // for u_char
#include <sys/types.h>
#include <cassert>
#include <map>
#include <string>
@@ -18,7 +18,7 @@ class DFA_Machine;

// Transitions to the uncomputed state indicate that we haven't yet
// computed the state to go to.
#define DFA_UNCOMPUTED_STATE -2
#define DFA_UNCOMPUTED_STATE (-2)
#define DFA_UNCOMPUTED_STATE_PTR ((DFA_State*)DFA_UNCOMPUTED_STATE)

class DFA_State : public Obj {
@@ -67,7 +67,7 @@ protected:
	DFA_State* mark;
};

using DigestStr = std::basic_string<u_char>;
using DigestStr = std::string;

struct DFA_State_Cache_Stats {
	// Sum of all NFA states
@@ -1642,6 +1642,9 @@ class CoerceToAnyExpr : public UnaryExpr {
public:
	CoerceToAnyExpr(ExprPtr op);

	bool IsReduced(Reducer* c) const override;
	ExprPtr Reduce(Reducer* c, StmtPtr& red_stmt) override;

protected:
	ValPtr Fold(Val* v) const override;
@@ -130,11 +130,14 @@ bool Obj::SetLocationInfo(const detail::Location* start, const detail::Location*
		// We already have a better location, so don't use this one.
		return true;

	delete location;

	location =
	auto new_location =
		new detail::Location(start->filename, start->first_line, end->last_line, start->first_column, end->last_column);

	// Don't delete this until we've constructed the new location, in case
	// "start" or "end" are our own location.
	delete location;
	location = new_location;

	return true;
}
@@ -11,14 +11,15 @@ namespace zeek::detail {
std::list<ScannedFile> files_scanned;
std::vector<SignatureFile> sig_files;

ScannedFile::ScannedFile(int arg_include_level, std::string arg_name, bool arg_skipped, bool arg_prefixes_checked)
ScannedFile::ScannedFile(int arg_include_level, std::string arg_name, bool arg_skipped, bool arg_prefixes_checked,
                         bool arg_is_canonical)
	: include_level(arg_include_level),
	  skipped(arg_skipped),
	  prefixes_checked(arg_prefixes_checked),
	  name(std::move(arg_name)) {
	if ( name == canonical_stdin_path )
		canonical_path = canonical_stdin_path;
	else {
	else if ( ! arg_is_canonical ) {
		std::error_code ec;
		auto canon = filesystem::canonical(name, ec);
		if ( ec )

@@ -26,6 +27,9 @@ ScannedFile::ScannedFile(int arg_include_level, std::string arg_name, bool arg_s
		canonical_path = canon.string();
	}
	else {
		canonical_path = name;
	}
}

bool ScannedFile::AlreadyScanned() const {
@@ -12,10 +12,13 @@ namespace zeek::detail {

// Script files we have already scanned (or are in the process of scanning).
// They are identified by normalized canonical path.
//
// If arg_is_canonical is set to true, assume arg_name is canonicalized and
// skip resolving the canonical name.
class ScannedFile {
public:
	ScannedFile(int arg_include_level, std::string arg_name, bool arg_skipped = false,
	            bool arg_prefixes_checked = false);
	            bool arg_prefixes_checked = false, bool arg_is_canonical = false);

	/**
	 * Compares the canonical path of this file against every canonical path
@@ -1849,7 +1849,8 @@ void WhenInfo::Build(StmtPtr ws) {
	auto else_branch = timeout_s ? timeout_s : empty;

	auto do_bodies = make_intrusive<IfStmt>(two_test, s, else_branch);
	auto dummy_return = make_intrusive<ReturnStmt>(true_const);
	auto any_true_const = make_intrusive<CoerceToAnyExpr>(true_const);
	auto dummy_return = make_intrusive<ReturnStmt>(any_true_const);

	auto shebang = make_intrusive<StmtList>(do_test, do_bodies, dummy_return);
@@ -2,14 +2,11 @@

#include "zeek/Timer.h"

#include "zeek/zeek-config.h"

#include "zeek/Desc.h"
#include "zeek/NetVar.h"
#include "zeek/RunState.h"
#include "zeek/broker/Manager.h"
#include "zeek/iosource/Manager.h"
#include "zeek/iosource/PktSrc.h"
#include "zeek/util.h"

namespace zeek::detail {
@@ -3241,10 +3241,11 @@ bool VectorVal::Assign(unsigned int index, ValPtr element) {

	if ( yield_types ) {
		const auto& t = element->GetType();
		(*yield_types)[index] = t;
		auto& yt_i = (*yield_types)[index];
		auto& elem = vector_val[index];
		if ( elem )
			ZVal::DeleteIfManaged(*elem, t);
			ZVal::DeleteIfManaged(*elem, yt_i);
		yt_i = t;
		elem = ZVal(std::move(element), t);
	}
	else {
@@ -113,19 +113,7 @@ void Analyzer::CtorInit(const zeek::Tag& arg_tag, Connection* arg_conn) {

Analyzer::~Analyzer() {
	assert(finished);

	// Make sure any late entries into the analyzer tree are handled (e.g.
	// from some Done() implementation).
	LOOP_OVER_GIVEN_CHILDREN(i, new_children) {
		if ( ! (*i)->finished )
			(*i)->Done();
	}

	// Deletion of new_children done in separate loop in case a Done()
	// implementation tries to inspect analyzer tree w/ assumption that
	// all analyzers are still valid.
	LOOP_OVER_GIVEN_CHILDREN(i, new_children)
		delete *i;
	assert(new_children.empty());

	LOOP_OVER_CHILDREN(i)
		delete *i;
@@ -330,6 +318,30 @@ void Analyzer::ForwardEndOfData(bool orig) {

bool Analyzer::AddChildAnalyzer(Analyzer* analyzer, bool init) {
	auto t = analyzer->GetAnalyzerTag();

	// Prevent attaching child analyzers to analyzer subtrees where
	// either the parent has finished or is being removed. Further,
	// don't attach analyzers when the connection has finished or is
	// currently being finished (executing Done()).
	//
	// Scenarios in which analyzers have been observed that late in
	// analyzer / connection lifetime are:
	//
	// * A DPD signature match on undelivered TCP data that is flushed
	//   during Connection::Done(). The PIA analyzer activates a new
	//   analyzer, adding it to the TCP analyzer.
	//
	// * Analyzers flushing buffered state during Done(), resulting
	//   in new analyzers being created.
	//
	// Analyzers added during Done() are problematic as calling Done()
	// within the parent's destructor isn't safe, so we prevent these
	// situations.
	if ( Removing() || IsFinished() || Conn()->IsFinished() ) {
		analyzer->Done();
		delete analyzer;
		return false;
	}

	if ( HasChildAnalyzer(t) || IsPreventedChildAnalyzer(t) ) {
		analyzer->Done();
		delete analyzer;
@@ -15,7 +15,7 @@ public type Request = unit {

	switch {
		-> : /\/W/ { self.whois = True; }
		-> void;
		-> : void;
	};

	: OptionalWhiteSpace;
@@ -1,5 +1,5 @@
spicy_add_analyzer(
	NAME LDAP
	PACKAGE_NAME spicy-ldap
	SOURCES ldap.spicy ldap.evt asn1.spicy
	MODULES LDAP ASN1)
	SOURCES ldap.spicy ldap.evt asn1.spicy ldap_zeek.spicy
	MODULES LDAP ASN1 LDAP_Zeek)
@@ -41,3 +41,18 @@ on LDAP::SearchRequest -> event LDAP::search_request($conn,
on LDAP::SearchResultEntry -> event LDAP::search_result_entry($conn,
                                                              message.messageID,
                                                              self.objectName);

on LDAP::ExtendedRequest -> event LDAP::extended_request($conn,
                                                         message.messageID,
                                                         self.requestName,
                                                         self.requestValue);

on LDAP::ExtendedResponse -> event LDAP::extended_response($conn,
                                                           message.messageID,
                                                           message.result_.code,
                                                           self.responseName,
                                                           self.responseValue);

# Once switched into MessageMode::TLS, we won't parse messages anymore,
# so this is raised just once.
on LDAP::Message if (ctx.messageMode == LDAP::MessageMode::TLS) -> event LDAP::starttls($conn);
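On the Zeek side these new events are consumed with ordinary handlers. A minimal, hedged sketch (the Zeek-side signature of LDAP::starttls is assumed to carry only the connection, matching the single $conn argument above):

	# Hypothetical Zeek script reacting to the new StartTLS event.
	event LDAP::starttls(c: connection)
		{
		print fmt("LDAP StartTLS negotiated on %s", c$uid);
		}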
@@ -126,125 +126,206 @@ public type Result = unit {
	# https://tools.ietf.org/html/rfc4511#section-4.1.10
};

# 1.2.840.48018.1.2.2 (MS KRB5 - Microsoft Kerberos 5)
const GSSAPI_MECH_MS_KRB5 = "1.2.840.48018.1.2.2";

# Supported SASL stripping modes.
type MessageMode = enum {
	MS_KRB5 = 1,         # Payload starts with a 4 byte length followed by a wrap token that may or may not be sealed.
	TLS = 2,             # Client/server used StartTLS, forward to SSL analyzer.
	MAYBE_ENCRYPTED = 3, # Use a heuristic to determine encrypted traffic.
	CLEARTEXT = 4,       # Assume cleartext.
	ENCRYPTED = 5,       # Assume encrypted.
};

type Ctx = struct {
	messageMode: MessageMode;  # Message dispatching mode
	saslMechanism: string;     # The SASL mechanism selected by the client.
	startTlsRequested: bool;   # Did the client use the StartTLS extended request?
};

#-----------------------------------------------------------------------------
public type Messages = unit {
	: MessageWrapper[];
	%context = Ctx;
	: MessageDispatch(self.context())[];
};

#-----------------------------------------------------------------------------
type SASLLayer = unit {
	# For the time being (before we support parsing the SASL layer) this unit
	# is used by MessageWrapper below to strip it (SASL) so that the parser
	# can attempt to resume parsing afterward. It also sets the success flag
	# if '\x30' is found, otherwise backtracks so that we can deal with encrypted
	# SASL payloads without raising a parse error.
	var success: bool = False;
	: bytes &until=b"\x30" {
		self.success = True;
public type MessageDispatch = unit(ctx: Ctx&) {
	switch ( ctx.messageMode ) {
		MessageMode::Undef -> : Message(ctx);
		MessageMode::MS_KRB5 -> : SaslMsKrb5Stripper(ctx);
		MessageMode::TLS -> : TlsForward; # never returns
		MessageMode::MAYBE_ENCRYPTED -> : MaybeEncrypted(ctx);
		MessageMode::CLEARTEXT -> : Message(ctx);
		MessageMode::ENCRYPTED -> : EncryptedMessage;
	};
};

#-----------------------------------------------------------------------------
type MaybeEncrypted = unit(ctx: Ctx&) {
	# A plaintext LDAP message always starts with at least 3 bytes and the first
	# byte is 0x30 for the sequence. A SASL encrypted message starts with a 4 byte
	# length field. The heuristic here is that if the first byte is a 0x30,
	# assume it's unencrypted LDAP. This should be pretty good: if it was an
	# encrypted/SASL wrapped message, it would have a size between 0x30000000 and
	# 0x30FFFFFF, meaning at least a size of ~768MB, which seems unlikely.
	var start: iterator<stream>;
	var saslLen: uint64;
	var mech: bytes;

	on %init {
		self.start = self.input();
		# Don't have starts_with() on string, work around that.
		# https://github.com/zeek/spicy/issues/1807
		self.mech = ctx.saslMechanism.encode(spicy::Charset::UTF8);
	}

	on %error {
		self.backtrack();
	first: uint8 {
		if ( $$ == 0x30 ) {
			ctx.messageMode = MessageMode::CLEARTEXT;
		} else {
			ctx.messageMode = MessageMode::ENCRYPTED;
		}
	}

	# As a further heuristic, if encrypted mode was decided and the client
	# requested GSSAPI or GSS-SPNEGO (or we just didn't see it), peek a bit
	# into the SASL payload and check if it starts with a 0504 (WRAP_TOKEN).
	# If so, switch into KRB mode assuming that's what is being used and
	# have a chance of seeing some more plaintext LDAP in non-sealed tokens.
	rem: uint8[3] if ( ctx.messageMode == MessageMode::ENCRYPTED && (|self.mech| == 0 || self.mech.starts_with(b"GSS")) ) {
		self.saslLen = (uint64(self.first) << 24) + (uint64($$[0]) << 16) + (uint64($$[1]) << 8) + uint64($$[2]);
	}

	: uint16 if ( self.saslLen >= 2 ) {
		if ( $$ == 0x0504 ) {
			ctx.messageMode = MessageMode::MS_KRB5;
		}
	}

	# Rewind the input.
	: void {
		# Prevent MessageDispatch from recursing endlessly.
		assert ctx.messageMode != MessageMode::MAYBE_ENCRYPTED;
		self.set_input(self.start);
	}

	# One recursion to parse with the new ctx.messageMode setting.
	: MessageDispatch(ctx);
};
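To make the heuristic above concrete (an illustrative walk-through, not part of the commit): a cleartext LDAP PDU begins with the SEQUENCE tag 0x30, e.g. 30 84 00 00 01 2d. Read as a big-endian SASL length prefix, those first four bytes would mean 0x30840000, roughly 800 MB of wrapped payload in a single message; sizes like that are implausible, which is why a leading 0x30 is taken as cleartext and anything else switches the mode to ENCRYPTED.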

#-----------------------------------------------------------------------------
public type MessageWrapper = unit {
	# A wrapper around 'Message'. First, we try to parse a Message unit.
	# There are two possible outcomes:
	# (1) Success -> We consumed all bytes and successfully parsed a Message unit
	# (2) No success -> self.backtrack() is called in the Message unit,
	#     so effectively we didn't consume any bytes yet.
	# The outcome can be determined by checking the `success` variable of the Message unit.

	# This success variable is different, because this one keeps track of the status for the MessageWrapper object.
	var success: bool = False;
	var message: Message;

	# Here, we try to parse the message...
	: Message &try {
		# ... and only if the Message unit successfully parsed, we can set
		# the status of this MessageWrapper's success to 'True'
		if ( $$.success == True ) {
			self.success = True;
			self.message = $$;
		}
	}

	# If we failed to parse the message, then we're going to scan the remaining bytes for the '\x30'
	# start byte and try to parse a Message starting from that byte. This effectively
	# strips the SASL layer if SASL signing was enabled. Until now, I haven't found a
	# better way to scan / determine the exact SASL header length, so we'll stick with this
	# for the time being. If the entire LDAP packet was encrypted with SASL, then we skip parsing for
	# now (in the long run we need to be parsing SASL/GSSAPI instead, in which case encrypted payloads
	# are just another message type).

	# SASLLayer (see unit above) just consumes bytes &until=b"\x30" or backtracks if it isn't found
	# and sets a success flag we can use later to decide if those bytes contain a parsable message.
	var sasl_success: bool = False;
	: SASLLayer &try if ( self.success == False ) {
		if ( $$.success == True ) {
			self.sasl_success = True;
		}
	}
	var remainder: bytes;

	# SASLLayer consumes the delimiter ('\x30'), and because this is the first byte of a valid LDAP message
	# we should re-add it to the remainder if the delimiter was found. If the delimiter was not found, we
	# leave the remainder empty, but note that the bytes must be consumed either way to avoid stalling the
	# parser and causing an infinite loop error.
	: bytes &eod if ( self.success == False ) {
		if ( self.sasl_success == True ) {
			self.remainder = b"\x30" + $$;
		}
	}

	# Again, try to parse a Message unit. Be aware that this will sometimes fail if the '\x30' byte is
	# also present in the SASL header.

	# Also, we could try to do this recursively or try a few iterations, but for now I would suggest
	# trying this extra parsing once to get the best cost/benefit tradeoff.
	: Message &try &parse-from=self.remainder if ( self.success == False && self.sasl_success == True ) {
		if ( $$.success == True ) {
			self.success = True;
			self.message = $$;
		}
	}

	# If we still didn't manage to parse a message (so the &try resulted in another backtrack()),
	# then this is probably an encrypted LDAP message, so skip it.

} &convert=self.message;
type EncryptedMessage = unit {
	len: uint32;
	: skip bytes &size=self.len;
};

#-----------------------------------------------------------------------------
public type Message = unit {
type TlsForward = unit {
	# Just consume everything. This is hooked in ldap_zeek.spicy.
	chunk: bytes &chunked &eod;
};

type KrbWrapToken = unit {
	# https://datatracker.ietf.org/doc/html/rfc4121#section-4.2.6.2

	# Number of bytes to expect *after* the payload.
	var trailer_ec: uint64;
	var header_ec: uint64;

	ctx_flags: bitfield(8) {
		send_by_acceptor: 0;
		sealed: 1;
		acceptor_subkey: 2;
	};
	filler: skip b"\xff";
	ec: uint16;   # extra count
	rrc: uint16 { # right rotation count
		# Handle rrc == ec or rrc == 0.
		if ( self.rrc == self.ec ) {
			self.header_ec = self.ec;
		} else if ( self.rrc == 0 ) {
			self.trailer_ec = self.ec;
		} else {
			if ( ! self.ctx_flags.sealed )
				# If it's sealed, we'll consume until &eod anyhow
				# and ec/rrc shouldn't apply, otherwise, bail.
				throw "Unhandled rc %s and ec %s" % (self.ec, self.rrc);
		}
	}

	snd_seq: uint64;
	header_e: skip bytes &size=self.header_ec;
};

#-----------------------------------------------------------------------------
type SaslMsKrb5Stripper = unit(ctx: Ctx&) {
	# This is based on Wireshark output and example traffic we have. There's always
	# a 4 byte length field followed by the krb5_tok_id field in messages after
	# MS_KRB5 was selected. I haven't read enough specs to understand if it's
	# just this one case that works, or others could use the same stripping.
	var switch_size: uint64;

	len: uint32;
	krb5_tok_id: uint16;

	switch ( self.krb5_tok_id ) {
		0x0504 -> krb_wrap_token: KrbWrapToken;
		* -> : void;
	};

	: skip bytes &size=0 {
		self.switch_size = self.len - (self.offset() - 4);
		if ( self?.krb_wrap_token )
			self.switch_size -= self.krb_wrap_token.trailer_ec;
	}

	switch ( self?.krb_wrap_token && ! self.krb_wrap_token.ctx_flags.sealed ) {
		True -> : Message(ctx)[] &eod;
		* -> : skip bytes &eod;
	} &size=self.switch_size;

	# Consume the wrap token trailer, if any.
	trailer_e: skip bytes &size=self.krb_wrap_token.trailer_ec if (self?.krb_wrap_token);
};

#-----------------------------------------------------------------------------
public type Message = unit(ctx: Ctx&) {
	var messageID: int64;
	var opcode: ProtocolOpcode = ProtocolOpcode::Undef;
	var applicationBytes: bytes;
	var unsetResultDefault: Result;
	var result_: Result& = self.unsetResultDefault;
	var obj: string = "";
	var arg: string = "";
	var success: bool = False;
	var seqHeaderLen: uint64;
	var msgLen: uint64;
	var opLen: uint64;

	: ASN1::ASN1Message(True) {
		if (($$.head.tag.type_ == ASN1::ASN1Type::Sequence) &&
		    ($$.body?.seq) &&
		    (|$$.body.seq.submessages| >= 2)) {
			if ($$.body.seq.submessages[0].body?.num_value) {
				self.messageID = $$.body.seq.submessages[0].body.num_value;
			}
			if ($$.body.seq.submessages[1]?.application_id) {
				self.opcode = cast<ProtocolOpcode>(cast<uint8>($$.body.seq.submessages[1].application_id));
				self.applicationBytes = $$.body.seq.submessages[1].application_data;
			}
		}
	seqHeader: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Universal && $$.tag.type_ == ASN1::ASN1Type::Sequence) {
		self.msgLen = $$.len.len;
	}

	# Use offset() to determine how many bytes the seqHeader took. This
	# needs to be done after the seqHeader field hook.
	: void {
		self.seqHeaderLen = self.offset();
	}

	messageID_header: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Universal && $$.tag.type_ == ASN1::ASN1Type::Integer);
	: ASN1::ASN1Body(self.messageID_header, False) {
		self.messageID = $$.num_value;
	}

	protocolOp: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Application) {
		self.opcode = cast<ProtocolOpcode>(cast<uint8>($$.tag.type_));
		self.opLen = $$.len.len;
	}

	switch ( self.opcode ) {
		ProtocolOpcode::BIND_REQUEST -> BIND_REQUEST: BindRequest(self);
		ProtocolOpcode::BIND_RESPONSE -> BIND_RESPONSE: BindResponse(self);
		ProtocolOpcode::BIND_REQUEST -> BIND_REQUEST: BindRequest(self, ctx);
		ProtocolOpcode::BIND_RESPONSE -> BIND_RESPONSE: BindResponse(self, ctx);
		ProtocolOpcode::UNBIND_REQUEST -> UNBIND_REQUEST: UnbindRequest(self);
		ProtocolOpcode::SEARCH_REQUEST -> SEARCH_REQUEST: SearchRequest(self);
		ProtocolOpcode::SEARCH_RESULT_ENTRY -> SEARCH_RESULT_ENTRY: SearchResultEntry(self);
@@ -262,22 +343,20 @@ public type Message = unit {
	# just commenting this out, it will stop processing LDAP Messages in this connection
		ProtocolOpcode::ADD_REQUEST -> ADD_REQUEST: NotImplemented(self);
		ProtocolOpcode::COMPARE_REQUEST -> COMPARE_REQUEST: NotImplemented(self);
		ProtocolOpcode::EXTENDED_REQUEST -> EXTENDED_REQUEST: NotImplemented(self);
		ProtocolOpcode::EXTENDED_RESPONSE -> EXTENDED_RESPONSE: NotImplemented(self);
		ProtocolOpcode::EXTENDED_REQUEST -> EXTENDED_REQUEST: ExtendedRequest(self, ctx);
		ProtocolOpcode::EXTENDED_RESPONSE -> EXTENDED_RESPONSE: ExtendedResponse(self, ctx);
		ProtocolOpcode::INTERMEDIATE_RESPONSE -> INTERMEDIATE_RESPONSE: NotImplemented(self);
		ProtocolOpcode::MOD_DN_REQUEST -> MOD_DN_REQUEST: NotImplemented(self);
		ProtocolOpcode::SEARCH_RESULT_REFERENCE -> SEARCH_RESULT_REFERENCE: NotImplemented(self);
	} &parse-from=self.applicationBytes if ( self.opcode );
	} &size=self.opLen;

	on %error {
		self.backtrack();
	}
	# Ensure some invariants hold after parsing the command.
	: void &requires=(self.offset() >= self.seqHeaderLen);
	: void &requires=(self.msgLen >= (self.offset() - self.seqHeaderLen));

	on %done {
		self.success = True;
	}

} &requires=((self?.messageID) && (self?.opcode) && (self.opcode != ProtocolOpcode::Undef));
	# Eat the controls field if it exists.
	: skip bytes &size=self.msgLen - (self.offset() - self.seqHeaderLen);
};

#-----------------------------------------------------------------------------
# Bind Operation
@@ -286,20 +365,110 @@ public type Message = unit {
public type BindAuthType = enum {
	BIND_AUTH_SIMPLE = 0,
	BIND_AUTH_SASL = 3,

	# https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-adts/8b9dbfb2-5b6a-497a-a533-7e709cb9a982
	# 5.1.1.1.3 Sicily Authentication
	SICILY_PACKAGE_DISCOVERY = 9,
	SICILY_NEGOTIATE = 10,
	SICILY_RESPONSE = 11,
};

type GSS_SPNEGO_negTokenInit = unit {
	oidHeader: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::Universal && $$.tag.type_ == ASN1::ASN1Type::ObjectIdentifier);
	oid: ASN1::ASN1ObjectIdentifier(self.oidHeader.len.len) &requires=(self.oid.oidstring == "1.3.6.1.5.5.2");

	# TODO: Parse the rest of negTokenInit.
	: skip bytes &eod;
};

# Peek into GSS-SPNEGO payload and ensure it is indeed GSS-SPNEGO,
# or GSS-SPNEGO with an NTLMSSP payload that starts with "NTLMSSP".
type GSS_SPNEGO_Init = unit {
	# This is the optional octet string in SaslCredentials.
	credentialsHeader: ASN1::ASN1Header &requires=($$.tag.type_ == ASN1::ASN1Type::OctetString);

	# Now we either have the initial message as specified in RFC2743 or
	# a continuation from RFC4178, or an "NTLMSSP" signature.
	#
	# 60 -> APPLICATION [0] https://datatracker.ietf.org/doc/html/rfc2743#page-81
	# a1 -> CHOICE [1] https://www.rfc-editor.org/rfc/rfc4178#section-4.2
	# "NTLMSSP" https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-nlmp/907f519d-6217-45b1-b421-dca10fc8af0d
	#
	switch {
		-> spnegoInitByte: uint8(0x60);
		-> spnegoChoiceByte: uint8(0xa1);
		-> ntlmSignature: skip b"NTLMSSP"; # Unsupported, should forward to child analyzer!
	};

	spnegoLen: skip ASN1::LengthType if (self?.spnegoInitByte || self?.spnegoChoiceByte);

	# Peek into the SPNEGO_negTokenInit
	spnegoInitial: skip GSS_SPNEGO_negTokenInit if (self?.spnegoInitByte);
};

type SaslCredentials = unit() {
	mechanism: ASN1::ASN1Message(True) &convert=$$.body.str_value;
	# TODO: if we want to parse the (optional) credentials string
	mechanism: ASN1::ASN1Message(False) &convert=$$.body.str_value;

	# Peek into the GSS-SPNEGO payload if we have any.
	switch ( self.mechanism ) {
		"GSS-SPNEGO" -> gss_spnego: GSS_SPNEGO_Init;
		* -> : skip bytes &eod;
	};
};

type SicilyMessage = unit() {
	# Just ensure the signature matches. We could do more,
	# but it'd be better to forward to an NTLM analyzer.
	signature: skip b"NTLMSSP";
	var signature_decoded: string = "NTLMSSP";
};

type GSS_SPNEGO_Subsequent = unit {
	switch {
		-> spnegoChoiceByte: uint8(0xa1);
		-> ntmlSignature: skip b"NTLMSSP"; # Unsupported, should forward to NTLM!
	};

	spnegoChoiceLen: skip ASN1::LengthType if (self?.spnegoChoiceByte);
	negTokenResp: GSS_SPNEGO_negTokenResp if (self?.spnegoChoiceByte);
};

type GSS_SPNEGO_negTokenResp = unit {
	var accepted: bool;
	var supportedMech: ASN1::ASN1Message;
	var responseToken: optional<bytes>;

	# Parse the contained Sequence.
	seq: ASN1::ASN1Message(True) {
		for ( msg in $$.body.seq.submessages ) {
			# https://www.rfc-editor.org/rfc/rfc4178#section-4.2.2
			if ( msg.application_id == 0 ) {
				self.accepted = msg.application_data == b"\x0a\x01\x00";
			} else if ( msg.application_id == 1 ) {
				self.supportedMech = msg;
			} else if ( msg.application_id == 2 ) {
				self.responseToken = msg.application_data;
			} else if ( msg.application_id == 3 ) {
				# ignore mechListMIC
			} else {
				throw "unhandled NegTokenResp id %s" % msg.application_id;
			}
		}
	}

	switch ( self?.supportedMech ) {
		True -> supportedMechOid: ASN1::ASN1Message(False) &convert=$$.body.str_value;
		* -> : void;
	} &parse-from=self.supportedMech.application_data;
};

# TODO(fox-ds): A helper unit for requests for which no handling has been implemented.
# Eventually all uses of this unit should be replaced with actual parsers so this unit can be removed.
type NotImplemented = unit(inout message: Message) {
	# Do nothing
	: skip bytes &eod;
};

type BindRequest = unit(inout message: Message) {
type BindRequest = unit(inout message: Message, ctx: Ctx&) {
	version: ASN1::ASN1Message(True) &convert=$$.body.num_value;
	name: ASN1::ASN1Message(True) &convert=$$.body.str_value {
		message.obj = self.name;
@@ -313,25 +482,79 @@ type BindRequest = unit(inout message: Message) {
		self.authType = cast<BindAuthType>(cast<uint8>($$.application_id));
		self.authData = $$.application_data;
	}
	if ((self.authType == BindAuthType::BIND_AUTH_SIMPLE) && (|self.authData| > 0)) {
		self.simpleCreds = self.authData.decode();
		if (|self.simpleCreds| > 0) {
			message.arg = self.simpleCreds;
		}

	switch ( |self.authData| > 0 ) {
		True -> switch ( self.authType ) {
			BindAuthType::BIND_AUTH_SIMPLE ->
				: void {
					self.simpleCreds = self.authData.decode();
					message.arg = self.simpleCreds;
				}

			BindAuthType::BIND_AUTH_SASL ->
				saslCreds: SaslCredentials {
					message.arg = self.saslCreds.mechanism;
					ctx.saslMechanism = self.saslCreds.mechanism;
				}

			BindAuthType::SICILY_NEGOTIATE, BindAuthType::SICILY_RESPONSE ->
				sicilyMessage: SicilyMessage {
					message.arg = self.sicilyMessage.signature_decoded;
				}
			* -> : void;
		};
		* -> : void;
	} &parse-from=self.authData;
} &requires=(self?.authType && (self.authType != BindAuthType::Undef));

type ServerSaslCreds = unit {
	serverSaslCreds: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific && $$.tag.type_ == ASN1::ASN1Type(7));
	payload: bytes &size=self.serverSaslCreds.len.len;
};

type BindResponse = unit(inout message: Message, ctx: Ctx&) {
	: Result {
		message.result_ = $$;

		# The SASL authentication was successful. We do not actually
		# know if the following messages are encrypted or not. This may be
		# mechanism and parameter specific. For example SCRAM-SHA512 or NTLM
		# will continue to be cleartext, while SRP or GSS-API would be encrypted.
		#
		# Switch messageMode into trial mode which is explored via MessageDispatch
		# and the MaybeEncrypted unit.
		#
		# Note, messageMode may be changed to something more specific like
		# MS_KRB5 below.
		if ( |ctx.saslMechanism| > 0 && $$.code == ResultCode::SUCCESS ) {
			ctx.messageMode = MessageMode::MAYBE_ENCRYPTED;
		}
	}

	# Try to parse serverSaslCreds if there's any input remaining. This
	# unit is parsed with &size, so &eod here works.
	#
	# Technically we should be able to tell from the ASN.1 structure
	# if the serverSaslCreds field exists or not. But we're not sure we can
	# check whether there are any bytes left at this point outside of passing
	# in the length and playing with offset().
	serverSaslCreds: ServerSaslCreds[] &eod;

	# If the client requested GSS-SPNEGO, try to parse the server's response
	# to switch message mode.
	gss_spnego: GSS_SPNEGO_Subsequent &parse-from=self.serverSaslCreds[0].payload
	    if (ctx.saslMechanism == "GSS-SPNEGO" && |self.serverSaslCreds| > 0) {

		if ( $$?.negTokenResp ) {
			local token = $$.negTokenResp;
			if ( token.accepted && token?.supportedMechOid ) {
				if ( token.supportedMechOid == GSSAPI_MECH_MS_KRB5 && token.responseToken ) {
					ctx.messageMode = MessageMode::MS_KRB5;
				}
			}
		}
	}
	saslCreds: SaslCredentials() &parse-from=self.authData if ((self.authType == BindAuthType::BIND_AUTH_SASL) &&
	                                                           (|self.authData| > 0)) {
		message.arg = self.saslCreds.mechanism;
	}
} &requires=((self?.authType) && (self.authType != BindAuthType::Undef));

type BindResponse = unit(inout message: Message) {
	: Result {
		message.result_ = $$;
	}

	# TODO: if we want to parse SASL credentials returned
};

#-----------------------------------------------------------------------------
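Read together, the pieces above form a small mode state machine. For a typical exchange (a summary of the logic in this hunk, not additional behavior): a client binds with BIND_AUTH_SASL and mechanism "GSS-SPNEGO", which records ctx.saslMechanism; a successful BindResponse then moves messageMode to MAYBE_ENCRYPTED; and if the server's negTokenResp is accepted with supportedMech 1.2.840.48018.1.2.2 (MS KRB5) and a responseToken, the mode is tightened to MS_KRB5 so that subsequent messages run through SaslMsKrb5Stripper.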
@@ -879,16 +1102,61 @@ type AbandonRequest = unit(inout message: Message) {
#-----------------------------------------------------------------------------
# Extended Operation
# https://tools.ietf.org/html/rfc4511#section-4.12
type ExtendedRequest = unit(inout message: Message, ctx: Ctx&) {
	var requestValue: bytes;
	header: ASN1::ASN1Header &requires=($$.tag.class == ASN1::ASN1Class::ContextSpecific);
	requestName: bytes &size=self.header.len.len &convert=$$.decode(spicy::Charset::ASCII) {
		message.obj = $$;
	}

# TODO: implement ExtendedRequest
# type ExtendedRequest = unit(inout message: Message) {
#
# };
	# If there are more bytes to parse, it's the requestValue.
	: ASN1::ASN1Message(False)
	    &requires=($$.head.tag.class == ASN1::ASN1Class::ContextSpecific)
	    if ( message.opLen > self.offset() ) {

# TODO: implement ExtendedResponse
# type ExtendedResponse = unit(inout message: Message) {
#
# };
		self.requestValue = $$.application_data;
	}

	on %done {
		# Did the client request StartTLS?
		#
		# https://datatracker.ietf.org/doc/html/rfc4511#section-4.14.1
		if ( self.requestName == "1.3.6.1.4.1.1466.20037" )
			ctx.startTlsRequested = True;
	}
};

#-----------------------------------------------------------------------------
type ExtendedResponseEntry = unit(inout r: ExtendedResponse) {
	: ASN1::ASN1Message(False) &requires=($$.head.tag.class == ASN1::ASN1Class::ContextSpecific) {
		if ( $$.head.tag.type_ == ASN1::ASN1Type(10) )
			r.responseName = $$.application_data;
		else if ( $$.head.tag.type_ == ASN1::ASN1Type(11) )
			r.responseValue = $$.application_data;
		else
			throw "Unhandled extended response tag %s" % $$.head.tag;
	}
};

#-----------------------------------------------------------------------------
type ExtendedResponse = unit(inout message: Message, ctx: Ctx&) {
	var responseName: bytes;
	var responseValue: bytes;
	: Result {
		message.result_ = $$;
	}

	# Try to parse two ASN1 entries if there are bytes left in the unit.
	# Both are optional and identified by context-specific tagging.
	: ExtendedResponseEntry(self) if ( message.opLen > self.offset() );
	: ExtendedResponseEntry(self) if ( message.opLen > self.offset() );

	on %done {
		# Client had requested StartTLS and it was successful? Switch to SSL.
		if ( ctx.startTlsRequested && message.result_.code == ResultCode::SUCCESS )
			ctx.messageMode = MessageMode::TLS;
	}
};

#-----------------------------------------------------------------------------
# IntermediateResponse Message
@@ -899,6 +1167,6 @@ type AbandonRequest = unit(inout message: Message) {
#
# };

on LDAP::MessageWrapper::%done {
on LDAP::Message::%done {
	spicy::accept_input();
}
src/analyzer/protocol/ldap/ldap_zeek.spicy (new file, +12)
@@ -0,0 +1,12 @@
module LDAP_Zeek;

import LDAP;
import zeek;

on LDAP::TlsForward::%init {
	zeek::protocol_begin("SSL");
}

on LDAP::TlsForward::chunk {
	zeek::protocol_data_in(zeek::is_orig(), self.chunk);
}
@@ -90,8 +90,8 @@ type ModbusTCP_PDU(is_orig: bool) = record {

type ModbusTCP_TransportHeader = record {
	tid: uint16; # Transaction identifier
	pid: uint16; # Protocol identifier
	len: uint16; # Length of everything after this field
	pid: uint16 &enforce(pid == 0); # Protocol identifier
	len: uint16 &enforce(len >= 2); # Length of everything after this field
	uid: uint8;  # Unit identifier (previously 'slave address')
	fc: uint8;   # MODBUS function code (see function_codes enum)
} &byteorder=bigendian, &let {
@@ -5,4 +5,5 @@ zeek_add_plugin(
	POP3.cc
	Plugin.cc
	BIFS
	consts.bif
	events.bif)
@@ -3,8 +3,6 @@

#include "zeek/analyzer/protocol/pop3/POP3.h"

#include "zeek/zeek-config.h"

#include <cctype>
#include <string>
#include <vector>

@@ -12,6 +10,7 @@
#include "zeek/Base64.h"
#include "zeek/Reporter.h"
#include "zeek/analyzer/Manager.h"
#include "zeek/analyzer/protocol/pop3/consts.bif.h"
#include "zeek/analyzer/protocol/pop3/events.bif.h"

namespace zeek::analyzer::pop3 {
@@ -41,6 +40,7 @@ POP3_Analyzer::POP3_Analyzer(Connection* conn) : analyzer::tcp::TCP_ApplicationA
	authLines = 0;

	mail = nullptr;
	unknown_client_cmds = 0;

	cl_orig = new analyzer::tcp::ContentLine_Analyzer(conn, true);
	AddSupportAnalyzer(cl_orig);
@@ -205,6 +205,19 @@ void POP3_Analyzer::ProcessRequest(int length, const char* line) {
	// keep a list of pending commands.
	cmds.emplace_back(line);

	// Prevent unbounded state growth of cmds if there are no matching
	// server replies by simply dropping the oldest command.
	//
	// This may be caused by packet drops on the server side, one-sided
	// traffic, or analyzing the wrong protocol (Redis), etc.
	if ( zeek::BifConst::POP3::max_pending_commands > 0 ) {
		if ( cmds.size() > zeek::BifConst::POP3::max_pending_commands ) {
			Weird("pop3_client_too_many_pending_commands");

			cmds.pop_front();
		}
	}

	if ( cmds.size() == 1 )
		// Not waiting for another server response,
		// so we can process it immediately.
@@ -236,10 +249,19 @@ void POP3_Analyzer::ProcessClientCmd() {

	if ( cmd_code == -1 ) {
		if ( ! waitingForAuthentication ) {
			Weird("pop3_client_command_unknown");
			Weird("pop3_client_command_unknown", (tokens.size() > 0 ? tokens[0].c_str() : "???"));
			if ( subState == detail::POP3_WOK )
				subState = detail::POP3_OK;

			++unknown_client_cmds;

			if ( zeek::BifConst::POP3::max_unknown_client_commands > 0 ) {
				if ( unknown_client_cmds > zeek::BifConst::POP3::max_unknown_client_commands ) {
					AnalyzerViolation("too many unknown client commands");
				}
			}
		}

		return;
	}
@@ -299,6 +321,7 @@ void POP3_Analyzer::ProcessClientCmd() {
	if ( masterState == detail::POP3_AUTHORIZATION ) {
		POP3Event(pop3_request, true, cmd, message);
		if ( ! *message ) {
			// This is the client requesting a list of AUTH mechanisms available.
			requestForMultiLine = true;
			state = detail::AUTH;
			subState = detail::POP3_WOK;
@@ -555,7 +578,7 @@ void POP3_Analyzer::ProcessReply(int length, const char* line) {
	AnalyzerViolation(util::fmt("unknown server command (%s)", (tokens.size() > 0 ? tokens[0].c_str() : "???")),
	                  line, length);

	Weird("pop3_server_command_unknown");
	Weird("pop3_server_command_unknown", (tokens.size() > 0 ? tokens[0].c_str() : "???"));
	if ( subState == detail::POP3_WOK )
		subState = detail::POP3_OK;
}
@@ -4,11 +4,9 @@

#pragma once

#include <algorithm>
#include <string>
#include <vector>

#include "zeek/analyzer/protocol/login/NVT.h"
#include "zeek/analyzer/protocol/mime/MIME.h"
#include "zeek/analyzer/protocol/tcp/ContentLine.h"
#include "zeek/analyzer/protocol/tcp/TCP.h"
@@ -106,6 +104,7 @@ protected:
	analyzer::mime::MIME_Mail* mail;
	std::list<std::string> cmds;
	zeek_uint_t unknown_client_cmds;

private:
	bool tls;
src/analyzer/protocol/pop3/consts.bif (new file, +2)
@@ -0,0 +1,2 @@
const POP3::max_pending_commands: count;
const POP3::max_unknown_client_commands: count;
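These two constants are the knobs behind the bounded-state checks added to POP3.cc above. A hedged tuning sketch in Zeek script (the values are illustrative; the shipped defaults are not visible in this diff):

	# Hypothetical local.zeek tuning for the new POP3 limits.
	redef POP3::max_pending_commands = 50;
	redef POP3::max_unknown_client_commands = 20;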
@@ -9,7 +9,7 @@ import zeek;
# The interface to the C++ code that handles the decryption of the INITIAL packet payload using well-known keys
public function decrypt_crypto_payload(
	version: uint32,
	all_data: bytes,
	data: bytes,
	connection_id: bytes,
	encrypted_offset: uint64,
	payload_offset: uint64,
@@ -17,27 +17,104 @@ public function decrypt_crypto_payload(
): bytes &cxxname="QUIC_decrypt_crypto_payload";


##############
## Context - tracked in one connection
##############

# Can we decrypt?
function can_decrypt(long_header: LongHeaderPacket, context: ConnectionIDInfo, is_client: bool): bool {
function can_decrypt(long_header: LongHeaderPacket, context: Context, crypto: CryptoSinkUnit&): bool {

	if ( ! long_header.is_initial )
		return False;

	if ( is_client )
		return ! context.client_initial_processed;
	if ( crypto == Null )
		return False;

	# This is the responder, can only decrypt if we have an initial
	# destination_id from the client
	return context.client_initial_processed
	    && |context.initial_destination_conn_id| > 0
	    && ! context.server_initial_processed;
	# Can only decrypt the responder if we've seen the initial destination conn id.
	if ( ! crypto.is_orig && ! context.initial_destination_conn_id )
		return False;

	# Only attempt decryption if we haven't flushed some SSL data yet.
	return ! crypto.finished;
}

type ConnectionIDInfo = struct {
function reset_crypto(context: Context&) {
	# Recreate all the crypto state on the next %init of Packet.
	zeek::protocol_handle_close(context.ssl_handle);
	unset context.ssl_handle;
	context.client_crypto = Null;
	context.server_crypto = Null;
	context.client_sink = Null;
	context.server_sink = Null;
	context.initial_destination_conn_id = Null;
}

# This unit is connected with the server and client sinks receiving
# CRYPTO frames and forwards data to the SSL handle in the context.
type CryptoSinkUnit = unit(is_orig: bool, context: Context&) {
	var buffered: bytes;
	var length: uint32 = 0;
	var is_orig: bool = is_orig;
	var finished: bool;

	# The first 4 bytes of crypto data contain the expected tag and a
	# 24bit length from the TLS HandshakeMessage. Extract the length
	# so we can determine when all CRYPTO frames have arrived.
	#
	# https://datatracker.ietf.org/doc/html/rfc8446#section-4
	#
	# struct {
	#     HandshakeType msg_type;    /* handshake type */
	#     uint24 length;             /* remaining bytes in message */
	#     ...
	#
	: uint8 {
		self.buffered += $$;
	}

	len: uint8[3] {
		self.length = (cast<uint32>($$[0]) << 16) + (cast<uint32>($$[1]) << 8) + cast<uint32>($$[2]) + 4;

		self.buffered += $$[0];
		self.buffered += $$[1];
		self.buffered += $$[2];
	}

	: void &requires=(self.length <= 2**14 + 256) { # The length MUST NOT exceed 2^14 + 256 bytes (RFC 8446)

		# The client or server hello data is forwarded to the SSL analyzer as a
		# TLSPlaintext record with legacy_record_version set to \x03\x03 (1.3).
		#
		# enum {
		#     invalid(0),
		#     change_cipher_spec(20),
		#     alert(21),
		#     handshake(22),
		#     application_data(23),
		#     (255)
		# } ContentType;
		#
		# struct {
		#     ContentType type;
		#     ProtocolVersion legacy_record_version;
		#     uint16 length;
		#     opaque fragment[TLSPlaintext.length];
		# } TLSPlaintext;
		#
		# https://datatracker.ietf.org/doc/html/rfc8446#section-5.1
		local length_bytes = pack(cast<uint16>(self.length), spicy::ByteOrder::Big);
		zeek::protocol_data_in(is_orig, b"\x16\x03\x03" + length_bytes + self.buffered, context.ssl_handle);
	}

	: bytes &chunked &size=(self.length - 4) {
		zeek::protocol_data_in(is_orig, $$, context.ssl_handle);
	}

	: void {
		self.finished = True;
	}
};
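A quick sanity check of the length extraction above (illustrative bytes, not from a capture): if the reassembled CRYPTO stream starts with 01 00 01 2c, the handshake type is 0x01 (ClientHello) and the uint24 length is (0x00 << 16) + (0x01 << 8) + 0x2c = 300, so self.length becomes 304, i.e. the message body plus the 4-byte handshake header that was already buffered.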

##############
## Context
##############
type Context = struct {
	client_cid_len: uint8;
	server_cid_len: uint8;
@@ -46,26 +123,13 @@ type ConnectionIDInfo = struct {
	# will make life miserable.
	#
	# https://quicwg.org/base-drafts/rfc9001.html#appendix-A
	initial_destination_conn_id: bytes;
	initial_destination_conn_id: optional<bytes>;

	# Currently, this analyzer assumes that ClientHello
	# and ServerHello fit into the first INITIAL packet (and
	# that there is only one that we're interested in).
	#
	# But minimally the following section sounds like this might not
	# hold in general, and Wireshark has samples showing
	# the handshake spanning across more than two INITIAL packets.
	# (quic-fragmented-handshakes.pcapng.gz)
	#
	# https://datatracker.ietf.org/doc/html/rfc9001#section-4.3
	#
	# A possible fix is to buffer up all CRYPTO frames across multiple
	# INITIAL packets until we see a non-INITIAL frame.
	#
	# We also rely heavily on getting originator and responder right.
	#
	client_initial_processed: bool;
	server_initial_processed: bool;
	# Track crypto state.
	client_crypto: CryptoSinkUnit&;
	client_sink: sink&;
	server_crypto: CryptoSinkUnit&;
	server_sink: sink&;

	ssl_handle: zeek::ProtocolHandle &optional;
};
@@ -272,16 +336,28 @@ public type LongHeaderPacket = unit {
};

# A QUIC Frame.
public type Frame = unit(header: LongHeaderPacket, from_client: bool, crypto_sink: sink&) {
public type Frame = unit(header: LongHeaderPacket, from_client: bool, crypto: CryptoSinkUnit, crypto_sink: sink&) {
	frame_type : uint8 &convert=cast<FrameType>($$);

	# TODO: add other FrameTypes as well
	switch ( self.frame_type ) {
		FrameType::ACK1 -> a: ACKPayload;
		FrameType::ACK2 -> b: ACKPayload;
		FrameType::ACK1 -> a: ACKPayload(FrameType::ACK1);
		FrameType::ACK2 -> b: ACKPayload(FrameType::ACK2);
		FrameType::CRYPTO -> c: CRYPTOPayload(from_client) {
			# Have the sink re-assemble potentially out-of-order cryptodata
			crypto_sink.write(self.c.cryptodata, self.c.offset.result_);

			# If the crypto unit has determined a valid length, ensure we
			# don't attempt to write more bytes into the sink. If it hasn't,
			# use 2000 bytes as an arbitrary limit required to observe the
			# length of the contained Client Hello or Server Hello.
			if ( crypto.length > 0 ) {
				if ( |crypto_sink| > crypto.length )
					throw "too much crypto data received %s > %s" % (|crypto_sink|, crypto.length);
			} else {
				if ( |crypto_sink| > 2000 )
					throw "too much crypto data without length received %s" % |crypto_sink|;
			}
		}
		FrameType::CONNECTION_CLOSE1 -> : ConnectionClosePayload(header);
		FrameType::PADDING -> : skip /\x00*/; # eat the padding
@@ -298,11 +374,26 @@ type CRYPTOPayload = unit(from_client: bool) {
	cryptodata: bytes &size=self.length.result_;
};

type ACKPayload = unit {
# https://datatracker.ietf.org/doc/html/rfc9000#ack-ranges
type ACKRange = unit {
	gap: VariableLengthInteger;
	ack_range_length: VariableLengthInteger;
};

type ACKECNCounts = unit {
	ect0: VariableLengthInteger;
	ect1: VariableLengthInteger;
	ecn_ce: VariableLengthInteger;
};

# https://datatracker.ietf.org/doc/html/rfc9000#name-ack-frames
type ACKPayload = unit(frame_type: FrameType) {
	latest_ack: VariableLengthInteger;
	ack_delay: VariableLengthInteger;
	ack_range_count: VariableLengthInteger;
	first_ack_range: VariableLengthInteger;
	ack_ranges: ACKRange[self.ack_range_count.result_];
	ecn_counts: ACKECNCounts if(frame_type == FrameType::ACK2);
};

type ConnectionClosePayload = unit(header: LongHeaderPacket) {
@ -393,35 +484,18 @@ public type ShortPacketPayload = unit {
|
|||
payload: skip bytes &eod;
|
||||
};
|
||||
|
||||
# TODO: investigate whether we can do something useful with this
|
||||
public type EncryptedLongPacketPayload = unit {
|
||||
payload: skip bytes &eod;
|
||||
};
|
||||
|
||||
# Buffer all crypto messages (which might be fragmented and unordered)
|
||||
# into the following unit.
|
||||
type CryptoBuffer = unit() {
|
||||
|
||||
var buffered: bytes;
|
||||
|
||||
: bytes &chunked &eod {
|
||||
self.buffered += $$;
|
||||
# print "crypto_buffer got data", |$$|, |self.buffered|;
|
||||
}
|
||||
};
|
||||
|
||||
##############
|
||||
# QUIC packet parsing
|
||||
#
|
||||
# A UDP datagram contains one or more QUIC packets.
|
||||
##############
|
||||
type Packet = unit(from_client: bool, context: ConnectionIDInfo&) {
|
||||
type Packet = unit(from_client: bool, context: Context&) {
|
||||
var decrypted_data: bytes;
|
||||
var full_packet: bytes;
|
||||
var packet_size: uint64 = 0;
|
||||
var start: iterator<stream>;
|
||||
|
||||
sink crypto_sink;
|
||||
var crypto_buffer: CryptoBuffer&;
|
||||
var crypto: CryptoSinkUnit&;
|
||||
var crypto_sink: sink&;
|
||||
|
||||
# Attach an SSL analyzer to this connection once.
|
||||
on %init {
|
||||
|
@ -430,6 +504,26 @@ type Packet = unit(from_client: bool, context: ConnectionIDInfo&) {
|
|||
}
|
||||
|
||||
self.start = self.input();
|
||||
|
||||
# Initialize crypto state in context for both sides if not already done.
|
||||
if ( context.client_crypto == Null ) {
|
||||
assert ! context.server_crypto;
|
||||
context.client_crypto = new CryptoSinkUnit(True, context);
|
||||
context.client_sink = new sink;
|
||||
context.client_sink.connect(context.client_crypto);
|
||||
|
||||
context.server_crypto = new CryptoSinkUnit(False, context);
|
||||
context.server_sink = new sink;
|
||||
context.server_sink.connect(context.server_crypto);
|
||||
}
|
||||
|
||||
if ( from_client ) {
|
||||
self.crypto = context.client_crypto;
|
||||
self.crypto_sink = context.client_sink;
|
||||
} else {
|
||||
self.crypto = context.server_crypto;
|
||||
self.crypto_sink = context.server_sink;
|
||||
}
|
||||
}
|
||||
|
||||
# Peek into the first byte and determine the header type.
|
||||
|
@ -443,7 +537,6 @@ type Packet = unit(from_client: bool, context: ConnectionIDInfo&) {
|
|||
self.set_input(self.start); # rewind
|
||||
}
|
||||
|
||||
|
||||
# Depending on the header, parse it and update the src/dest ConnectionID's
|
||||
switch ( self.first_byte.header_form ) {
|
||||
HeaderForm::SHORT -> short_header: ShortHeader(context.client_cid_len);
|
||||
|
@ -453,51 +546,54 @@ type Packet = unit(from_client: bool, context: ConnectionIDInfo&) {
|
|||
# If we see a retry packet from the responder, reset the decryption
|
||||
# context such that the next DCID from the client is used for decryption.
|
||||
if ( self.long_header.is_retry ) {
|
||||
context.client_initial_processed = False;
|
||||
context.server_initial_processed = False;
|
||||
context.initial_destination_conn_id = b"";
|
||||
reset_crypto(context);
|
||||
|
||||
# Allow re-opening the SSL analyzer the next time around.
|
||||
zeek::protocol_handle_close(context.ssl_handle);
|
||||
unset context.ssl_handle;
|
||||
self.crypto = Null;
|
||||
self.crypto_sink = Null;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
# Slurp in the whole packet if we determined we have a chance to decrypt.
|
||||
all_data: bytes &parse-at=self.start &eod if ( self?.long_header && can_decrypt(self.long_header, context, from_client) ) {
|
||||
self.crypto_buffer = new CryptoBuffer();
|
||||
self.crypto_sink.connect(self.crypto_buffer);
|
||||
: void {
|
||||
if ( self?.long_header && can_decrypt(self.long_header, context, self.crypto ) )
|
||||
# If we have parsed an initial packet that we can decrypt the payload,
|
||||
# determine the size to store into a buffer.
|
||||
self.packet_size = self.offset();
|
||||
}
|
||||
|
||||
# Buffer the whole packet if we determined we have a chance to decrypt.
|
||||
packet_data: bytes &parse-at=self.start &size=self.packet_size if ( self.packet_size > 0 ) {
|
||||
|
||||
if ( from_client ) {
|
||||
context.server_cid_len = self.long_header.dest_conn_id_len;
|
||||
context.client_cid_len = self.long_header.src_conn_id_len;
|
||||
|
||||
# This is the first INITIAL packet we attempt to decrypt and it is
|
||||
# coming from the client. Use its destination connection ID for
|
||||
# decryption purposes.
|
||||
if ( ! context.initial_destination_conn_id ) {
|
||||
context.initial_destination_conn_id = self.long_header.dest_conn_id;
|
||||
}
|
||||
|
||||
# This means that here, we can try to decrypt the initial packet!
|
||||
# All data is accessible via the `long_header` unit
|
||||
self.decrypted_data = decrypt_crypto_payload(
|
||||
self.long_header.version,
|
||||
self.all_data,
|
||||
self.long_header.dest_conn_id,
|
||||
self.packet_data,
|
||||
*context.initial_destination_conn_id,
|
||||
self.long_header.encrypted_offset,
|
||||
self.long_header.payload_length,
|
||||
from_client
|
||||
);
|
||||
|
||||
# Assuming that the client set up the connection, this can be considered the first
|
||||
# received Initial from the client. So disable change of ConnectionID's afterwards
|
||||
if ( |context.initial_destination_conn_id| == 0 ) {
|
||||
context.initial_destination_conn_id = self.long_header.dest_conn_id;
|
||||
}
|
||||
|
||||
} else {
|
||||
context.server_cid_len = self.long_header.src_conn_id_len;
|
||||
context.client_cid_len = self.long_header.dest_conn_id_len;
|
||||
|
||||
self.decrypted_data = decrypt_crypto_payload(
|
||||
self.long_header.version,
|
||||
self.all_data,
|
||||
context.initial_destination_conn_id,
|
||||
self.packet_data,
|
||||
*context.initial_destination_conn_id,
|
||||
self.long_header.encrypted_offset,
|
||||
self.long_header.payload_length,
|
||||
from_client
|
||||
|
@ -509,56 +605,29 @@ type Packet = unit(from_client: bool, context: ConnectionIDInfo&) {
|
|||
# connection.
|
||||
if ( |self.decrypted_data| == 0 )
|
||||
throw "decryption failed";
|
||||
|
||||
# We were able to decrypt the INITIAL packet. Confirm QUIC!
|
||||
spicy::accept_input();
|
||||
}
|
||||
|
||||
# Depending on the type of header and whether we were able to decrypt
|
||||
# some of it, parse the remaining payload.
|
||||
# If this packet has a SHORT header, consume until &eod, there's nothing
|
||||
# we can do with it anyhow.
|
||||
: ShortPacketPayload if (self.first_byte.header_form == HeaderForm::SHORT);
|
||||
: EncryptedLongPacketPayload if (self.first_byte.header_form == HeaderForm::LONG && |self.decrypted_data| == 0);
|
||||
|
||||
# If this was packet with a long header and decrypted data exists, attempt
|
||||
# to parse the plain QUIC frames from it.
|
||||
frames: Frame(self.long_header, from_client, self.crypto_sink)[] &parse-from=self.decrypted_data if (self.first_byte.header_form == HeaderForm::LONG && |self.decrypted_data| > 0);
|
||||
|
||||
# Once the Packet is fully parsed, pass the accumulated CRYPTO frames
|
||||
# to the SSL analyzer as handshake data.
|
||||
on %done {
|
||||
# print "packet done", zeek::is_orig(), self.first_byte.header_form, |self.decrypted_data|;
|
||||
|
||||
if ( self.crypto_buffer != Null && |self.crypto_buffer.buffered| > 0 ) {
|
||||
local handshake_data = self.crypto_buffer.buffered;
|
||||
|
||||
# The data is passed to the SSL analyzer as part of a HANDSHAKE (0x16) message with TLS1.3 (\x03\x03).
|
||||
# The 2 length bytes are also passed, followed by the actual CRYPTO blob which contains a CLIENT HELLO or SERVER HELLO
|
||||
local length_bytes = pack(cast<uint16>(|handshake_data|), spicy::ByteOrder::Big);
|
||||
zeek::protocol_data_in(
|
||||
from_client
|
||||
, b"\x16\x03\x03" + length_bytes + handshake_data
|
||||
, context.ssl_handle
|
||||
);
|
||||
|
||||
# Stop decryption attempts after processing the very first INITIAL
|
||||
# INITIAL packet for which we forwarded data to the SSL analyzer.
|
||||
if ( from_client )
|
||||
context.client_initial_processed = True;
|
||||
else
|
||||
context.server_initial_processed = True;
|
||||
|
||||
# Take buffered crypto data as confirmation signal.
|
||||
spicy::accept_input();
|
||||
}
|
||||
}
|
||||
frames: Frame(self.long_header, from_client, self.crypto, self.crypto_sink)[] &parse-from=self.decrypted_data if (self.first_byte.header_form == HeaderForm::LONG && |self.decrypted_data| > 0);
|
||||
};
|
||||
|
||||
##############
|
||||
# Entrypoints
|
||||
##############
|
||||
public type RequestFrame = unit {
|
||||
%context = ConnectionIDInfo;
|
||||
: Packet(True, self.context());
|
||||
%context = Context;
|
||||
: Packet(True, self.context())[];
|
||||
};
|
||||
|
||||
public type ResponseFrame = unit {
|
||||
%context = ConnectionIDInfo;
|
||||
: Packet(False, self.context());
|
||||
%context = Context;
|
||||
: Packet(False, self.context())[];
|
||||
};
|
||||
|
|
|
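For orientation, the CRYPTO data forwarded to Zeek's SSL analyzer above is framed as a plain TLS handshake record: content type 0x16, record version \x03\x03, then a two-byte big-endian length and the raw CLIENT HELLO or SERVER HELLO blob. A minimal C++ sketch of that framing (the helper name wrap_as_tls_record is hypothetical, not from the code above):

    #include <cstdint>
    #include <string>

    // Sketch: frame raw CRYPTO bytes as a TLS handshake record.
    // 0x16 = handshake content type, 0x03 0x03 = TLS record version.
    std::string wrap_as_tls_record(const std::string& handshake_data) {
        std::string rec;
        rec += '\x16';
        rec += '\x03';
        rec += '\x03';
        uint16_t len = static_cast<uint16_t>(handshake_data.size());
        rec += static_cast<char>(len >> 8);   // big-endian length, high byte
        rec += static_cast<char>(len & 0xff); // low byte
        rec += handshake_data;                // CLIENT HELLO or SERVER HELLO blob
        return rec;
    }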
@@ -60,7 +60,6 @@ const size_t AEAD_IV_LEN = 12;
const size_t AEAD_HP_LEN = 16;
const size_t AEAD_SAMPLE_LENGTH = 16;
const size_t AEAD_TAG_LENGTH = 16;
const size_t MAXIMUM_PACKET_LENGTH = 1500;
const size_t MAXIMUM_PACKET_NUMBER_LENGTH = 4;

EVP_CIPHER_CTX* get_aes_128_ecb() {

@@ -88,7 +87,7 @@ Removes the header protection from the INITIAL packet and returns a DecryptionIn
that is partially filled
*/
DecryptionInformation remove_header_protection(const std::vector<uint8_t>& client_hp, uint64_t encrypted_offset,
                                               const hilti::rt::Bytes& all_data) {
                                               const hilti::rt::Bytes& data) {
    DecryptionInformation decryptInfo;
    int outlen;
    auto* ctx = get_aes_128_ecb();

@@ -97,16 +96,16 @@ DecryptionInformation remove_header_protection(const std::vector<uint8_t>& clien
    EVP_CipherInit_ex(ctx, NULL, NULL, client_hp.data(), NULL, 1);

    static_assert(AEAD_SAMPLE_LENGTH > 0);
    assert(all_data.size() >= encrypted_offset + MAXIMUM_PACKET_NUMBER_LENGTH + AEAD_SAMPLE_LENGTH);
    assert(data.size() >= encrypted_offset + MAXIMUM_PACKET_NUMBER_LENGTH + AEAD_SAMPLE_LENGTH);

    const uint8_t* sample = data_as_uint8(all_data) + encrypted_offset + MAXIMUM_PACKET_NUMBER_LENGTH;
    const uint8_t* sample = data_as_uint8(data) + encrypted_offset + MAXIMUM_PACKET_NUMBER_LENGTH;

    std::array<uint8_t, AEAD_SAMPLE_LENGTH> mask;
    EVP_CipherUpdate(ctx, mask.data(), &outlen, sample, AEAD_SAMPLE_LENGTH);

    // To determine the actual packet number length,
    // we have to remove the mask from the first byte
    uint8_t first_byte = data_as_uint8(all_data)[0];
    uint8_t first_byte = data_as_uint8(data)[0];

    if ( first_byte & 0x80 ) {
        first_byte ^= mask[0] & 0x0F;

@@ -119,8 +118,8 @@ DecryptionInformation remove_header_protection(const std::vector<uint8_t>& clien
    int recovered_packet_number_length = (first_byte & 0x03) + 1;

    // .. and use this to reconstruct the (partially) unprotected header
    std::vector<uint8_t> unprotected_header(data_as_uint8(all_data), data_as_uint8(all_data) + encrypted_offset +
                                            recovered_packet_number_length);
    std::vector<uint8_t> unprotected_header(data_as_uint8(data),
                                            data_as_uint8(data) + encrypted_offset + recovered_packet_number_length);

    uint32_t decoded_packet_number = 0;

@@ -151,29 +150,35 @@ std::vector<uint8_t> calculate_nonce(std::vector<uint8_t> client_iv, uint64_t pa
/*
Function that calls the AEAD decryption routine, and returns the decrypted data.
*/
hilti::rt::Bytes decrypt(const std::vector<uint8_t>& client_key, const hilti::rt::Bytes& all_data,
                         uint64_t payload_length, const DecryptionInformation& decryptInfo) {
    int out, out2, res;
hilti::rt::Bytes decrypt(const std::vector<uint8_t>& client_key, const hilti::rt::Bytes& data, uint64_t payload_length,
                         const DecryptionInformation& decryptInfo) {
    int out, out2;

    if ( payload_length < decryptInfo.packet_number_length + AEAD_TAG_LENGTH )
        throw hilti::rt::RuntimeError(hilti::rt::fmt("payload too small %ld < %ld", payload_length,
                                                     decryptInfo.packet_number_length + AEAD_TAG_LENGTH));

    const uint8_t* encrypted_payload = data_as_uint8(all_data) + decryptInfo.unprotected_header.size();
    // Bail on large payloads, somewhat arbitrarily. 10k allows for Jumbo frames
    // and sometimes the fuzzer produces packets up to that size as well.
    if ( payload_length > 10000 )
        throw hilti::rt::RuntimeError(hilti::rt::fmt("payload_length too large %ld", payload_length));

    const uint8_t* encrypted_payload = data_as_uint8(data) + decryptInfo.unprotected_header.size();

    int encrypted_payload_size = payload_length - decryptInfo.packet_number_length - AEAD_TAG_LENGTH;

    if ( encrypted_payload_size < 0 )
        throw hilti::rt::RuntimeError(hilti::rt::fmt("encrypted_payload_size underflow %ld", encrypted_payload_size));

    if ( all_data.size() < decryptInfo.unprotected_header.size() + encrypted_payload_size + AEAD_TAG_LENGTH )
        throw hilti::rt::RuntimeError(hilti::rt::fmt("all_data too short %ld < %ld", all_data.size(),
    if ( data.size() < decryptInfo.unprotected_header.size() + encrypted_payload_size + AEAD_TAG_LENGTH )
        throw hilti::rt::RuntimeError(hilti::rt::fmt("data too short %ld < %ld", data.size(),
                                                     decryptInfo.unprotected_header.size() + encrypted_payload_size));

    const void* tag_to_check = all_data.data() + decryptInfo.unprotected_header.size() + encrypted_payload_size;
    const void* tag_to_check = data.data() + decryptInfo.unprotected_header.size() + encrypted_payload_size;
    int tag_to_check_length = AEAD_TAG_LENGTH;

    std::array<uint8_t, MAXIMUM_PACKET_LENGTH> decrypt_buffer;
    // Allocate memory for decryption.
    std::vector<uint8_t> decrypt_buffer(encrypted_payload_size);

    // Setup context
    auto* ctx = get_aes_128_gcm();

@@ -197,7 +202,8 @@ hilti::rt::Bytes decrypt(const std::vector<uint8_t>& client_key, const hilti::rt
    EVP_CipherUpdate(ctx, decrypt_buffer.data(), &out, encrypted_payload, encrypted_payload_size);

    // Validate whether the decryption was successful or not
    EVP_CipherFinal_ex(ctx, NULL, &out2);
    if ( EVP_CipherFinal_ex(ctx, NULL, &out2) == 0 )
        throw hilti::rt::RuntimeError("decryption failed");

    // Copy the decrypted data from the decrypted buffer into a Bytes instance.
    return hilti::rt::Bytes(decrypt_buffer.data(), decrypt_buffer.data() + out);

@@ -438,7 +444,7 @@ Function that is called from Spicy, decrypting an INITIAL packet and returning
the decrypted payload back to the analyzer.
*/
hilti::rt::Bytes QUIC_decrypt_crypto_payload(const hilti::rt::integer::safe<uint32_t>& version,
                                             const hilti::rt::Bytes& all_data, const hilti::rt::Bytes& connection_id,
                                             const hilti::rt::Bytes& data, const hilti::rt::Bytes& connection_id,
                                             const hilti::rt::integer::safe<uint64_t>& encrypted_offset,
                                             const hilti::rt::integer::safe<uint64_t>& payload_length,
                                             const hilti::rt::Bool& from_client) {

@@ -452,9 +458,9 @@ hilti::rt::Bytes QUIC_decrypt_crypto_payload(const hilti::rt::integer::safe<uint
    if ( payload_length < 20 )
        throw hilti::rt::RuntimeError(hilti::rt::fmt("payload too small %ld < 20", payload_length));

    if ( (all_data.size() < encrypted_offset + payload_length) )
    if ( (data.size() < encrypted_offset + payload_length) )
        throw hilti::rt::RuntimeError(
            hilti::rt::fmt("packet too small %ld %ld", all_data.size(), encrypted_offset + payload_length));
            hilti::rt::fmt("packet too small %ld %ld", data.size(), encrypted_offset + payload_length));

    uint32_t v = version;
    QuicPacketProtection* qpp = nullptr;

@@ -474,10 +480,10 @@ hilti::rt::Bytes QUIC_decrypt_crypto_payload(const hilti::rt::integer::safe<uint
    std::vector<uint8_t> iv = qpp->GetIv(secret);
    std::vector<uint8_t> hp = qpp->GetHp(secret);

    DecryptionInformation decryptInfo = remove_header_protection(hp, encrypted_offset, all_data);
    DecryptionInformation decryptInfo = remove_header_protection(hp, encrypted_offset, data);

    // Calculate the correct nonce for the decryption
    decryptInfo.nonce = calculate_nonce(iv, decryptInfo.packet_number);

    return decrypt(key, data, payload_length, decryptInfo);
}
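As a reading aid, the header-protection removal above follows RFC 9001 section 5.4: a 16-byte sample taken 4 bytes past the packet-number offset is encrypted with AES-ECB under the hp key, and the resulting mask unmasks the first byte and the packet number. A condensed sketch of the long-header branch, under the same constants as the code above (unmask_header is a hypothetical helper, not a function from the diff):

    #include <cstddef>
    #include <cstdint>

    // Sketch of RFC 9001 header-protection removal, long-header case.
    // mask[] is the AES-ECB encryption of the 16-byte sample under the hp key.
    void unmask_header(uint8_t* pkt, size_t pn_offset, const uint8_t mask[16]) {
        pkt[0] ^= mask[0] & 0x0F;              // long header: low 4 bits are protected
        int pn_len = (pkt[0] & 0x03) + 1;      // now readable: packet number length
        for ( int i = 0; i < pn_len; ++i )
            pkt[pn_offset + i] ^= mask[1 + i]; // unmask the packet number bytes
    }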
@@ -413,7 +413,7 @@ type SMB2_error_response(header: SMB2_Header) = record {
    byte_count : uint32;
    # This is implemented incorrectly and is disabled for now.
    #error_data : SMB2_error_data(header, byte_count);
    stuff : bytestring &restofdata &transient;
    stuff : bytestring &length=byte_count &transient;
} &byteorder = littleendian;

type SMB2_logoff_request(header: SMB2_Header) = record {
@@ -3,9 +3,9 @@
#include <arpa/inet.h>
#include <openssl/evp.h>
#include <openssl/opensslv.h>
#include <vector>

#include "zeek/Reporter.h"
#include "zeek/analyzer/Manager.h"
#include "zeek/analyzer/protocol/ssl/events.bif.h"
#include "zeek/analyzer/protocol/ssl/ssl_pac.h"
#include "zeek/analyzer/protocol/ssl/tls-handshake_pac.h"

@@ -22,6 +22,8 @@

namespace zeek::analyzer::ssl {

using byte_buffer = std::vector<u_char>;

template<typename T>
static inline T MSB(const T a) {
    return ((a >> 8) & 0xff);

@@ -32,12 +34,13 @@ static inline T LSB(const T a) {
    return (a & 0xff);
}

static std::basic_string<unsigned char> fmt_seq(uint32_t num) {
    std::basic_string<unsigned char> out(4, '\0');
static byte_buffer fmt_seq(uint32_t num) {
    byte_buffer out(4, '\0');
    out.reserve(13);
    uint32_t netnum = htonl(num);
    out.append(reinterpret_cast<u_char*>(&netnum), 4);
    out.append(5, '\0');
    uint8_t* p = reinterpret_cast<uint8_t*>(&netnum);
    out.insert(out.end(), p, p + 4);
    out.insert(out.end(), 5, '\0');
    return out;
}

@@ -271,8 +274,8 @@ bool SSL_Analyzer::TryDecryptApplicationData(int len, const u_char* data, bool i
    const u_char* s_iv = keys.data() + 68;

    // FIXME: should we change types here?
    u_char* encrypted = (u_char*)data;
    size_t encrypted_len = len;
    const u_char* encrypted = data;
    int encrypted_len = len;

    if ( is_orig )
        c_seq++;

@@ -280,14 +283,15 @@ bool SSL_Analyzer::TryDecryptApplicationData(int len, const u_char* data, bool i
        s_seq++;

    // AEAD nonce, length 12
    std::basic_string<unsigned char> s_aead_nonce;
    byte_buffer s_aead_nonce;
    s_aead_nonce.reserve(12);
    if ( is_orig )
        s_aead_nonce.assign(c_iv, 4);
        s_aead_nonce.insert(s_aead_nonce.end(), c_iv, c_iv + 4);
    else
        s_aead_nonce.assign(s_iv, 4);
        s_aead_nonce.insert(s_aead_nonce.end(), s_iv, s_iv + 4);

    // this should be the explicit counter
    s_aead_nonce.append(encrypted, 8);
    s_aead_nonce.insert(s_aead_nonce.end(), encrypted, encrypted + 8);
    assert(s_aead_nonce.size() == 12);

    EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new();

@@ -310,28 +314,28 @@ bool SSL_Analyzer::TryDecryptApplicationData(int len, const u_char* data, bool i
    else
        EVP_DecryptInit(ctx, EVP_aes_256_gcm(), s_wk, s_aead_nonce.data());

    EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, encrypted + encrypted_len);
    EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, const_cast<u_char*>(encrypted + encrypted_len));

    // AEAD tag
    std::basic_string<unsigned char> s_aead_tag;
    byte_buffer s_aead_tag;
    if ( is_orig )
        s_aead_tag = fmt_seq(c_seq);
    else
        s_aead_tag = fmt_seq(s_seq);

    assert(s_aead_tag.size() == 13);
    s_aead_tag[8] = content_type;
    s_aead_tag[9] = MSB(raw_tls_version);
    s_aead_tag[10] = LSB(raw_tls_version);
    s_aead_tag[11] = MSB(encrypted_len);
    s_aead_tag[12] = LSB(encrypted_len);
    assert(s_aead_tag.size() == 13);

    auto decrypted = std::vector<u_char>(encrypted_len +
                                         16); // see OpenSSL manpage - 16 is the block size for the supported cipher
    int decrypted_len = 0;

    EVP_DecryptUpdate(ctx, NULL, &decrypted_len, s_aead_tag.data(), s_aead_tag.size());
    EVP_DecryptUpdate(ctx, decrypted.data(), &decrypted_len, (const u_char*)encrypted, encrypted_len);
    EVP_DecryptUpdate(ctx, decrypted.data(), &decrypted_len, encrypted, encrypted_len);
    assert(static_cast<decltype(decrypted.size())>(decrypted_len) <= decrypted.size());
    decrypted.resize(decrypted_len);
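For context, the nonce and additional-data layout used above is the TLS 1.2 AES-GCM scheme (RFC 5288/RFC 5246): a 12-byte nonce built from the 4-byte implicit salt plus the 8-byte explicit nonce carried in the record, and 13 bytes of AAD holding the sequence number, content type, version, and plaintext length. A sketch of just those two inputs, reusing the byte_buffer alias from the diff (the helper names are hypothetical):

    #include <cstdint>
    #include <vector>

    using byte_buffer = std::vector<unsigned char>;

    // Sketch: per-record AEAD inputs for TLS 1.2 GCM.
    byte_buffer make_nonce(const unsigned char* salt4, const unsigned char* explicit8) {
        byte_buffer nonce;
        nonce.insert(nonce.end(), salt4, salt4 + 4);         // implicit part from the key block
        nonce.insert(nonce.end(), explicit8, explicit8 + 8); // explicit part from the record
        return nonce;                                        // 12 bytes total
    }

    byte_buffer make_aad(uint64_t seq, uint8_t type, uint16_t version, uint16_t len) {
        byte_buffer aad(13);
        for ( int i = 0; i < 8; ++i )
            aad[i] = (seq >> (56 - 8 * i)) & 0xff; // 64-bit big-endian sequence number
        aad[8] = type;
        aad[9] = version >> 8;
        aad[10] = version & 0xff;
        aad[11] = len >> 8;                        // length of the plaintext
        aad[12] = len & 0xff;
        return aad;
    }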
@@ -7,7 +7,7 @@ import spicy;
public type Message = unit {
    switch {
        -> prio: Priority;
        -> void;
        -> : void;
    };

    msg: bytes &eod;
@@ -273,7 +273,12 @@ void TCP_Reassembler::MatchUndelivered(uint64_t up_to_seq, bool use_last_upper)
        if ( b.upper > last_reassem_seq )
            break;

        tcp_analyzer->Conn()->Match(zeek::detail::Rule::PAYLOAD, b.block, b.Size(), false, false, IsOrig(), false);
        // Note: Even though this passes bol=false, at the point where
        // this code runs, the matcher is re-initialized resulting in
        // undelivered data implicitly being bol-anchored. It's unclear
        // if that was intended, but there's hardly a right way here,
        // so that seems ok.
        tcp_analyzer->Conn()->Match(zeek::detail::Rule::PAYLOAD, b.block, b.Size(), IsOrig(), false, false, false);
    }
}
@@ -46,6 +46,7 @@ Raw::Raw(ReaderFrontend* frontend) : ReaderBackend(frontend), file(nullptr, fclo
    sep_length = BifConst::InputRaw::record_separator->Len();

    bufpos = 0;
    bufsize = 0;

    stdin_fileno = fileno(stdin);
    stdout_fileno = fileno(stdout);

@@ -420,59 +421,74 @@ bool Raw::DoInit(const ReaderInfo& info, int num_fields, const Field* const* fie

int64_t Raw::GetLine(FILE* arg_file) {
    errno = 0;
    int pos = 0; // strstr_n only works on ints - so no use to use something different here
    int offset = 0;

    if ( ! buf )
    if ( ! buf ) {
        buf = std::unique_ptr<char[]>(new char[block_size]);

    int repeats = 1;
        bufpos = 0;
        bufsize = block_size;
    }

    for ( ;; ) {
        size_t readbytes = fread(buf.get() + bufpos + offset, 1, block_size - bufpos, arg_file);
        pos += bufpos + readbytes;
        // printf("Pos: %d\n", pos);
        bufpos = offset = 0; // read full block size in next read...
        size_t readbytes = fread(buf.get() + bufpos, 1, bufsize - bufpos, arg_file);

        if ( pos == 0 && errno != 0 )
        bufpos = bufpos + readbytes;

        // Nothing in the buffer and errno set, yield.
        if ( bufpos == 0 && errno != 0 )
            break;

        // Re-searching everything each time is a bit... cpu-intensive. But otherwise we have
        // to deal with situations where the separator is multi-character and split over multiple
        // reads...
        int found = util::strstr_n(pos, (unsigned char*)buf.get(), separator.size(), (unsigned char*)separator.c_str());
        //
        // memmem() would be more appropriate, but not available on Windows.
        int found = util::strstr_n(bufpos, reinterpret_cast<u_char*>(buf.get()), separator.size(),
                                   reinterpret_cast<const u_char*>(separator.c_str()));

        if ( found == -1 ) {
            // we did not find it and have to search again in the next try. resize buffer....
            // we did not find it and have to search again in the next try.
            // but first check if we encountered the file end - because if we did, this was it.
            if ( feof(arg_file) != 0 ) {
                if ( pos == 0 )
                if ( bufpos == 0 )
                    return -1; // signal EOF - and that we had no more data.
                else {
                    outbuf = std::move(buf); // buf is null after this
                    return pos;
                    return bufpos; // flush out remaining buffered data as line
                }
            }

            repeats++;
            // bah, we cannot use realloc because we would have to change the delete in the manager
            // to a free.
            std::unique_ptr<char[]> newbuf = std::unique_ptr<char[]>(new char[block_size * repeats]);
            memcpy(newbuf.get(), buf.get(), block_size * (repeats - 1));
            buf = std::move(newbuf);
            offset = block_size * (repeats - 1);
            // No separator found and buffer full, realloc and retry reading more right away.
            if ( bufpos == bufsize ) {
                std::unique_ptr<char[]> newbuf = std::unique_ptr<char[]>(new char[bufsize + block_size]);
                memcpy(newbuf.get(), buf.get(), bufsize);
                buf = std::move(newbuf);
                bufsize = bufsize + block_size;
            }
            else {
                // Short or empty read, some data in the buffer, but no separator found
                // and also not EOF: This is likely reading from a pipe where the separator
                // wasn't yet produced. Yield to retry on the next heartbeat.
                return -2;
            }
        }
        else {
            size_t sep_idx = static_cast<size_t>(found);
            assert(sep_idx <= bufsize - sep_length);
            size_t remaining = bufpos - sep_idx - sep_length;

            outbuf = std::move(buf);

            if ( found < pos ) {
            if ( remaining > 0 ) {
                // we have leftovers. copy them into the buffer for the next line
                assert(remaining <= block_size);
                buf = std::unique_ptr<char[]>(new char[block_size]);
                memcpy(buf.get(), outbuf.get() + found + sep_length, pos - found - sep_length);
                bufpos = pos - found - sep_length;
                bufpos = remaining;
                bufsize = block_size;

                memcpy(buf.get(), outbuf.get() + sep_idx + sep_length, remaining);
            }

            return found;
            return sep_idx;
        }
    }
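The rewritten GetLine() above amounts to a classic grow-and-scan reader: append into a buffer, scan for the separator, grow the buffer only when it is full, and yield when a pipe has not yet produced the separator. A stripped-down sketch of the same control flow, with std::string standing in for the manual buffer (return values mirror the diff: -1 signals EOF, -2 signals "retry later"; get_line here is a hypothetical simplification, not the real reader):

    #include <cstdio>
    #include <string>

    // Sketch: separator-delimited reads with retry semantics as in Raw::GetLine().
    long get_line(FILE* f, std::string& buf, std::string& out, const std::string& sep) {
        char chunk[4096];
        size_t n = fread(chunk, 1, sizeof chunk, f);
        buf.append(chunk, n);

        size_t found = buf.find(sep);
        if ( found != std::string::npos ) {
            out = buf.substr(0, found);       // line without the separator
            buf.erase(0, found + sep.size()); // keep leftovers for the next call
            return static_cast<long>(found);
        }

        if ( feof(f) ) {
            if ( buf.empty() )
                return -1;                    // EOF, no more data
            out.swap(buf);                    // flush remaining data as the final line
            return static_cast<long>(out.size());
        }

        return -2;                            // no separator yet; retry on the next heartbeat
    }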
@@ -58,7 +58,8 @@ private:
    std::string separator;
    unsigned int sep_length; // length of the separator

    int bufpos;
    size_t bufpos;  // Where in buf to read more data.
    size_t bufsize; // Currently allocated size of buf.
    std::unique_ptr<char[]> buf;
    std::unique_ptr<char[]> outbuf;
@@ -224,8 +224,11 @@ struct Manager::WriterInfo {
    string instantiating_filter;

    std::shared_ptr<telemetry::Counter> total_writes;
    std::shared_ptr<telemetry::Counter> total_discarded_writes;

    WriterInfo(std::shared_ptr<telemetry::Counter> total_writes) : total_writes(std::move(total_writes)) {}
    WriterInfo(std::shared_ptr<telemetry::Counter> total_writes,
               std::shared_ptr<telemetry::Counter> total_discarded_writes)
        : total_writes(std::move(total_writes)), total_discarded_writes(std::move(total_discarded_writes)) {}
};

struct Manager::Stream {

@@ -423,7 +426,11 @@ Manager::Manager()
    telemetry_mgr
        ->CounterFamily("zeek", "log-writer-writes", {"writer", "module", "stream", "filter-name", "path"},
                        "Total number of log writes passed to a concrete log writer not vetoed by stream or "
                        "filter policies.")) {
                        "filter policies.")),
    total_log_writer_discarded_writes_family(
        telemetry_mgr->CounterFamily("zeek", "log-writer-discarded-writes",
                                     {"writer", "module", "stream", "filter-name", "path"},
                                     "Total number of log writes discarded due to size limitations.")) {
    rotations_pending = 0;
}

@@ -435,6 +442,7 @@ Manager::~Manager() {
void Manager::InitPostScript() {
    rotation_format_func = id::find_func("Log::rotation_format_func");
    log_stream_policy_hook = id::find_func("Log::log_stream_policy");
    max_log_record_size = id::find_val("Log::max_log_record_size")->AsCount();
}

WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, EnumVal* tag) {

@@ -1153,8 +1161,15 @@ bool Manager::WriteToFilters(const Manager::Stream* stream, zeek::RecordValPtr c
    }

    // Alright, can do the write now.
    size_t total_size = 0;
    threading::Value** vals = RecordToFilterVals(stream, filter, columns.get(), total_size);

    threading::Value** vals = RecordToFilterVals(stream, filter, columns.get());
    if ( total_size > max_log_record_size ) {
        reporter->Weird("log_record_too_large", util::fmt("%s", stream->name.c_str()));
        w->second->total_discarded_writes->Inc();
        DeleteVals(filter->num_fields, vals);
        continue;
    }

    if ( ! PLUGIN_HOOK_WITH_RESULT(HOOK_LOG_WRITE,
                                   HookLogWrite(filter->writer->GetType()->AsEnumType()->Lookup(

@@ -1385,7 +1400,7 @@ bool Manager::SetMaxDelayQueueSize(const EnumValPtr& id, zeek_uint_t queue_size)
    return true;
}

threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty, size_t& total_size) {
    if ( ! val )
        return new threading::Value(ty->Tag(), false);

@@ -1393,7 +1408,10 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {

    switch ( lval->type ) {
        case TYPE_BOOL:
        case TYPE_INT: lval->val.int_val = val->AsInt(); break;
        case TYPE_INT:
            lval->val.int_val = val->AsInt();
            total_size += sizeof(lval->val.int_val);
            break;

        case TYPE_ENUM: {
            const char* s = ty->AsEnumType()->Lookup(val->AsInt());

@@ -1410,10 +1428,16 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
                lval->val.string_val.data = util::copy_string("", 0);
                lval->val.string_val.length = 0;
            }

            total_size += lval->val.string_val.length;

            break;
        }

        case TYPE_COUNT: lval->val.uint_val = val->AsCount(); break;
        case TYPE_COUNT:
            lval->val.uint_val = val->AsCount();
            total_size += sizeof(lval->val.uint_val);
            break;

        case TYPE_PORT: {
            auto p = val->AsCount();

@@ -1429,16 +1453,26 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {

            lval->val.port_val.port = p & ~PORT_SPACE_MASK;
            lval->val.port_val.proto = pt;
            total_size += lval->val.port_val.size();
            break;
        }

        case TYPE_SUBNET: val->AsSubNet()->Get().ConvertToThreadingValue(&lval->val.subnet_val); break;
        case TYPE_SUBNET:
            val->AsSubNet()->Get().ConvertToThreadingValue(&lval->val.subnet_val);
            total_size += lval->val.subnet_val.size();
            break;

        case TYPE_ADDR: val->AsAddr()->Get().ConvertToThreadingValue(&lval->val.addr_val); break;
        case TYPE_ADDR:
            val->AsAddr()->Get().ConvertToThreadingValue(&lval->val.addr_val);
            total_size += lval->val.addr_val.size();
            break;

        case TYPE_DOUBLE:
        case TYPE_TIME:
        case TYPE_INTERVAL: lval->val.double_val = val->AsDouble(); break;
        case TYPE_INTERVAL:
            lval->val.double_val = val->AsDouble();
            total_size += sizeof(lval->val.double_val);
            break;

        case TYPE_STRING: {
            const String* s = val->AsString()->AsString();

@@ -1447,6 +1481,7 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {

            lval->val.string_val.data = buf;
            lval->val.string_val.length = s->Len();
            total_size += lval->val.string_val.length;
            break;
        }

@@ -1456,6 +1491,7 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
            auto len = strlen(s);
            lval->val.string_val.data = util::copy_string(s, len);
            lval->val.string_val.length = len;
            total_size += lval->val.string_val.length;
            break;
        }

@@ -1467,6 +1503,7 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
            auto len = strlen(s);
            lval->val.string_val.data = util::copy_string(s, len);
            lval->val.string_val.length = len;
            total_size += lval->val.string_val.length;
            break;
        }

@@ -1483,14 +1520,15 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
            auto& set_t = tbl_t->GetIndexTypes()[0];
            bool is_managed = ZVal::IsManagedType(set_t);

            lval->val.set_val.size = set->Length();
            lval->val.set_val.vals = new threading::Value*[lval->val.set_val.size];
            zeek_int_t set_length = set->Length();
            lval->val.set_val.vals = new threading::Value*[set_length];

            for ( zeek_int_t i = 0; i < lval->val.set_val.size; i++ ) {
            for ( zeek_int_t i = 0; i < set_length && total_size < max_log_record_size; i++ ) {
                std::optional<ZVal> s_i = ZVal(set->Idx(i), set_t);
                lval->val.set_val.vals[i] = ValToLogVal(s_i, set_t.get());
                lval->val.set_val.vals[i] = ValToLogVal(s_i, set_t.get(), total_size);
                if ( is_managed )
                    ZVal::DeleteManagedType(*s_i);
                lval->val.set_val.size++;
            }

            break;

@@ -1498,14 +1536,15 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {

        case TYPE_VECTOR: {
            VectorVal* vec = val->AsVector();
            lval->val.vector_val.size = vec->Size();
            lval->val.vector_val.vals = new threading::Value*[lval->val.vector_val.size];
            zeek_int_t vec_length = vec->Size();
            lval->val.vector_val.vals = new threading::Value*[vec_length];

            auto& vv = vec->RawVec();
            auto& vt = vec->GetType()->Yield();

            for ( zeek_int_t i = 0; i < lval->val.vector_val.size; i++ ) {
                lval->val.vector_val.vals[i] = ValToLogVal(vv[i], vt.get());
            for ( zeek_int_t i = 0; i < vec_length && total_size < max_log_record_size; i++ ) {
                lval->val.vector_val.vals[i] = ValToLogVal(vv[i], vt.get(), total_size);
                lval->val.vector_val.size++;
            }

            break;

@@ -1517,7 +1556,8 @@ threading::Value* Manager::ValToLogVal(std::optional<ZVal>& val, Type* ty) {
    return lval;
}

threading::Value** Manager::RecordToFilterVals(const Stream* stream, Filter* filter, RecordVal* columns) {
threading::Value** Manager::RecordToFilterVals(const Stream* stream, Filter* filter, RecordVal* columns,
                                               size_t& total_size) {
    RecordValPtr ext_rec;

    if ( filter->num_ext_fields > 0 ) {

@@ -1565,7 +1605,11 @@ threading::Value** Manager::RecordToFilterVals(const Stream* stream, Filter* fil
        }

        if ( val )
            vals[i] = ValToLogVal(val, vt);
            vals[i] = ValToLogVal(val, vt, total_size);

        if ( total_size > max_log_record_size ) {
            return vals;
        }
    }

    return vals;

@@ -1614,7 +1658,8 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, WriterBacken
                                    {"filter-name", instantiating_filter},
                                    {"path", info->path}};

    WriterInfo* winfo = new WriterInfo(zeek::log_mgr->total_log_writer_writes_family->GetOrAdd(labels));
    WriterInfo* winfo = new WriterInfo(zeek::log_mgr->total_log_writer_writes_family->GetOrAdd(labels),
                                       zeek::log_mgr->total_log_writer_discarded_writes_family->GetOrAdd(labels));
    winfo->type = writer->Ref()->AsEnumVal();
    winfo->writer = nullptr;
    winfo->open_time = run_state::network_time;
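The pattern above threads a running size_t through every value conversion so that one oversized record can be cut off cheaply, even mid-container. A minimal sketch of just the accounting idea, with types simplified (max_size stands in for Log::max_log_record_size; these helpers are illustrative, not the real ValToLogVal):

    #include <cstddef>
    #include <string>
    #include <vector>

    // Sketch: accumulate an approximate serialized size while converting values,
    // and stop descending into containers once the budget is exhausted.
    static void to_log_val(const std::string& s, size_t& total_size) {
        total_size += s.size();
    }

    static size_t convert_vector(const std::vector<std::string>& vec, size_t& total_size,
                                 size_t max_size) {
        size_t converted = 0;
        for ( size_t i = 0; i < vec.size() && total_size < max_size; ++i ) {
            to_log_val(vec[i], total_size);
            ++converted; // mirrors lval->val.vector_val.size++ in the diff
        }
        return converted; // the caller discards the whole write if total_size > max_size
    }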
@@ -376,9 +376,9 @@ private:
    bool TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, TableVal* include, TableVal* exclude,
                        const std::string& path, const std::list<int>& indices);

    threading::Value** RecordToFilterVals(const Stream* stream, Filter* filter, RecordVal* columns);
    threading::Value** RecordToFilterVals(const Stream* stream, Filter* filter, RecordVal* columns, size_t& total_size);

    threading::Value* ValToLogVal(std::optional<ZVal>& val, Type* ty);
    threading::Value* ValToLogVal(std::optional<ZVal>& val, Type* ty, size_t& total_size);
    Stream* FindStream(EnumVal* id);
    void RemoveDisabledWriters(Stream* stream);
    void InstallRotationTimer(WriterInfo* winfo);

@@ -399,12 +399,14 @@ private:
    bool DelayCompleted(Manager::Stream* stream, detail::DelayInfo& delay_info);

    std::vector<Stream*> streams;  // Indexed by stream enum.
    int rotations_pending;         // Number of rotations not yet finished.
    int rotations_pending = 0;     // Number of rotations not yet finished.
    FuncPtr rotation_format_func;
    FuncPtr log_stream_policy_hook;
    size_t max_log_record_size = 0;

    std::shared_ptr<telemetry::CounterFamily> total_log_stream_writes_family;
    std::shared_ptr<telemetry::CounterFamily> total_log_writer_writes_family;
    std::shared_ptr<telemetry::CounterFamily> total_log_writer_discarded_writes_family;

    zeek_uint_t last_delay_token = 0;
    std::vector<detail::WriteContext> active_writes;
@@ -744,10 +744,9 @@ void Ascii::RotateLeftoverLogs() {
        auto ppf = default_ppf;

        if ( ! ll.post_proc_func.empty() ) {
            auto func = id::find_func(ll.post_proc_func.data());

            if ( func )
                ppf = std::move(func);
            const auto& id = id::find(ll.post_proc_func.data());
            if ( id && id->GetVal() && same_type(id->GetVal()->GetType(), default_ppf->GetType()) )
                ppf = id->GetVal()->AsFuncVal()->AsFuncPtr();
            else
                reporter->Warning(
                    "Could not postprocess log '%s' with intended "
@@ -2,6 +2,7 @@

#pragma once

#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
src/scan.l

@@ -219,6 +219,15 @@ static zeek::PList<FileInfo> file_stack;
// Returns true if the file is new, false if it's already been scanned.
static int load_files(const char* file);

// Update the current parsing and location state for the given file and buffer.
static int switch_to(const char* file, YY_BUFFER_STATE buffer);

// Be careful to never delete things from this list, as the strings
// are referred to (in order to save the locations of tokens and statements,
// for error reporting and debugging).
static zeek::name_list input_files;
static zeek::name_list essential_input_files;

// ### TODO: columns too - use yyless with '.' action?
%}

@@ -464,10 +473,74 @@ when return TOK_WHEN;
rc = PLUGIN_HOOK_WITH_RESULT(HOOK_LOAD_FILE_EXT, HookLoadFileExtended(zeek::plugin::Plugin::PLUGIN, plugin, ""), std::make_pair(-1, std::nullopt));

switch ( rc.first ) {
    case -1:
        // No plugin in charge of this file. (We ignore any returned content.)
    case -1: {
        // No plugin took charge of this @load-plugin directive.
        auto pre_load_input_files = input_files.size();
        zeek::plugin_mgr->ActivateDynamicPlugin(plugin);

        // No new input files: Likely the plugin was already loaded
        // or has failed to load.
        if ( input_files.size() == pre_load_input_files )
            break;

        // Look up the plugin to get the path to the shared object.
        // We use that for the loaded_scripts.log and the name of the
        // generated file loading the scripts.
        const zeek::plugin::Plugin* pp = nullptr;
        for ( const auto* p : zeek::plugin_mgr->ActivePlugins() )
            {
            if ( p->DynamicPlugin() && p->Name() == plugin )
                {
                pp = p;
                break;
                }
            }

        std::string name;
        if ( pp )
            name = pp->PluginPath();
        else
            {
            // This shouldn't happen. If it does, we come up
            // with an artificial filename rather than using
            // the shared object name.
            zeek::reporter->Warning("Did not find %s after loading", plugin);
            name = std::string("@load-plugin ") + plugin;
            }

        // Render all needed @load lines into a string
        std::string buf = "# @load-plugin generated script\n";

        while ( input_files.size() > pre_load_input_files )
            {
            // Any relative files found by the plugin manager are
            // converted to absolute paths relative to Zeek's working
            // directory. That way it is clear where these are supposed
            // to be found and find_relative_script_file() won't get
            // confused by any ZEEKPATH settings. Also, plugin files
            // containing any relative @loads themselves will work.
            std::error_code ec;
            auto canonical = zeek::filesystem::canonical(input_files[0]);
            if ( ec )
                zeek::reporter->FatalError("plugin script %s not found: %s",
                                           input_files[0], ec.message().c_str());

            buf += std::string("@load ") + canonical.string() + "\n";

            delete[] input_files.remove_nth(0);
            }

        zeek::detail::zeekygen_mgr->Script(name);
        zeek::detail::ScannedFile sf(file_stack.length(), name, false /*skipped*/,
                                     true /*prefixes_checked*/, true /*is_canonical*/);
        zeek::detail::files_scanned.push_back(std::move(sf));

        file_stack.push_back(new FileInfo(zeek::detail::current_module));

        YY_BUFFER_STATE buffer = yy_scan_bytes(buf.data(), buf.size());
        switch_to(name.c_str(), buffer);
        break;
    }

    case 0:
        if ( ! zeek::reporter->Errors() )

@@ -702,6 +775,23 @@ void zeek::detail::SetCurrentLocation(YYLTYPE currloc)
    line_number = currloc.first_line;
}

static int switch_to(const char* file, YY_BUFFER_STATE buffer)
    {
    yy_switch_to_buffer(buffer);
    yylloc.first_line = yylloc.last_line = line_number = 1;

    // Don't delete the old filename - it's pointed to by
    // every Obj created when parsing it.
    yylloc.filename = filename = zeek::util::copy_string(file);

    current_file_has_conditionals = files_with_conditionals.count(filename) > 0;

    entry_cond_depth.push_back(conditional_depth);
    entry_pragma_stack_depth.push_back(pragma_stack.size());

    return 1;
    }

static int load_files(const char* orig_file)
    {
    std::string file_path = find_relative_script_file(orig_file);

@@ -800,19 +890,7 @@ static int load_files(const char* orig_file)
        buffer = yy_create_buffer(f, YY_BUF_SIZE);
    }

    yy_switch_to_buffer(buffer);
    yylloc.first_line = yylloc.last_line = line_number = 1;

    // Don't delete the old filename - it's pointed to by
    // every Obj created when parsing it.
    yylloc.filename = filename = zeek::util::copy_string(file_path.c_str());

    current_file_has_conditionals = files_with_conditionals.count(filename) > 0;

    entry_cond_depth.push_back(conditional_depth);
    entry_pragma_stack_depth.push_back(pragma_stack.size());

    return 1;
    return switch_to(file_path.c_str(), buffer);
}

void begin_RE()

@@ -942,12 +1020,6 @@ void reject_directive(zeek::detail::Stmt* s)
    zeek::reporter->Error("incorrect use of directive");
}

// Be careful to never delete things from this list, as the strings
// are referred to (in order to save the locations of tokens and statements,
// for error reporting and debugging).
static zeek::name_list input_files;
static zeek::name_list essential_input_files;

void add_essential_input_file(const char* file)
    {
    if ( ! file )
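The new @load-plugin handling above effectively synthesizes a tiny Zeek script that @loads each file the plugin registered, then feeds that script to the scanner. A sketch of just the rendering step, using canonical paths as the diff does (the input list is a plain vector here rather than zeek::name_list, and render_load_script is a hypothetical helper):

    #include <filesystem>
    #include <string>
    #include <vector>

    // Sketch: render absolute @load lines for scripts a plugin just registered.
    std::string render_load_script(const std::vector<std::string>& new_files) {
        std::string buf = "# @load-plugin generated script\n";
        for ( const auto& f : new_files ) {
            // Canonicalize so ZEEKPATH lookups can't mis-resolve relative names.
            auto canonical = std::filesystem::canonical(f);
            buf += "@load " + canonical.string() + "\n";
        }
        return buf; // handed to yy_scan_bytes() in the real code
    }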
@@ -12,6 +12,19 @@ CSE_ValidityChecker::CSE_ValidityChecker(std::shared_ptr<ProfileFuncs> _pfs, con
    start_e = _start_e;
    end_e = _end_e;

    // For validity checking, if end_e is inside a loop and start_e is
    // outside that loop, then we need to extend the checking beyond end_e
    // to the end of the loop, to account for correctness after iterating
    // through the loop. We do that as follows. Upon entering an outer
    // loop, we set end_s to that loop. (We can tell it's an outer loop if,
    // upon entering, end_s is nil.) (1) If we encounter end_e while inside
    // that loop (which we can tell because end_s is non-nil), then we clear
    // end_e to signal that we're now using end_s to terminate the traversal.
    // (2) If we complete the loop without encountering end_e (which we can
    // tell because after traversal end_e is non-nil), then we clear end_s
    // to mark that the traversal is now not inside a loop.
    end_s = nullptr;

    // Track whether this is a record assignment, in which case
    // we're attuned to assignments to the same field for the
    // same type of record.

@@ -38,6 +51,23 @@ TraversalCode CSE_ValidityChecker::PreStmt(const Stmt* s) {
        return TC_ABORTALL;
    }

    if ( (t == STMT_WHILE || t == STMT_FOR) && have_start_e && ! end_s )
        // We've started the traversal and are entering an outer loop.
        end_s = s;

    return TC_CONTINUE;
}

TraversalCode CSE_ValidityChecker::PostStmt(const Stmt* s) {
    if ( end_s == s ) {
        if ( ! end_e )
            // We've done the outer loop containing the end expression.
            return TC_ABORTALL;

        // We're no longer doing an outer loop.
        end_s = nullptr;
    }

    return TC_CONTINUE;
}

@@ -59,8 +89,13 @@ TraversalCode CSE_ValidityChecker::PreExpr(const Expr* e) {
        ASSERT(! have_end_e);
        have_end_e = true;

        // ... and we're now done.
        return TC_ABORTALL;
        if ( ! end_s )
            // We're now done.
            return TC_ABORTALL;

        // Need to finish the loop before we mark things as done.
        // Signal to the statement traversal that we're in that state.
        end_e = nullptr;
    }

    if ( ! have_start_e )
@@ -21,6 +21,7 @@ public:
                        const Expr* end_e);

    TraversalCode PreStmt(const Stmt*) override;
    TraversalCode PostStmt(const Stmt*) override;
    TraversalCode PreExpr(const Expr*) override;
    TraversalCode PostExpr(const Expr*) override;

@@ -81,9 +82,13 @@ protected:
    // assignment expression.
    const Expr* start_e;

    // Where in the AST to end our analysis.
    // Expression in the AST where we should end our analysis. See discussion
    // in the constructor for the interplay between this and end_s.
    const Expr* end_e;

    // Statement in the AST where we should end our analysis.
    const Stmt* end_s;

    // If what we're analyzing is a record element, then its offset.
    // -1 if not.
    int field;
@@ -173,7 +173,6 @@ bool Expr::IsFieldAssignable(const Expr* e) const {
        case EXPR_RSHIFT:
        case EXPR_FIELD:
        case EXPR_HAS_FIELD:
        case EXPR_IN:
        case EXPR_SIZE:
            return true;

@@ -186,6 +185,8 @@ bool Expr::IsFieldAssignable(const Expr* e) const {
        // case EXPR_NE:
        // case EXPR_GE:
        // case EXPR_GT:
        //
        // case EXPR_IN:

        // These could be added if we subsetted them to versions for
        // which we know it's safe to evaluate both operands. Again

@@ -1629,6 +1630,9 @@ ExprPtr AssignExpr::Reduce(Reducer* c, StmtPtr& red_stmt) {
    StmtPtr lhs_stmt;
    StmtPtr rhs_stmt;

    if ( GetType()->Tag() == TYPE_ANY && op2->GetType()->Tag() != TYPE_ANY )
        op2 = with_location_of(make_intrusive<CoerceToAnyExpr>(op2), op2);

    auto lhs_e = field_e->Op()->Reduce(c, lhs_stmt);
    auto rhs_e = op2->ReduceToFieldAssignment(c, rhs_stmt);

@@ -3091,6 +3095,23 @@ CoerceToAnyExpr::CoerceToAnyExpr(ExprPtr arg_op) : UnaryExpr(EXPR_TO_ANY_COERCE,
    type = base_type(TYPE_ANY);
}

bool CoerceToAnyExpr::IsReduced(Reducer* c) const { return HasReducedOps(c); }

ExprPtr CoerceToAnyExpr::Reduce(Reducer* c, StmtPtr& red_stmt) {
    if ( c->Optimizing() )
        op = c->UpdateExpr(op);

    red_stmt = nullptr;

    if ( ! op->IsSingleton(c) )
        op = op->ReduceToSingleton(c, red_stmt);

    if ( c->Optimizing() )
        return ThisPtr();
    else
        return AssignToTemporary(c, red_stmt);
}

ValPtr CoerceToAnyExpr::Fold(Val* v) const { return {NewRef{}, v}; }

ExprPtr CoerceToAnyExpr::Duplicate() { return SetSucc(new CoerceToAnyExpr(op->Duplicate())); }
@@ -490,7 +490,7 @@ void GenIDDefs::TrackID(const ID* id, const ExprPtr& e) {
    // here to set the lowest limit for definitions. For now we leave
    // DefinedAfter as capable of supporting that distinction in case we
    // find need to revive it in the future.
    oi->DefinedAfter(last_stmt_traversed, e, confluence_blocks, 0);
    oi->SetDefinedAfter(last_stmt_traversed, e, confluence_blocks, 0);

    // Ensure we track this identifier across all relevant
    // confluence regions.
@@ -74,8 +74,8 @@ void IDOptInfo::AddInitExpr(ExprPtr init_expr, InitClass ic) {
    init_exprs.emplace_back(std::move(init_expr));
}

void IDOptInfo::DefinedAfter(const Stmt* s, const ExprPtr& e, const std::vector<const Stmt*>& conf_blocks,
                             zeek_uint_t conf_start) {
void IDOptInfo::SetDefinedAfter(const Stmt* s, const ExprPtr& e, const std::vector<const Stmt*>& conf_blocks,
                                zeek_uint_t conf_start) {
    if ( tracing )
        printf("ID %s defined at %d: %s\n", trace_ID, s ? s->GetOptInfo()->stmt_num : NO_DEF,
               s ? obj_desc(s).c_str() : "<entry>");

@@ -124,6 +124,20 @@ void IDOptInfo::DefinedAfter(const Stmt* s, const ExprPtr& e, const std::vector<
    for ( ; conf_start < conf_blocks.size(); ++conf_start )
        StartConfluenceBlock(conf_blocks[conf_start]);

    if ( e ) {
        // If we just ended a region that's (1) at the same block level,
        // (2) definitive in terms of having assigned to the identifier,
        // and (3) adjacent to the one we're about to start (no intervening
        // confluence), then mark it as ended-due-to-assignment (as opposed
        // to ended-due-to-confluence). Doing so enables us to propagate that
        // assignment value to the beginning of this block in
        // FindRegionBeforeIndex() so we can collapse assignment cascades;
        // see the comment in that method.
        auto& ub = usage_regions.back();
        if ( ub.BlockLevel() == s->GetOptInfo()->block_level && ub.EndsAfter() == stmt_num - 1 && ub.DefExprAfter() )
            ub.SetEndedDueToAssignment();
    }

    // Create a new region corresponding to this definition.
    // This needs to come after filling out the confluence
    // blocks, since they'll create their own (earlier) regions.

@@ -436,19 +450,29 @@ void IDOptInfo::EndRegionsAfter(int stmt_num, int level) {
int IDOptInfo::FindRegionBeforeIndex(int stmt_num) {
    int region_ind = NO_DEF;
    for ( auto i = 0U; i < usage_regions.size(); ++i ) {
        auto ur = usage_regions[i];
        auto& ur = usage_regions[i];

        if ( ur.StartsAfter() >= stmt_num )
            break;

        if ( ur.EndsAfter() == NO_DEF )
            // It's active for everything beyond its start.
        // It's active for everything beyond its start, or
        // it's active at the beginning of the statement of interest.
        if ( ur.EndsAfter() == NO_DEF || ur.EndsAfter() >= stmt_num )
            region_ind = i;

        else if ( ur.EndsAfter() >= stmt_num - 1 )
            // It's active at the beginning of the statement of
            // interest.
        else if ( ur.EndsAfter() == stmt_num - 1 && ur.EndedDueToAssignment() ) {
            // There's one other possibility, which occurs for a series of
            // statements like:
            //
            //     a = some_val;
            //     a = a + 1;
            //
            // Here, the region for "a = some_val" ends right after
            // that statement due to the new assignment to 'a' on the
            // second line. However, it's okay to use the first region
            // on the RHS.
            region_ind = i;
        }
    }

    ASSERT(region_ind != NO_DEF);
@@ -51,6 +51,13 @@ public:
    int EndsAfter() const { return end_stmt; }
    void SetEndsAfter(int _end_stmt) { end_stmt = _end_stmt; }

    // Returns or sets whether the region ended due to a new assignment to the
    // identifier, as opposed to confluence (ending of a scope block). This
    // information is used for an optimization in IDOptInfo::FindRegionBeforeIndex().
    // The value defaults to false.
    bool EndedDueToAssignment() const { return ended_due_to_assignment; }
    void SetEndedDueToAssignment() { ended_due_to_assignment = true; }

    // The confluence nesting level associated with the region. Other
    // regions that overlap take precedence if they have a higher
    // (= more inner) block level.

@@ -85,6 +92,10 @@ protected:
    // its execution.
    int end_stmt = NO_DEF; // means the region hasn't ended yet

    // Whether the region ended because of an immediately following
    // assignment.
    bool ended_due_to_assignment = false;

    // Degree of confluence nesting associated with this region.
    int block_level;

@@ -165,8 +176,8 @@ public:
    // gives the full set of surrounding confluence statements.
    // It should be processed starting at conf_start (note that
    // conf_blocks may be empty).
    void DefinedAfter(const Stmt* s, const ExprPtr& e, const std::vector<const Stmt*>& conf_blocks,
                      zeek_uint_t conf_start);
    void SetDefinedAfter(const Stmt* s, const ExprPtr& e, const std::vector<const Stmt*>& conf_blocks,
                         zeek_uint_t conf_start);

    // Called upon encountering a "return" statement.
    void ReturnAt(const Stmt* s);
@@ -73,7 +73,7 @@ void FixedCatArg::RenderInto(ZVal* zframe, int slot, char*& res) {
    n = modp_dtoa2(d, res, 6);
    res += n;

    if ( util::approx_equal(d, nearbyint(d), 1e-9) && std::isfinite(d) && ! strchr(tmp, 'e') ) {
    if ( util::approx_equal(d, nearbyint(d), 1e-9) && std::isfinite(d) ) {
        // disambiguate from integer
        *(res++) = '.';
        *(res++) = '0';

@@ -42,7 +42,6 @@ public:

protected:
    TypePtr t;
    char tmp[256];
};

class StringCatArg : public CatArg {
@ -934,13 +934,13 @@ eval auto& vsel = frame[z.v2].vector_val->RawVec();
|
|||
auto& v1 = frame[z.v3].vector_val->RawVec();
|
||||
auto& v2 = frame[z.v4].vector_val->RawVec();
|
||||
auto n = v1.size();
|
||||
auto res = new vector<std::optional<ZVal>>(n);
|
||||
vector<std::optional<ZVal>> res(n);
|
||||
for ( auto i = 0U; i < n; ++i )
|
||||
if ( vsel[i] )
|
||||
(*res)[i] = vsel[i]->int_val ? v1[i] : v2[i];
|
||||
res[i] = vsel[i]->int_val ? v1[i] : v2[i];
|
||||
auto& full_res = frame[z.v1].vector_val;
|
||||
Unref(full_res);
|
||||
full_res = new VectorVal(cast_intrusive<VectorType>(z.t), res);
|
||||
full_res = new VectorVal(cast_intrusive<VectorType>(z.t), &res);
|
||||
|
||||
# Our instruction format doesn't accommodate two constants, so for
|
||||
# the singular case of a V ? C1 : C2 conditional, we split it into
|
||||
|
@@ -1254,9 +1254,14 @@ macro AssignFromRec()
 	for ( size_t i = 0U; i < n; ++i )
 		{
 		auto rhs_i = rhs->RawField(rhs_map[i]);
+		auto& init_i = init_vals[lhs_map[i]];
 		if ( is_managed[i] )
 			{
 			zeek::Ref(rhs_i.ManagedVal());
-			init_vals[lhs_map[i]] = rhs_i;
+			if ( init_i )
+				ZVal::DeleteManagedType(*init_i);
 			}
+		init_i = rhs_i;
 		}
 
op Construct-Known-Record-From
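The fix above follows the usual rule for updating a reference-counted slot: reference the incoming value before releasing whatever the slot held, so the update stays correct even when both are the same object. A generic sketch of that ordering (not Zeek's ZVal API):

    struct Managed { int refs = 1; };

    // Safe even when 'slot' and 'incoming' already point at the same object.
    void assign_managed(Managed*& slot, Managed* incoming) {
        ++incoming->refs;                // Ref() the new value first ...
        if ( slot && --slot->refs == 0 ) // ... then drop the old occupant
            delete slot;
        slot = incoming;
    }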
@@ -1561,7 +1566,9 @@ eval if ( frame[z.v2].vector_val->Size() > 0 )
 
 unary-expr-op To-Any-Coerce
 op-type X
 set-type $1
-eval	AssignV1(ZVal(frame[z.v2].ToVal(z.t), ZAM::any_base_type))
+eval	auto orig_lhs = frame[z.v1]; /* hold in case z.v1 = z.v2 */
+	frame[z.v1] = ZVal($1.ToVal(z.t), ZAM::any_base_type);
+	ZVal::DeleteManagedType(orig_lhs);
 
 unary-expr-op From-Any-Coerce
 op-type X
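The `orig_lhs` temporary guards the self-assignment case the comment mentions: when source and destination are the same frame slot, freeing the destination first would invalidate the source. A reduced sketch of the ordering (illustrative types, assumes the source slot holds a value):

    struct ZSlot { int* managed = nullptr; };

    // Hold the destination's old value, overwrite, and only then free it.
    // Freeing first would break the aliased case where &dst == &src.
    void coerce_to_any(ZSlot& dst, const ZSlot& src) {
        int* orig = dst.managed;             // hold in case dst aliases src
        dst.managed = new int(*src.managed); // build the coerced value
        delete orig;                         // now the old value can go
    }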
@@ -1604,7 +1611,19 @@ op Any-Vector-Elem-Assign
 op1-read
 set-type $1
 type VVV
-eval	EvalVectorElemAssign(, vv->Assign(ind, frame[z.v3].ToVal(z.t)))
+eval	auto ind = frame[z.v2].AsCount();
+	auto vv = frame[z.v1].AsVector();
+	auto yt = vv->RawYieldTypes();
+	if ( ind < vv->Size() && yt && (*yt)[ind] && ZVal::IsManagedType((*yt)[ind]) )
+		{
+		auto orig_elem = vv->RawVec()[ind];
+		if ( ! vv->Assign(ind, frame[z.v3].ToVal(z.t)) )
+			ZAM_run_time_error(z.loc, "value used but not set");
+		if ( orig_elem )
+			ZVal::DeleteManagedType(*orig_elem);
+		}
+	else if ( ! vv->Assign(ind, frame[z.v3].ToVal(z.t)) )
+		ZAM_run_time_error(z.loc, "value used but not set");
 
 op Vector-Elem-Assign-Any
 op1-read
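The expanded eval body plugs a leak for managed element types: the element being displaced must be released, but only after it has been read out, while the assignment still reports "value used but not set" on failure. The same shape in miniature, with std::optional<int*> standing in for a managed ZVal:

    #include <optional>
    #include <vector>

    void assign_elem(std::vector<std::optional<int*>>& vec, size_t ind, int* elem) {
        // Remember the element we are about to displace ...
        std::optional<int*> orig = ind < vec.size() ? vec[ind] : std::nullopt;
        if ( ind >= vec.size() )
            vec.resize(ind + 1);
        vec[ind] = elem;  // ... overwrite the slot ...
        if ( orig )
            delete *orig; // ... and release the displaced value last.
    }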
@@ -2133,7 +2152,10 @@ type VVVV
 eval	NextVectorIterCore(z.v3, v4)
 	frame[z.v1].uint_val = si.iter;
 	if ( z.is_managed )
+		{
+		ZVal::DeleteManagedType(frame[z.v2]);
 		frame[z.v2] = BuildVal(vv[si.iter]->ToVal(z.t), z.t);
+		}
 	else
 		frame[z.v2] = *vv[si.iter];
 	si.IterFinished();

@@ -2145,7 +2167,10 @@ internal-op Next-Vector-Blank-Iter-Val-Var
 type VVV
 eval	NextVectorIterCore(z.v2, v3)
 	if ( z.is_managed )
+		{
+		ZVal::DeleteManagedType(frame[z.v1]);
 		frame[z.v1] = BuildVal(vv[si.iter]->ToVal(z.t), z.t);
+		}
 	else
 		frame[z.v1] = *vv[si.iter];
 	si.IterFinished();

@@ -3068,11 +3093,11 @@ macro AnalyzerName(tag)
 	auto atype = tag.ToVal(z.t);
 	auto val = atype->AsEnumVal();
 	Unref(frame[z.v1].string_val);
-	plugin::Component* component = zeek::analyzer_mgr->Lookup(val);
+	plugin::Component* component = zeek::analyzer_mgr->Lookup(val, false);
 	if ( ! component )
-		component = zeek::packet_mgr->Lookup(val);
+		component = zeek::packet_mgr->Lookup(val, false);
 	if ( ! component )
-		component = zeek::file_mgr->Lookup(val);
+		component = zeek::file_mgr->Lookup(val, false);
 	if ( component )
 		frame[z.v1].string_val = new StringVal(component->CanonicalName());
 	else

@@ -214,8 +214,9 @@ static void vec_exec(ZOp op, TypePtr t, VectorVal*& v1, const VectorVal* v2, con
             std::string err = "overflow promoting from "; \
             err += ov_err; \
             err += " arithmetic value"; \
+            /* The run-time error will throw an exception, so recover intermediary memory. */ \
+            delete res_zv; \
             ZAM_run_time_error(z.loc, err.c_str()); \
             res[i] = std::nullopt; \
         } \
         else \
             res[i] = ZVal(cast(vi)); \
@@ -326,6 +327,55 @@ std::shared_ptr<ProfVec> ZBody::BuildProfVec() const {
     return pv;
 }
 
+// Helper class for managing ZBody state to ensure that memory is recovered
+// if a ZBody is exited via an exception.
+class ZBodyStateManager {
+public:
+    // If fixed_frame is nil then creates a dynamic frame.
+    ZBodyStateManager(ZVal* _fixed_frame, int frame_size, const std::vector<int>& _managed_slots,
+                      TableIterVec* _tiv_ptr)
+        : fixed_frame(_fixed_frame), managed_slots(_managed_slots), tiv_ptr(_tiv_ptr) {
+        if ( fixed_frame )
+            frame = fixed_frame;
+        else {
+            frame = new ZVal[frame_size];
+            for ( auto s : managed_slots )
+                frame[s].ClearManagedVal();
+        }
+    }
+
+    void SetTableIters(TableIterVec* _tiv_ptr) { tiv_ptr = _tiv_ptr; }
+
+    ~ZBodyStateManager() {
+        if ( tiv_ptr )
+            for ( auto& ti : *tiv_ptr )
+                ti.Clear();
+
+        if ( fixed_frame ) {
+            // Recover memory and reset for use in next call.
+            for ( auto s : managed_slots ) {
+                ZVal::DeleteManagedType(frame[s]);
+                frame[s].ClearManagedVal();
+            }
+        }
+
+        else {
+            // Recover memory, no need to reset.
+            for ( auto s : managed_slots )
+                ZVal::DeleteManagedType(frame[s]);
+            delete[] frame;
+        }
+    }
+
+    auto Frame() { return frame; }
+
+private:
+    ZVal* fixed_frame;
+    ZVal* frame;
+    const std::vector<int>& managed_slots;
+    TableIterVec* tiv_ptr;
+};
+
 ValPtr ZBody::Exec(Frame* f, StmtFlowType& flow) {
     unsigned int pc = 0;
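ZBodyStateManager is a straightforward RAII guard: the frame and iterator cleanup that previously sat at the end of Exec() now lives in a destructor, so it also runs when a ZAM runtime error throws. A minimal stand-alone illustration of the same pattern (not Zeek code):

    #include <cstdio>
    #include <stdexcept>

    class FrameGuard {
    public:
        explicit FrameGuard(int* frame) : frame_(frame) {}
        // Runs on normal return *and* while an exception unwinds the stack.
        ~FrameGuard() { delete[] frame_; }
        int* Frame() { return frame_; }

    private:
        int* frame_;
    };

    void exec(bool fail) {
        FrameGuard guard(new int[16]);
        if ( fail )
            throw std::runtime_error("run-time error"); // frame still freed
        std::printf("normal exit\n");
    }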
@@ -358,22 +408,22 @@ ValPtr ZBody::Exec(Frame* f, StmtFlowType& flow) {
     }
 #endif
 
-    ZVal* frame;
+    ZBodyStateManager state_mgr(fixed_frame, frame_size, managed_slots, &table_iters);
     std::unique_ptr<TableIterVec> local_table_iters;
     std::vector<StepIterInfo> step_iters(num_step_iters);
 
+    ZVal* frame;
+
-    if ( fixed_frame )
-        frame = fixed_frame;
-    else {
-        frame = new ZVal[frame_size];
-        // Clear slots for which we do explicit memory management.
-        for ( auto s : managed_slots )
-            frame[s].ClearManagedVal();
+    frame = state_mgr.Frame();
 
     if ( ! table_iters.empty() ) {
         local_table_iters = std::make_unique<TableIterVec>(table_iters.size());
         *local_table_iters = table_iters;
         tiv_ptr = &(*local_table_iters);
+        state_mgr.SetTableIters(nullptr); // unique_ptr will clean it up directly
     }
-    }

@@ -423,33 +473,6 @@ ValPtr ZBody::Exec(Frame* f, StmtFlowType& flow) {
         ++pc;
     }
 
-    auto result = ret_type ? ret_u->ToVal(ret_type) : nullptr;
-
-    if ( fixed_frame ) {
-        // Make sure we don't have any dangling iterators.
-        for ( auto& ti : table_iters )
-            ti.Clear();
-
-        // Free slots for which we do explicit memory management,
-        // preparing them for reuse.
-        for ( auto& ms : managed_slots ) {
-            auto& v = frame[ms];
-            ZVal::DeleteManagedType(v);
-            v.ClearManagedVal();
-        }
-    }
-    else {
-        // Free those slots for which we do explicit memory management.
-        // No need to then clear them, as we're about to throw away
-        // the entire frame.
-        for ( auto& ms : managed_slots ) {
-            auto& v = frame[ms];
-            ZVal::DeleteManagedType(v);
-        }
-
-        delete[] frame;
-    }
-
 #ifdef ENABLE_ZAM_PROFILE
     if ( profiling_active ) {
         tot_CPU_time += util::curr_CPU_time() - start_CPU_time;

@@ -460,7 +483,7 @@ ValPtr ZBody::Exec(Frame* f, StmtFlowType& flow) {
     }
 #endif
 
-    return result;
+    return ret_type ? ret_u->ToVal(ret_type) : nullptr;
 }
 
 void ZBody::ReportExecutionProfile(ProfMap& pm) {

@@ -604,8 +627,7 @@ static void vec_exec(ZOp op, TypePtr t, VectorVal*& v1, const VectorVal* v2, con
 
     auto& vec2 = v2->RawVec();
     auto n = vec2.size();
-    auto vec1_ptr = new vector<std::optional<ZVal>>(n);
-    auto& vec1 = *vec1_ptr;
+    vector<std::optional<ZVal>> vec1(n);
 
     for ( auto i = 0U; i < n; ++i ) {
         if ( vec2[i] )

@@ -620,7 +642,7 @@ static void vec_exec(ZOp op, TypePtr t, VectorVal*& v1, const VectorVal* v2, con
 
     auto vt = cast_intrusive<VectorType>(std::move(t));
     auto old_v1 = v1;
-    v1 = new VectorVal(std::move(vt), vec1_ptr);
+    v1 = new VectorVal(std::move(vt), &vec1);
     Unref(old_v1);
 }
@@ -631,8 +653,13 @@ static void vec_exec(ZOp op, TypePtr t, VectorVal*& v1, const VectorVal* v2, con
     auto& vec2 = v2->RawVec();
     auto& vec3 = v3->RawVec();
     auto n = vec2.size();
-    auto vec1_ptr = new vector<std::optional<ZVal>>(n);
-    auto& vec1 = *vec1_ptr;
 
+    if ( vec3.size() != n ) {
+        ZAM_run_time_error(util::fmt("vector operands are of different sizes (%d vs. %d)", int(n), int(vec3.size())));
+        return;
+    }
+
+    vector<std::optional<ZVal>> vec1(n);
 
     for ( auto i = 0U; i < vec2.size(); ++i ) {
         if ( vec2[i] && vec3[i] )
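The new guard turns a mismatched pair of vector operands into a reported runtime error rather than an out-of-bounds read. The shape of the check in isolation (optional elements model Zeek's vector holes):

    #include <cstdio>
    #include <optional>
    #include <vector>

    using OptInt = std::optional<int>;

    bool vec_add(std::vector<OptInt>& res, const std::vector<OptInt>& a,
                 const std::vector<OptInt>& b) {
        if ( a.size() != b.size() ) {
            std::fprintf(stderr, "vector operands are of different sizes (%d vs. %d)\n",
                         int(a.size()), int(b.size()));
            return false;
        }

        res.assign(a.size(), std::nullopt);
        for ( size_t i = 0; i < a.size(); ++i )
            if ( a[i] && b[i] ) // holes stay holes in the result
                res[i] = *a[i] + *b[i];
        return true;
    }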
@@ -647,7 +674,7 @@ static void vec_exec(ZOp op, TypePtr t, VectorVal*& v1, const VectorVal* v2, con
 
     auto vt = cast_intrusive<VectorType>(std::move(t));
     auto old_v1 = v1;
-    v1 = new VectorVal(std::move(vt), vec1_ptr);
+    v1 = new VectorVal(std::move(vt), &vec1);
     Unref(old_v1);
 }

@@ -6,6 +6,7 @@
 #include <glob.h>
 
+#include <exception>
 #include <iterator>
 #include <limits>
 #include <utility>

@@ -32,6 +33,7 @@
 #include "zeek/spicy/file-analyzer.h"
 #include "zeek/spicy/packet-analyzer.h"
 #include "zeek/spicy/protocol-analyzer.h"
 #include "zeek/spicy/runtime-support.h"
+#include "zeek/zeek-config-paths.h"
 
 using namespace zeek;

@@ -74,9 +76,13 @@ void Manager::registerProtocolAnalyzer(const std::string& name, hilti::rt::Proto
     info.name_zeek = hilti::rt::replace(name, "::", "_");
     info.name_zeekygen = hilti::rt::fmt("<Spicy-%s>", name);
     info.protocol = proto;
-    info.ports = ports;
     info.linker_scope = linker_scope;
 
+    // Store ports in a deterministic order. We can't (easily) sort the
+    // `hilti::rt::Vector` unfortunately.
+    std::copy(ports.begin(), ports.end(), std::back_inserter(info.ports));
+    std::sort(info.ports.begin(), info.ports.end());
+
     // We may have that analyzer already iff it was previously pre-registered
     // without a linker scope. We'll then only set the scope now.
     if ( auto t = _analyzer_name_to_tag_type.find(info.name_zeek); t != _analyzer_name_to_tag_type.end() ) {
@@ -587,25 +593,25 @@ static ::TransportProto transport_protocol(const hilti::rt::Port port) {
 }
 
 static void hook_accept_input() {
-    auto cookie = static_cast<rt::Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto x = cookie->protocol ) {
-        auto tag = spicy_mgr->tagForProtocolAnalyzer(x->analyzer->GetAnalyzerTag());
-        SPICY_DEBUG(hilti::rt::fmt("confirming protocol %s", tag.AsString()));
-        return x->analyzer->AnalyzerConfirmation(tag);
+    if ( auto cookie = static_cast<rt::Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto x = cookie->protocol ) {
+            auto tag = spicy_mgr->tagForProtocolAnalyzer(x->analyzer->GetAnalyzerTag());
+            SPICY_DEBUG(hilti::rt::fmt("confirming protocol %s", tag.AsString()));
+            return x->analyzer->AnalyzerConfirmation(tag);
+        }
     }
 }
 
 static void hook_decline_input(const std::string& reason) {
-    auto cookie = static_cast<rt::Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto x = cookie->protocol ) {
-        auto tag = spicy_mgr->tagForProtocolAnalyzer(x->analyzer->GetAnalyzerTag());
-        SPICY_DEBUG(hilti::rt::fmt("rejecting protocol %s: %s", tag.AsString(), reason));
-        return x->analyzer->AnalyzerViolation(reason.c_str(), nullptr, 0, tag);
+    if ( auto cookie = static_cast<rt::Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto x = cookie->protocol ) {
+            auto tag = spicy_mgr->tagForProtocolAnalyzer(x->analyzer->GetAnalyzerTag());
+            SPICY_DEBUG(hilti::rt::fmt("rejecting protocol %s: %s", tag.AsString(), reason));
+            return x->analyzer->AnalyzerViolation(reason.c_str(), nullptr, 0, tag);
+        }
     }
+    else
+        SPICY_DEBUG(hilti::rt::fmt("attempting to reject protocol without cookie: %s", reason));
 }
 
 void Manager::InitPostScript() {
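These hooks (and the runtime functions further below) all move from `assert(cookie)` to testing the cookie, since the assert compiles away in release builds and a null cookie would then be dereferenced. The recurring shape, distilled into a generic sketch (not the Spicy runtime API):

    // Stand-in for hilti::rt::context::cookie(); may legitimately be null.
    void* current_cookie() { return nullptr; }

    template <typename Cookie, typename Fn>
    void with_cookie(Fn&& fn) {
        if ( auto cookie = static_cast<Cookie*>(current_cookie()) )
            fn(*cookie);
        // else: per-call fallback, e.g. log, throw ValueUnavailable, or no-op
    }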
@@ -701,14 +707,25 @@ void Manager::InitPostScript() {
         if ( ! tag )
             reporter->InternalError("cannot get analyzer tag for '%s'", p.name_analyzer.c_str());
 
+        auto register_analyzer_for_port = [&](auto tag, const hilti::rt::Port& port_) {
+            SPICY_DEBUG(hilti::rt::fmt("  Scheduling analyzer for port %s", port_));
+
+            // Well-known ports are registered in scriptland, so we'll raise an
+            // event that will do it for us through a predefined handler.
+            zeek::Args vals = Args();
+            vals.emplace_back(tag.AsVal());
+            vals.emplace_back(zeek::spicy::rt::to_val(port_, base_type(TYPE_PORT)));
+            EventHandlerPtr handler = event_registry->Register("spicy_analyzer_for_port");
+            event_mgr.Enqueue(handler, vals);
+        };
+
         for ( const auto& ports : p.ports ) {
             const auto proto = ports.begin.protocol();
 
             // Port ranges are closed intervals.
             for ( auto port = ports.begin.port(); port <= ports.end.port(); ++port ) {
                 const auto port_ = hilti::rt::Port(port, proto);
-                SPICY_DEBUG(hilti::rt::fmt("  Scheduling analyzer for port %s", port_));
-                analyzer_mgr->RegisterAnalyzerForPort(tag, transport_protocol(port_), port);
+                register_analyzer_for_port(tag, port_);
 
                 // Don't double register in case of single-port ranges.
                 if ( ports.begin.port() == ports.end.port() )

@@ -727,7 +744,7 @@ void Manager::InitPostScript() {
                 continue;
 
             SPICY_DEBUG(hilti::rt::fmt("  Scheduling analyzer for port %s", port.port));
-            analyzer_mgr->RegisterAnalyzerForPort(tag, transport_protocol(port.port), port.port.port());
+            register_analyzer_for_port(tag, port.port);
         }
     }
 }

@@ -85,7 +85,7 @@ public:
  *
  * @param name name of the analyzer as defined in its EVT file
  * @param proto analyzer's transport-layer protocol
- * @param prts well-known ports for the analyzer; it'll be activated automatically for these
+ * @param ports well-known ports for the analyzer; it'll be activated automatically for these
 * @param parser_orig name of the Spicy parser for the originator side; must match the name that
 * Spicy registers the unit's parser with
 * @param parser_resp name of the Spicy parser for the originator side; must match the name that

@@ -343,7 +343,7 @@ private:
     std::string name_parser_resp;
     std::string name_replaces;
     hilti::rt::Protocol protocol = hilti::rt::Protocol::Undef;
-    hilti::rt::Vector<::zeek::spicy::rt::PortRange> ports;
+    std::vector<::zeek::spicy::rt::PortRange> ports; // we keep this sorted
     std::string linker_scope;
 
     // Computed and available once the analyzer has been registered.
|
@ -19,6 +19,11 @@ struct PortRange {
|
|||
|
||||
hilti::rt::Port begin; /**< first port in the range */
|
||||
hilti::rt::Port end; /**< last port in the range */
|
||||
|
||||
bool operator<(const PortRange& other) const {
|
||||
// Just get us a deterministic order.
|
||||
return std::tie(begin, end) < std::tie(other.begin, other.end);
|
||||
}
|
||||
};
|
||||
|
||||
inline bool operator==(const PortRange& a, const PortRange& b) {
|
||||
|
|
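std::tie builds a tuple of references, and tuples compare lexicographically, so this orders ranges by `begin` first and breaks ties on `end`; that is all the deterministic sort above needs:

    #include <cassert>
    #include <tuple>

    struct Range { int begin; int end; };

    bool less(const Range& a, const Range& b) {
        return std::tie(a.begin, a.end) < std::tie(b.begin, b.end);
    }

    int main() {
        assert(less({80, 80}, {80, 443})); // equal begins fall back to end
        assert(! less({80, 443}, {80, 80}));
    }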
|
@ -223,47 +223,47 @@ TypePtr rt::event_arg_type(const EventHandlerPtr& handler, const hilti::rt::inte
|
|||
|
||||
ValPtr& rt::current_conn() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/current_conn");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( cookie->cache.conn )
|
||||
return cookie->cache.conn;
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( cookie->cache.conn )
|
||||
return cookie->cache.conn;
|
||||
|
||||
if ( auto x = cookie->protocol ) {
|
||||
cookie->cache.conn = x->analyzer->Conn()->GetVal();
|
||||
return cookie->cache.conn;
|
||||
if ( auto x = cookie->protocol ) {
|
||||
cookie->cache.conn = x->analyzer->Conn()->GetVal();
|
||||
return cookie->cache.conn;
|
||||
}
|
||||
}
|
||||
else
|
||||
throw ValueUnavailable("$conn not available");
|
||||
|
||||
throw ValueUnavailable("$conn not available");
|
||||
}
|
||||
|
||||
ValPtr& rt::current_is_orig() {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/current_is_orig");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
|
||||
if ( cookie->cache.is_orig )
|
||||
return cookie->cache.is_orig;
|
||||
if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
|
||||
if ( cookie->cache.is_orig )
|
||||
return cookie->cache.is_orig;
|
||||
|
||||
if ( auto x = cookie->protocol ) {
|
||||
cookie->cache.is_orig = val_mgr->Bool(x->is_orig);
|
||||
return cookie->cache.is_orig;
|
||||
if ( auto x = cookie->protocol ) {
|
||||
cookie->cache.is_orig = val_mgr->Bool(x->is_orig);
|
||||
return cookie->cache.is_orig;
|
||||
}
|
||||
}
|
||||
else
|
||||
throw ValueUnavailable("$is_orig not available");
|
||||
|
||||
throw ValueUnavailable("$is_orig not available");
|
||||
}
|
||||
|
||||
void rt::debug(const std::string& msg) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/debug");
|
||||
auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
|
||||
assert(cookie);
|
||||
if ( ! cookie )
|
||||
return SPICY_DEBUG(msg);
|
||||
|
||||
rt::debug(*cookie, msg);
|
||||
}
|
||||
|
||||
void rt::debug(const Cookie& cookie, const std::string& msg) {
|
||||
auto _ = hilti::rt::profiler::start("zeek/rt/debug");
|
||||
std::string name;
|
||||
std::string id;
|
||||
|
||||
if ( const auto p = cookie.protocol ) {
|
||||
auto name = p->analyzer->GetAnalyzerName();
|
||||
|
@@ -285,12 +285,14 @@ void rt::debug(const Cookie& cookie, const std::string& msg) {
 inline rt::cookie::FileStateStack* _file_state_stack(rt::Cookie* cookie) {
     auto _ = hilti::rt::profiler::start("zeek/rt/file_state_stack");
 
-    if ( auto c = cookie->protocol )
-        return c->is_orig ? &c->fstate_orig : &c->fstate_resp;
-    else if ( auto f = cookie->file )
-        return &f->fstate;
-    else
-        throw rt::ValueUnavailable("no current connection or file available");
+    if ( cookie ) {
+        if ( auto c = cookie->protocol )
+            return c->is_orig ? &c->fstate_orig : &c->fstate_resp;
+        else if ( auto f = cookie->file )
+            return &f->fstate;
+    }
+
+    throw rt::ValueUnavailable("no current connection or file available");
 }
 
 inline const rt::cookie::FileState* _file_state(rt::Cookie* cookie, std::optional<std::string> fid) {

@@ -313,14 +315,14 @@ inline const rt::cookie::FileState* _file_state(rt::Cookie* cookie, std::optiona
 
 ValPtr rt::current_file() {
     auto _ = hilti::rt::profiler::start("zeek/rt/current_file");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto x = cookie->file )
-        return x->analyzer->GetFile()->ToVal();
-    else if ( auto* fstate = _file_state(cookie, {}) ) {
-        if ( auto* f = file_mgr->LookupFile(fstate->fid) )
-            return f->ToVal();
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto x = cookie->file )
+            return x->analyzer->GetFile()->ToVal();
+        else if ( auto* fstate = _file_state(cookie, {}) ) {
+            if ( auto* f = file_mgr->LookupFile(fstate->fid) )
+                return f->ToVal();
+        }
     }
 
     throw ValueUnavailable("$file not available");
@@ -328,43 +330,43 @@ ValPtr rt::current_file() {
 
 ValPtr rt::current_packet() {
     auto _ = hilti::rt::profiler::start("zeek/rt/current_packet");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto c = cookie->packet ) {
-        if ( ! c->packet_val )
-            // We cache the built value in case we need it multiple times.
-            c->packet_val = c->packet->ToRawPktHdrVal();
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto c = cookie->packet ) {
+            if ( ! c->packet_val )
+                // We cache the built value in case we need it multiple times.
+                c->packet_val = c->packet->ToRawPktHdrVal();
 
-        return c->packet_val;
+            return c->packet_val;
+        }
     }
-    else
-        throw ValueUnavailable("$packet not available");
+
+    throw ValueUnavailable("$packet not available");
 }
 
 hilti::rt::Bool rt::is_orig() {
     auto _ = hilti::rt::profiler::start("zeek/rt/is_orig");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto x = cookie->protocol )
-        return x->is_orig;
-    else
-        throw ValueUnavailable("is_orig() not available in current context");
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto x = cookie->protocol )
+            return x->is_orig;
+    }
+
+    throw ValueUnavailable("is_orig() not available in current context");
 }
 
 std::string rt::uid() {
     auto _ = hilti::rt::profiler::start("zeek/rt/uid");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto c = cookie->protocol ) {
-        // Retrieve the ConnVal() so that we ensure the UID has been set.
-        c->analyzer->ConnVal();
-        return c->analyzer->Conn()->GetUID().Base62("C");
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto c = cookie->protocol ) {
+            // Retrieve the ConnVal() so that we ensure the UID has been set.
+            c->analyzer->ConnVal();
+            return c->analyzer->Conn()->GetUID().Base62("C");
+        }
     }
-    else
-        throw ValueUnavailable("uid() not available in current context");
+
+    throw ValueUnavailable("uid() not available in current context");
 }
 
 std::tuple<hilti::rt::Address, hilti::rt::Port, hilti::rt::Address, hilti::rt::Port> rt::conn_id() {
@@ -395,58 +397,59 @@ std::tuple<hilti::rt::Address, hilti::rt::Port, hilti::rt::Address, hilti::rt::P
         hilti::rt::cannot_be_reached();
     };
 
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto c = cookie->protocol ) {
-        const auto* conn = c->analyzer->Conn();
-        return std::make_tuple(convert_address(conn->OrigAddr()), convert_port(conn->OrigPort(), conn->ConnTransport()),
-                               convert_address(conn->RespAddr()),
-                               convert_port(conn->RespPort(), conn->ConnTransport()));
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto c = cookie->protocol ) {
+            const auto* conn = c->analyzer->Conn();
+            return std::make_tuple(convert_address(conn->OrigAddr()),
+                                   convert_port(conn->OrigPort(), conn->ConnTransport()),
+                                   convert_address(conn->RespAddr()),
+                                   convert_port(conn->RespPort(), conn->ConnTransport()));
+        }
     }
-    else
-        throw ValueUnavailable("conn_id() not available in current context");
+
+    throw ValueUnavailable("conn_id() not available in current context");
 }
 
 void rt::flip_roles() {
     auto _ = hilti::rt::profiler::start("zeek/rt/flip_roles");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    rt::debug(*cookie, "flipping roles");
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        rt::debug(*cookie, "flipping roles");
 
-    if ( auto x = cookie->protocol )
-        x->analyzer->Conn()->FlipRoles();
-    else
-        throw ValueUnavailable("flip_roles() not available in current context");
+        if ( auto x = cookie->protocol )
+            return x->analyzer->Conn()->FlipRoles();
+    }
+
+    throw ValueUnavailable("flip_roles() not available in current context");
 }
 
 hilti::rt::integer::safe<uint64_t> rt::number_packets() {
     auto _ = hilti::rt::profiler::start("zeek/rt/number_packets");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto x = cookie->protocol ) {
-        return x->num_packets;
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto x = cookie->protocol ) {
+            return x->num_packets;
+        }
     }
-    else
-        throw ValueUnavailable("number_packets() not available in current context");
+
+    throw ValueUnavailable("number_packets() not available in current context");
 }
 
 void rt::confirm_protocol() {
     auto _ = hilti::rt::profiler::start("zeek/rt/confirm_protocol");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( cookie->cache.confirmed )
-        return;
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( cookie->cache.confirmed )
+            return;
 
-    if ( auto x = cookie->protocol ) {
-        auto tag = spicy_mgr->tagForProtocolAnalyzer(x->analyzer->GetAnalyzerTag());
-        SPICY_DEBUG(hilti::rt::fmt("confirming protocol %s", tag.AsString()));
-        cookie->cache.confirmed = true;
-        return x->analyzer->AnalyzerConfirmation(tag);
+        if ( auto x = cookie->protocol ) {
+            auto tag = spicy_mgr->tagForProtocolAnalyzer(x->analyzer->GetAnalyzerTag());
+            SPICY_DEBUG(hilti::rt::fmt("confirming protocol %s", tag.AsString()));
+            cookie->cache.confirmed = true;
+            return x->analyzer->AnalyzerConfirmation(tag);
+        }
     }
 
     throw ValueUnavailable("no current connection available");
 }
@@ -471,18 +474,17 @@ void rt::reject_protocol(const std::string& reason) {
 
 void rt::weird(const std::string& id, const std::string& addl) {
     auto _ = hilti::rt::profiler::start("zeek/rt/weird");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( const auto x = cookie->protocol )
-        x->analyzer->Weird(id.c_str(), addl.data());
-    else if ( const auto x = cookie->file )
-        zeek::reporter->Weird(x->analyzer->GetFile(), id.c_str(), addl.data());
-    else if ( const auto x = cookie->packet ) {
-        x->analyzer->Weird(id.c_str(), x->packet, addl.c_str());
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( const auto x = cookie->protocol )
+            return x->analyzer->Weird(id.c_str(), addl.data());
+        else if ( const auto x = cookie->file )
+            return zeek::reporter->Weird(x->analyzer->GetFile(), id.c_str(), addl.data());
+        else if ( const auto x = cookie->packet )
+            return x->analyzer->Weird(id.c_str(), x->packet, addl.c_str());
     }
-    else
-        throw ValueUnavailable("none of $conn, $file, or $packet available for weird reporting");
+
+    throw ValueUnavailable("none of $conn, $file, or $packet available for weird reporting");
 }
 
 void rt::protocol_begin(const std::optional<std::string>& analyzer, const ::hilti::rt::Protocol& proto) {
@@ -499,11 +501,11 @@ void rt::protocol_begin(const std::optional<std::string>& analyzer, const ::hilt
     // doesn't need to track what the other side already did.
 
     auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
+    if ( ! cookie || ! cookie->protocol )
+        throw ValueUnavailable("no current connection available");
 
     auto c = cookie->protocol;
-    if ( ! c )
-        throw ValueUnavailable("no current connection available");
 
     switch ( proto.value() ) {
         case ::hilti::rt::Protocol::TCP: {

@@ -547,12 +549,12 @@ void rt::protocol_begin(const ::hilti::rt::Protocol& proto) { return protocol_be
 
 rt::ProtocolHandle rt::protocol_handle_get_or_create(const std::string& analyzer, const ::hilti::rt::Protocol& proto) {
     auto _ = hilti::rt::profiler::start("zeek/rt/protocol_handle_get_or_create");
 
     auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
+    if ( ! cookie || ! cookie->protocol )
+        throw ValueUnavailable("no current connection available");
 
     auto c = cookie->protocol;
-    if ( ! c )
-        throw ValueUnavailable("no current connection available");
 
     switch ( proto.value() ) {
         case ::hilti::rt::Protocol::TCP: {
@@ -623,11 +625,11 @@ static void protocol_data_in(const hilti::rt::Bool& is_orig, const hilti::rt::By
         const std::optional<rt::ProtocolHandle>& h) {
     auto _ = hilti::rt::profiler::start("zeek/rt/protocol_data_in");
     auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
+    if ( ! cookie || ! cookie->protocol )
+        throw ValueUnavailable("no current connection available");
 
     auto c = cookie->protocol;
-    if ( ! c )
-        throw ValueUnavailable("no current connection available");
 
     // We need to copy the data here to be on the safe side: the streaming
     // input methods expect the data to stay around until they return. At first

@@ -719,11 +721,11 @@ void rt::protocol_gap(const hilti::rt::Bool& is_orig, const hilti::rt::integer::
         const hilti::rt::integer::safe<uint64_t>& len, const std::optional<rt::ProtocolHandle>& h) {
     auto _ = hilti::rt::profiler::start("zeek/rt/protocol_gap");
     auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
+    if ( ! cookie || ! cookie->protocol )
+        throw ValueUnavailable("no current connection available");
 
     auto c = cookie->protocol;
-    if ( ! c )
-        throw ValueUnavailable("no current connection available");
 
     switch ( h->protocol().value() ) {
         case ::hilti::rt::Protocol::TCP: {
@@ -761,25 +763,25 @@ void rt::protocol_gap(const hilti::rt::Bool& is_orig, const hilti::rt::integer::
 
 void rt::protocol_end() {
     auto _ = hilti::rt::profiler::start("zeek/rt/protocol_end");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    auto c = cookie->protocol;
-    if ( ! c )
-        throw ValueUnavailable("no current connection available");
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        auto c = cookie->protocol;
+        if ( ! c )
+            throw ValueUnavailable("no current connection available");
 
-    for ( const auto& i : c->analyzer->GetChildren() )
-        c->analyzer->RemoveChildAnalyzer(i);
+        for ( const auto& i : c->analyzer->GetChildren() )
+            c->analyzer->RemoveChildAnalyzer(i);
+    }
 }
 
 void rt::protocol_handle_close(const ProtocolHandle& handle) {
     auto _ = hilti::rt::profiler::start("zeek/rt/protocol_handle_close");
     auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
+    if ( ! cookie || ! cookie->protocol )
+        throw ValueUnavailable("no current connection available");
 
     auto c = cookie->protocol;
-    if ( ! c )
-        throw ValueUnavailable("no current connection available");
 
     switch ( handle.protocol().value() ) {
         case ::hilti::rt::Protocol::TCP: {
@@ -790,7 +792,14 @@ void rt::protocol_handle_close(const ProtocolHandle& handle) {
             if ( child->IsFinished() || child->Removing() )
                 throw ValueUnavailable(hilti::rt::fmt("child analyzer %s no longer exist", handle));
 
-            child->NextEndOfData(true);
+            auto* tcp_child = dynamic_cast<analyzer::tcp::TCP_ApplicationAnalyzer*>(child);
+            if ( ! tcp_child )
+                throw ValueUnavailable(hilti::rt::fmt("child analyzer %s is not a TCP application analyzer", handle));
+
+            tcp_child->EndpointEOF(true); // For Spicy analyzers, this will trigger Finish() ...
+            child->NextEndOfData(true);   // ... whereas this won't.
+
+            tcp_child->EndpointEOF(false);
+            child->NextEndOfData(false);
 
             c->analyzer->RemoveChildAnalyzer(handle.id());
@@ -827,7 +836,8 @@ rt::cookie::FileState* rt::cookie::FileStateStack::push(std::optional<std::strin
         fid = *fid_provided;
     else {
         auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-        assert(cookie);
+        if ( ! cookie )
+            throw ValueUnavailable("no current connection available");
 
         if ( auto c = cookie->protocol ) {
             auto tag = spicy_mgr->tagForProtocolAnalyzer(c->analyzer->GetAnalyzerTag());
@@ -899,38 +909,38 @@ static void _data_in(const char* data, uint64_t len, std::optional<uint64_t> off
 
 void rt::terminate_session() {
     auto _ = hilti::rt::profiler::start("zeek/rt/terminate_session");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto c = cookie->protocol ) {
-        assert(session_mgr);
-        session_mgr->Remove(c->analyzer->Conn());
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto c = cookie->protocol ) {
+            assert(session_mgr);
+            return session_mgr->Remove(c->analyzer->Conn());
+        }
     }
-    else
-        throw spicy::rt::ValueUnavailable("terminate_session() not available in the current context");
+
+    throw spicy::rt::ValueUnavailable("terminate_session() not available in the current context");
 }
 
 void rt::skip_input() {
     auto _ = hilti::rt::profiler::start("zeek/rt/skip_input");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto p = cookie->protocol )
-        p->analyzer->SetSkip(true);
-    else if ( auto f = cookie->file )
-        f->analyzer->SetSkip(true);
-    else
-        throw spicy::rt::ValueUnavailable("skip() not available in the current context");
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto p = cookie->protocol )
+            return p->analyzer->SetSkip(true);
+        else if ( auto f = cookie->file )
+            return f->analyzer->SetSkip(true);
+    }
+
+    throw spicy::rt::ValueUnavailable("skip() not available in the current context");
 }
 
 std::string rt::fuid() {
     auto _ = hilti::rt::profiler::start("zeek/rt/fuid");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto f = cookie->file ) {
-        if ( auto file = f->analyzer->GetFile() )
-            return file->GetID();
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto f = cookie->file ) {
+            if ( auto file = f->analyzer->GetFile() )
+                return file->GetID();
+        }
     }
 
     throw ValueUnavailable("fuid() not available in current context");
@@ -1003,6 +1013,9 @@ void rt::file_gap(const hilti::rt::integer::safe<uint64_t>& offset, const hilti:
         const std::optional<std::string>& fid) {
     auto _ = hilti::rt::profiler::start("zeek/rt/file_gap");
     auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
+    if ( ! cookie )
+        throw spicy::rt::ValueUnavailable("file_gap() not available in the current context");
+
     auto* fstate = _file_state(cookie, fid);
 
     if ( auto c = cookie->protocol ) {

@@ -1024,13 +1037,15 @@ void rt::file_end(const std::optional<std::string>& fid) {
 
 void rt::forward_packet(const hilti::rt::integer::safe<uint32_t>& identifier) {
     auto _ = hilti::rt::profiler::start("zeek/rt/forward_packet");
-    auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie());
-    assert(cookie);
-
-    if ( auto c = cookie->packet )
-        c->next_analyzer = identifier;
-    else
-        throw ValueUnavailable("no current packet analyzer available");
+    if ( auto cookie = static_cast<Cookie*>(hilti::rt::context::cookie()) ) {
+        if ( auto c = cookie->packet ) {
+            c->next_analyzer = identifier;
+            return;
+        }
+    }
+
+    throw ValueUnavailable("no current packet analyzer available");
 }
 
 hilti::rt::Time rt::network_time() {
@@ -4,10 +4,8 @@
 
 #include <getopt.h>
 
-#include <algorithm>
 #include <memory>
 #include <string>
-#include <type_traits>
 #include <utility>
 #include <vector>

@@ -42,11 +40,10 @@ struct VisitorTypes : public spicy::visitor::PreOrder {
             module = {};
             return;
         }
 
         module = n->scopeID();
         path = n->uid().path;
 
-        if ( is_resolved )
+        if ( is_resolved && ! n->skipImplementation() )
             glue->addSpicyModule(module, path);
     }

@@ -1375,7 +1375,7 @@ bool GlueCompiler::CreateSpicyHook(glue::Event* ev) {
 
     auto attrs = builder()->attributeSet({builder()->attribute("&priority", builder()->integer(ev->priority))});
     auto parameters = hilti::util::transform(ev->parameters, [](const auto& p) { return p.get(); });
-    auto unit_hook = builder()->declarationHook(parameters, body.block(), ::spicy::Engine::All, attrs, meta);
+    auto unit_hook = builder()->declarationHook(parameters, body.block(), attrs, meta);
     auto hook_decl = builder()->declarationUnitHook(ev->hook, unit_hook, meta);
     ev->spicy_module->spicy_module->add(context(), hook_decl);
@@ -9,6 +9,7 @@ zeek_add_subdir_library(
     ProcessStats.cc
+    Utils.cc
     BIFS
     consts.bif
     telemetry.bif)
 
 # We don't need to include the civetweb headers across the whole project, only
@@ -2,27 +2,17 @@
 
 using namespace zeek::telemetry;
 
-Counter::Counter(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept
-    : handle(family->Add(labels)), labels(labels) {
-    if ( callback ) {
-        handle.AddCollectCallback(std::move(callback));
-        has_callback = true;
-    }
-}
+Counter::Counter(FamilyType* family, const prometheus::Labels& labels, detail::CollectCallbackPtr callback) noexcept
+    : family(family), handle(family->Add(labels)), labels(labels), callback(std::move(callback)) {}
 
 double Counter::Value() const noexcept {
-    if ( has_callback ) {
-        // Use Collect() here instead of Value() to correctly handle metrics with
-        // callbacks.
-        auto metric = handle.Collect();
-        return metric.counter.value;
-    }
+    if ( callback )
+        return callback();
 
     return handle.Value();
 }
 
-std::shared_ptr<Counter> CounterFamily::GetOrAdd(Span<const LabelView> labels,
-                                                 prometheus::CollectCallbackPtr callback) {
+std::shared_ptr<Counter> CounterFamily::GetOrAdd(Span<const LabelView> labels, detail::CollectCallbackPtr callback) {
     prometheus::Labels p_labels = detail::BuildPrometheusLabels(labels);
 
     auto check = [&](const std::shared_ptr<Counter>& counter) { return counter->CompareLabels(p_labels); };

@@ -36,6 +26,15 @@ std::shared_ptr<Counter> CounterFamily::GetOrAdd(Span<const LabelView> labels,
 }
 
 std::shared_ptr<Counter> CounterFamily::GetOrAdd(std::initializer_list<LabelView> labels,
-                                                 prometheus::CollectCallbackPtr callback) {
+                                                 detail::CollectCallbackPtr callback) {
     return GetOrAdd(Span{labels.begin(), labels.size()}, std::move(callback));
 }
+
+void CounterFamily::RunCallbacks() {
+    for ( auto& c : counters ) {
+        if ( c->HasCallback() ) {
+            double val = c->RunCallback();
+            c->Set(val);
+        }
+    }
+}
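Under this refactor, a callback-backed counter reads through the stored std::function, and RunCallbacks() pushes the callback result into the underlying prometheus handle (via the Reset()+Increment() trick in the header hunk that follows). A miniature model of that flow, with hypothetical names:

    #include <functional>

    using CollectCallbackPtr = std::function<double()>;

    // Value() prefers the live callback; a periodic Sync() pass copies the
    // result into the stored metric, mirroring CounterFamily::RunCallbacks().
    struct MiniCounter {
        CollectCallbackPtr callback;
        double stored = 0;

        double Value() const { return callback ? callback() : stored; }
        void Sync() {
            if ( callback )
                stored = callback(); // Reset()+Increment() in the real code
        }
    };

    int main() {
        double seen = 0;
        MiniCounter c{[&] { return seen; }};
        seen = 42;
        return c.Value() == 42.0 ? 0 : 1; // reads the live callback value
    }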
@@ -4,7 +4,6 @@
 
 #include <prometheus/counter.h>
 #include <prometheus/family.h>
-#include <cstdint>
 #include <initializer_list>
 #include <memory>

@@ -15,6 +14,12 @@
 
 namespace zeek::telemetry {
 
+namespace detail {
+using CollectCallbackPtr = std::function<double()>;
+}
+
+class CounterFamily;
+
 /**
  * A handle to a metric that can only go up.
  */

@@ -26,7 +31,7 @@ public:
     using FamilyType = prometheus::Family<Handle>;
 
     explicit Counter(FamilyType* family, const prometheus::Labels& labels,
-                     prometheus::CollectCallbackPtr callback = nullptr) noexcept;
+                     detail::CollectCallbackPtr callback = nullptr) noexcept;
 
     /**
      * Increments the value by 1.

@@ -55,10 +60,21 @@ public:
 
     bool CompareLabels(const prometheus::Labels& lbls) const { return labels == lbls; }
 
+    bool HasCallback() const noexcept { return callback != nullptr; }
+    double RunCallback() const { return callback(); }
+
 private:
+    friend class CounterFamily;
+    void Set(double val) {
+        // Counter has no Set(), but we can fake it.
+        handle.Reset();
+        handle.Increment(val);
+    }
+
+    FamilyType* family = nullptr;
     Handle& handle;
     prometheus::Labels labels;
-    bool has_callback = false;
+    detail::CollectCallbackPtr callback;
 };
 
 using CounterPtr = std::shared_ptr<Counter>;
@@ -74,15 +90,17 @@ public:
     * Returns the metrics handle for given labels, creating a new instance
     * lazily if necessary.
     */
-    CounterPtr GetOrAdd(Span<const LabelView> labels, prometheus::CollectCallbackPtr callback = nullptr);
+    CounterPtr GetOrAdd(Span<const LabelView> labels, detail::CollectCallbackPtr callback = nullptr);
 
    /**
     * @copydoc GetOrAdd
     */
-    CounterPtr GetOrAdd(std::initializer_list<LabelView> labels, prometheus::CollectCallbackPtr callback = nullptr);
+    CounterPtr GetOrAdd(std::initializer_list<LabelView> labels, detail::CollectCallbackPtr callback = nullptr);
 
     zeek_int_t MetricType() const noexcept override { return BifEnum::Telemetry::MetricType::COUNTER; }
 
+    void RunCallbacks() override;
+
 private:
     prometheus::Family<prometheus::Counter>* family;
     std::vector<CounterPtr> counters;
@@ -3,26 +3,16 @@
 using namespace zeek::telemetry;
 
 double Gauge::Value() const noexcept {
-    if ( has_callback ) {
-        // Use Collect() here instead of Value() to correctly handle metrics
-        // with callbacks.
-        auto metric = handle.Collect();
-        return metric.gauge.value;
-    }
+    if ( callback )
+        return callback();
 
     return handle.Value();
 }
 
+Gauge::Gauge(FamilyType* family, const prometheus::Labels& labels, detail::CollectCallbackPtr callback) noexcept
+    : family(family), handle(family->Add(labels)), labels(labels), callback(std::move(callback)) {}
+
-Gauge::Gauge(FamilyType* family, const prometheus::Labels& labels, prometheus::CollectCallbackPtr callback) noexcept
-    : handle(family->Add(labels)), labels(labels) {
-    if ( callback ) {
-        handle.AddCollectCallback(std::move(callback));
-        has_callback = true;
-    }
-}
-
-std::shared_ptr<Gauge> GaugeFamily::GetOrAdd(Span<const LabelView> labels, prometheus::CollectCallbackPtr callback) {
+std::shared_ptr<Gauge> GaugeFamily::GetOrAdd(Span<const LabelView> labels, detail::CollectCallbackPtr callback) {
     prometheus::Labels p_labels = detail::BuildPrometheusLabels(labels);
 
     auto check = [&](const std::shared_ptr<Gauge>& gauge) { return gauge->CompareLabels(p_labels); };

@@ -36,6 +26,13 @@ std::shared_ptr<Gauge> GaugeFamily::GetOrAdd(Span<const LabelView> labels, prome
 }
 
 std::shared_ptr<Gauge> GaugeFamily::GetOrAdd(std::initializer_list<LabelView> labels,
-                                             prometheus::CollectCallbackPtr callback) {
+                                             detail::CollectCallbackPtr callback) {
     return GetOrAdd(Span{labels.begin(), labels.size()}, std::move(callback));
 }
+
+void GaugeFamily::RunCallbacks() {
+    for ( const auto& g : gauges ) {
+        if ( g->HasCallback() )
+            g->Set(g->RunCallback());
+    }
+}
@@ -4,7 +4,7 @@
 
 #include <prometheus/family.h>
 #include <prometheus/gauge.h>
-#include <cstdint>
+#include <unistd.h>
 #include <initializer_list>
 #include <memory>

@@ -15,6 +15,10 @@
 
 namespace zeek::telemetry {
 
+namespace detail {
+using CollectCallbackPtr = std::function<double()>;
+}
+
 /**
  * A handle to a metric that can count up and down.
 */

@@ -26,7 +30,7 @@ public:
     using FamilyType = prometheus::Family<Handle>;
 
     explicit Gauge(FamilyType* family, const prometheus::Labels& labels,
-                   prometheus::CollectCallbackPtr callback = nullptr) noexcept;
+                   detail::CollectCallbackPtr callback = nullptr) noexcept;
 
     /**
      * Increments the value by 1.

@@ -57,6 +61,11 @@ public:
     */
    void Dec(double amount) noexcept { handle.Decrement(amount); }
 
+   /**
+    * Set the value by @p val.
+    */
+   void Set(double val) noexcept { handle.Set(val); }
+
    /**
     * Decrements the value by 1.
     * @return The new value.

@@ -73,10 +82,14 @@ public:
 
    bool CompareLabels(const prometheus::Labels& lbls) const { return labels == lbls; }
 
+   bool HasCallback() const noexcept { return callback != nullptr; }
+   double RunCallback() const { return callback(); }
+
 private:
+   FamilyType* family = nullptr;
    Handle& handle;
    prometheus::Labels labels;
-   bool has_callback = false;
+   detail::CollectCallbackPtr callback;
 };
 
 using GaugePtr = std::shared_ptr<Gauge>;
@@ -89,18 +102,20 @@ public:
     * Returns the metrics handle for given labels, creating a new instance
     * lazily if necessary.
     */
-   GaugePtr GetOrAdd(Span<const LabelView> labels, prometheus::CollectCallbackPtr callback = nullptr);
+   GaugePtr GetOrAdd(Span<const LabelView> labels, detail::CollectCallbackPtr callback = nullptr);
 
    /**
    * @copydoc GetOrAdd
    */
-   GaugePtr GetOrAdd(std::initializer_list<LabelView> labels, prometheus::CollectCallbackPtr callback = nullptr);
+   GaugePtr GetOrAdd(std::initializer_list<LabelView> labels, detail::CollectCallbackPtr callback = nullptr);
 
    zeek_int_t MetricType() const noexcept override { return BifEnum::Telemetry::MetricType::GAUGE; }
 
    GaugeFamily(prometheus::Family<prometheus::Gauge>* family, Span<const std::string_view> labels)
        : MetricFamily(labels), family(family) {}
 
+   void RunCallbacks() override;
+
 private:
    prometheus::Family<prometheus::Gauge>* family;
    std::vector<GaugePtr> gauges;