Compare commits


No commits in common. "master" and "v8.0.0-dev" have entirely different histories.

2492 changed files with 110757 additions and 436563 deletions


@@ -18,8 +18,6 @@ spicy_ssl_config: &SPICY_SSL_CONFIG --build-type=release --disable-broker-tests
asan_sanitizer_config: &ASAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=address --enable-fuzzers --enable-coverage --ccache --enable-werror
ubsan_sanitizer_config: &UBSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=undefined --enable-fuzzers --ccache --enable-werror
tsan_sanitizer_config: &TSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=thread --enable-fuzzers --ccache --enable-werror
macos_config: &MACOS_CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror --with-krb5=/opt/homebrew/opt/krb5
clang_tidy_config: &CLANG_TIDY_CONFIG --build-type=debug --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror --enable-clang-tidy
resources_template: &RESOURCES_TEMPLATE
cpu: *CPUS
@@ -35,7 +33,6 @@ macos_environment: &MACOS_ENVIRONMENT
ZEEK_CI_BTEST_JOBS: 12
# No permission to write to default location of /zeek
CIRRUS_WORKING_DIR: /tmp/zeek
ZEEK_CI_CONFIGURE_FLAGS: *MACOS_CONFIG
freebsd_resources_template: &FREEBSD_RESOURCES_TEMPLATE
cpu: 8
@@ -48,108 +45,48 @@ freebsd_environment: &FREEBSD_ENVIRONMENT
ZEEK_CI_CPUS: 8
ZEEK_CI_BTEST_JOBS: 8
only_if_pr_master_release: &ONLY_IF_PR_MASTER_RELEASE
builds_only_if_template: &BUILDS_ONLY_IF_TEMPLATE
# Rules for skipping builds:
# - Do not run builds for anything that's cron triggered
# - Don't do darwin builds on zeek-security repo because they use up a ton of compute credits.
# - Always build PRs, but not if they come from dependabot
# - Always build master and release/* builds from the main repo
only_if: >
( $CIRRUS_CRON == '' ) &&
( ( $CIRRUS_PR != '' && $CIRRUS_BRANCH !=~ 'dependabot/.*' ) ||
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
( $CIRRUS_CRON != 'weekly' ) &&
( $CIRRUS_PR != '' ||
$CIRRUS_BRANCH == 'master' ||
$CIRRUS_BRANCH =~ 'release/.*'
(
$CIRRUS_BRANCH == 'master' ||
$CIRRUS_BRANCH =~ 'release/.*'
)
)
) )
only_if_pr_master_release_nightly: &ONLY_IF_PR_MASTER_RELEASE_NIGHTLY
skip_task_on_pr: &SKIP_TASK_ON_PR
# Skip this task on PRs if it does not have the fullci label;
# it continues to run for direct pushes to master/release.
skip: >
! ( $CIRRUS_PR == '' || $CIRRUS_PR_LABELS =~ '.*fullci.*' )
zam_skip_task_on_pr: &ZAM_SKIP_TASK_ON_PR
# Skip this task on PRs unless it has the `fullci` or `zamci` label
# or files in src/script_opt/** were modified.
# It continues to run for direct pushes to master/release, as
# CIRRUS_PR will be empty.
skip: >
! ( $CIRRUS_PR == '' || $CIRRUS_PR_LABELS =~ '.*fullci.*' || $CIRRUS_PR_LABELS =~ '.*zamci.*' || changesInclude('src/script_opt/**') )
benchmark_only_if_template: &BENCHMARK_ONLY_IF_TEMPLATE
# only_if condition for cron-triggered benchmarking tests.
# These currently do not run for release/.*
only_if: >
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
( $CIRRUS_CRON != 'weekly' ) &&
( $CIRRUS_PR != '' ||
$CIRRUS_BRANCH == 'master' ||
$CIRRUS_BRANCH =~ 'release/.*' ||
( $CIRRUS_CRON == 'nightly' && $CIRRUS_BRANCH == 'master' )
)
)
only_if_pr_release_and_nightly: &ONLY_IF_PR_RELEASE_AND_NIGHTLY
only_if: >
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
( $CIRRUS_CRON != 'weekly' ) &&
( $CIRRUS_PR != '' ||
$CIRRUS_BRANCH =~ 'release/.*' ||
( $CIRRUS_CRON == 'nightly' && $CIRRUS_BRANCH == 'master' )
)
)
only_if_pr_nightly: &ONLY_IF_PR_NIGHTLY
only_if: >
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
( $CIRRUS_CRON != 'weekly' ) &&
( $CIRRUS_PR != '' ||
( $CIRRUS_CRON == 'nightly' && $CIRRUS_BRANCH == 'master' )
)
)
only_if_release_tag_nightly: &ONLY_IF_RELEASE_TAG_NIGHTLY
only_if: >
( ( $CIRRUS_REPO_NAME == 'zeek' ) &&
( $CIRRUS_CRON != 'weekly' ) &&
( ( $CIRRUS_BRANCH =~ 'release/.*' && $CIRRUS_TAG =~ 'v[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$' ) ||
( $CIRRUS_CRON == 'nightly' && $CIRRUS_BRANCH == 'master' )
)
)
only_if_nightly: &ONLY_IF_NIGHTLY
only_if: >
( ( $CIRRUS_REPO_NAME == 'zeek' ) &&
( $CIRRUS_CRON == 'nightly' && $CIRRUS_BRANCH == 'master' )
)
only_if_weekly: &ONLY_IF_WEEKLY
only_if: >
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
( $CIRRUS_CRON == 'weekly' && $CIRRUS_BRANCH == 'master' )
)
skip_if_pr_skip_all: &SKIP_IF_PR_SKIP_ALL
skip: >
( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
skip_if_pr_not_full_ci: &SKIP_IF_PR_NOT_FULL_CI
skip: >
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: Full.*") ||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
)
skip_if_pr_not_full_or_benchmark: &SKIP_IF_PR_NOT_FULL_OR_BENCHMARK
skip: >
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: (Full|Benchmark).*" ) ||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
)
skip_if_pr_not_full_or_cluster_test: &SKIP_IF_PR_NOT_FULL_OR_CLUSTER_TEST
skip: >
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: (Full|Cluster Test).*" ) ||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
)
skip_if_pr_not_full_or_zam: &SKIP_IF_PR_NOT_FULL_OR_ZAM
skip: >
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: (Full|ZAM).*" ) ||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
)
skip_if_pr_not_full_or_zeekctl: &SKIP_IF_PR_NOT_FULL_OR_ZEEKCTL
skip: >
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: (Full|Zeekctl).*" ) ||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
)
skip_if_pr_not_full_or_windows: &SKIP_IF_PR_NOT_FULL_OR_WINDOWS
skip: >
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: (Full|Windows).*" ) ||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
)
( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
( $CIRRUS_CRON == 'benchmark-nightly' ||
$CIRRUS_PR_LABELS =~ '.*fullci.*' ||
$CIRRUS_PR_LABELS =~ '.*benchmark.*' )
ci_template: &CI_TEMPLATE
<< : *BUILDS_ONLY_IF_TEMPLATE
# Default timeout is 60 minutes, Cirrus hard limit is 120 minutes for free
# tasks, so may as well ask for full time.
timeout_in: 120m
@@ -193,7 +130,6 @@ ci_template: &CI_TEMPLATE
env:
CIRRUS_WORKING_DIR: /zeek
CIRRUS_LOG_TIMESTAMP: true
ZEEK_CI_CPUS: *CPUS
ZEEK_CI_BTEST_JOBS: *BTEST_JOBS
ZEEK_CI_BTEST_RETRIES: *BTEST_RETRIES
@@ -238,88 +174,27 @@ env:
# Linux EOL timelines: https://linuxlifecycle.com/
# Fedora (~13 months): https://fedoraproject.org/wiki/Fedora_Release_Life_Cycle
fedora42_task:
container:
# Fedora 42 EOL: Around May 2026
dockerfile: ci/fedora-42/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_SKIP_ALL
env:
ZEEK_CI_CONFIGURE_FLAGS: *BINARY_CONFIG
fedora41_task:
container:
# Fedora 41 EOL: Around Nov 2025
dockerfile: ci/fedora-41/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
fedora40_task:
container:
# Fedora 40 EOL: Around May 2025
dockerfile: ci/fedora-40/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
centosstream9_task:
container:
# Stream 9 EOL: 31 May 2027
# Stream 9 EOL: Around Dec 2027
dockerfile: ci/centos-stream-9/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
centosstream10_task:
container:
# Stream 10 EOL: 01 January 2030
dockerfile: ci/centos-stream-10/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
debian13_task:
container:
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
arm_debian13_task:
arm_container:
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_SKIP_ALL
debian13_static_task:
container:
# Just use a recent/common distro to run a static compile test.
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
env:
ZEEK_CI_CONFIGURE_FLAGS: *STATIC_CONFIG
debian13_binary_task:
container:
# Just use a recent/common distro to run binary mode compile test.
# As of 2024-03, the used configure flags are equivalent to the flags
# that we use to create binary packages.
# Just use a recent/common distro to run a static compile test.
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
env:
ZEEK_CI_CONFIGURE_FLAGS: *BINARY_CONFIG
debian12_task:
container:
@@ -327,8 +202,48 @@ debian12_task:
dockerfile: ci/debian-12/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
arm_debian12_task:
arm_container:
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
env:
ZEEK_CI_CONFIGURE_FLAGS: *NO_SPICY_CONFIG
debian12_static_task:
container:
# Just use a recent/common distro to run a static compile test.
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
env:
ZEEK_CI_CONFIGURE_FLAGS: *STATIC_CONFIG
debian12_binary_task:
container:
# Just use a recent/common distro to run binary mode compile test.
# As of 2024-03, the used configure flags are equivalent to the flags
# that we use to create binary packages.
# Just use a recent/common distro to run a static compile test.
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
env:
ZEEK_CI_CONFIGURE_FLAGS: *BINARY_CONFIG
debian11_task:
container:
# Debian 11 EOL: June 2026
dockerfile: ci/debian-11/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
opensuse_leap_15_6_task:
container:
@@ -336,8 +251,6 @@ opensuse_leap_15_6_task:
dockerfile: ci/opensuse-leap-15.6/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
opensuse_tumbleweed_task:
container:
@@ -346,63 +259,30 @@ opensuse_tumbleweed_task:
<< : *RESOURCES_TEMPLATE
prepare_script: ./ci/opensuse-tumbleweed/prepare.sh
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
# << : *SKIP_TASK_ON_PR
weekly_current_gcc_task:
ubuntu24_10_task:
container:
# Opensuse Tumbleweed has no EOL
dockerfile: ci/opensuse-tumbleweed/Dockerfile
<< : *RESOURCES_TEMPLATE
prepare_script: ./ci/opensuse-tumbleweed/prepare-weekly.sh
<< : *CI_TEMPLATE
<< : *ONLY_IF_WEEKLY
env:
ZEEK_CI_COMPILER: gcc
weekly_current_clang_task:
container:
# Opensuse Tumbleweed has no EOL
dockerfile: ci/opensuse-tumbleweed/Dockerfile
<< : *RESOURCES_TEMPLATE
prepare_script: ./ci/opensuse-tumbleweed/prepare-weekly.sh
<< : *CI_TEMPLATE
<< : *ONLY_IF_WEEKLY
env:
ZEEK_CI_COMPILER: clang
ubuntu25_04_task:
container:
# Ubuntu 25.04 EOL: 2026-01-31
dockerfile: ci/ubuntu-25.04/Dockerfile
# Ubuntu 24.10 EOL: 2025-07-30
dockerfile: ci/ubuntu-24.10/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
<< : *SKIP_TASK_ON_PR
ubuntu24_04_task:
ubuntu24_task:
container:
# Ubuntu 24.04 EOL: Jun 2029
dockerfile: ci/ubuntu-24.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_SKIP_ALL
env:
ZEEK_CI_CREATE_ARTIFACT: 1
upload_binary_artifacts:
path: build.tgz
benchmark_script: ./ci/benchmark.sh
# Same as above, but running the ZAM tests instead of the regular tests.
ubuntu24_04_zam_task:
ubuntu24_zam_task:
container:
# Ubuntu 24.04 EOL: Jun 2029
dockerfile: ci/ubuntu-24.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_OR_ZAM
env:
ZEEK_CI_SKIP_UNIT_TESTS: 1
ZEEK_CI_SKIP_EXTERNAL_BTESTS: 1
@@ -411,41 +291,44 @@ ubuntu24_04_zam_task:
ZEEK_CI_BTEST_JOBS: 3
# Same as above, but using Clang and libc++
ubuntu24_04_clang_libcpp_task:
ubuntu24_clang_libcpp_task:
container:
# Ubuntu 24.04 EOL: Jun 2029
dockerfile: ci/ubuntu-24.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
<< : *SKIP_TASK_ON_PR
env:
CC: clang-19
CXX: clang++-19
CC: clang-18
CXX: clang++-18
CXXFLAGS: -stdlib=libc++
ubuntu24_04_clang_tidy_task:
ubuntu22_task:
container:
# Ubuntu 24.04 EOL: Jun 2029
dockerfile: ci/ubuntu-24.04/Dockerfile
# Ubuntu 22.04 EOL: June 2027
dockerfile: ci/ubuntu-22.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
env:
CC: clang-19
CXX: clang++-19
ZEEK_CI_CONFIGURE_FLAGS: *CLANG_TIDY_CONFIG
ZEEK_CI_CREATE_ARTIFACT: 1
upload_binary_artifacts:
path: build.tgz
benchmark_script: ./ci/benchmark.sh
# Run on PRs, merges to master and release/.* and benchmark-nightly cron.
only_if: >
( $CIRRUS_PR != '' && $CIRRUS_BRANCH !=~ 'dependabot/.*' ) ||
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
$CIRRUS_BRANCH == 'master' ||
$CIRRUS_BRANCH =~ 'release/.*' ||
$CIRRUS_CRON == 'benchmark-nightly' )
# Also enable Spicy SSL for this
ubuntu24_04_spicy_task:
ubuntu22_spicy_task:
container:
# Ubuntu 24.04 EOL: Jun 2029
dockerfile: ci/ubuntu-24.04/Dockerfile
# Ubuntu 22.04 EOL: April 2027
dockerfile: ci/ubuntu-22.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_OR_BENCHMARK
env:
ZEEK_CI_CREATE_ARTIFACT: 1
ZEEK_CI_CONFIGURE_FLAGS: *SPICY_SSL_CONFIG
@@ -453,33 +336,26 @@ ubuntu24_04_spicy_task:
upload_binary_artifacts:
path: build.tgz
benchmark_script: ./ci/benchmark.sh
<< : *BENCHMARK_ONLY_IF_TEMPLATE
ubuntu24_04_spicy_head_task:
ubuntu22_spicy_head_task:
container:
# Ubuntu 24.04 EOL: Jun 2029
dockerfile: ci/ubuntu-24.04/Dockerfile
# Ubuntu 22.04 EOL: April 2027
dockerfile: ci/ubuntu-22.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE_NIGHTLY
<< : *SKIP_IF_PR_NOT_FULL_OR_BENCHMARK
env:
ZEEK_CI_CREATE_ARTIFACT: 1
ZEEK_CI_CONFIGURE_FLAGS: *SPICY_SSL_CONFIG
# Pull auxil/spicy to the latest head version. May or may not build.
ZEEK_CI_PREBUILD_COMMAND: 'cd auxil/spicy && git fetch && git reset --hard origin/main && git submodule update --init --recursive'
spicy_install_analyzers_script: ./ci/spicy-install-analyzers.sh
upload_binary_artifacts:
path: build.tgz
benchmark_script: ./ci/benchmark.sh
ubuntu22_04_task:
container:
# Ubuntu 22.04 EOL: June 2027
dockerfile: ci/ubuntu-22.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
# Don't run this job on release branches. It tests against spicy HEAD, which
# will frequently require other fixes that won't be in a release branch.
skip: $CIRRUS_BRANCH =~ 'release/.*'
<< : *BENCHMARK_ONLY_IF_TEMPLATE
alpine_task:
container:
@@ -489,8 +365,6 @@ alpine_task:
dockerfile: ci/alpine/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
# Cirrus only supports the following macos runner currently, selecting
# anything else automatically upgrades to this one.
@@ -503,8 +377,6 @@ macos_sequoia_task:
image: ghcr.io/cirruslabs/macos-runner:sequoia
prepare_script: ./ci/macos/prepare.sh
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_SKIP_ALL
<< : *MACOS_ENVIRONMENT
# FreeBSD EOL timelines: https://www.freebsd.org/security/#sup
@@ -516,8 +388,6 @@ freebsd14_task:
prepare_script: ./ci/freebsd/prepare.sh
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_SKIP_ALL
<< : *FREEBSD_ENVIRONMENT
freebsd13_task:
@@ -528,8 +398,7 @@ freebsd13_task:
prepare_script: ./ci/freebsd/prepare.sh
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
<< : *SKIP_TASK_ON_PR
<< : *FREEBSD_ENVIRONMENT
asan_sanitizer_task:
@@ -539,8 +408,6 @@ asan_sanitizer_task:
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_SKIP_ALL
test_fuzzers_script: ./ci/test-fuzzers.sh
coverage_script: ./ci/upload-coverage.sh
env:
@@ -557,8 +424,6 @@ asan_sanitizer_zam_task:
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_NIGHTLY
<< : *SKIP_IF_PR_NOT_FULL_OR_ZAM
env:
ZEEK_CI_CONFIGURE_FLAGS: *ASAN_SANITIZER_CONFIG
ASAN_OPTIONS: detect_leaks=1:detect_odr_violation=0
@@ -567,6 +432,7 @@ asan_sanitizer_zam_task:
ZEEK_CI_BTEST_EXTRA_ARGS: -a zam
# Use a lower number of jobs due to OOM issues with ZAM tasks
ZEEK_CI_BTEST_JOBS: 3
<< : *ZAM_SKIP_TASK_ON_PR
ubsan_sanitizer_task:
container:
@@ -575,12 +441,11 @@ ubsan_sanitizer_task:
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_NIGHTLY
<< : *SKIP_IF_PR_NOT_FULL_CI
<< : *SKIP_TASK_ON_PR
test_fuzzers_script: ./ci/test-fuzzers.sh
env:
CC: clang-19
CXX: clang++-19
CC: clang-18
CXX: clang++-18
CXXFLAGS: -DZEEK_DICT_DEBUG
ZEEK_CI_CONFIGURE_FLAGS: *UBSAN_SANITIZER_CONFIG
ZEEK_TAILORED_UB_CHECKS: 1
@@ -592,11 +457,9 @@ ubsan_sanitizer_zam_task:
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_NIGHTLY
<< : *SKIP_IF_PR_NOT_FULL_OR_ZAM
env:
CC: clang-19
CXX: clang++-19
CC: clang-18
CXX: clang++-18
ZEEK_CI_CONFIGURE_FLAGS: *UBSAN_SANITIZER_CONFIG
ZEEK_TAILORED_UB_CHECKS: 1
UBSAN_OPTIONS: print_stacktrace=1
@@ -605,6 +468,7 @@ ubsan_sanitizer_zam_task:
ZEEK_CI_BTEST_EXTRA_ARGS: -a zam
# Use a lower number of jobs due to OOM issues with ZAM tasks
ZEEK_CI_BTEST_JOBS: 3
<< : *ZAM_SKIP_TASK_ON_PR
tsan_sanitizer_task:
container:
@@ -613,11 +477,10 @@ tsan_sanitizer_task:
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *ONLY_IF_PR_NIGHTLY
<< : *SKIP_IF_PR_NOT_FULL_CI
<< : *SKIP_TASK_ON_PR
env:
CC: clang-19
CXX: clang++-19
CC: clang-18
CXX: clang++-18
ZEEK_CI_CONFIGURE_FLAGS: *TSAN_SANITIZER_CONFIG
ZEEK_CI_DISABLE_SCRIPT_PROFILING: 1
# If this is defined directly in the environment, configure fails to find
@@ -638,12 +501,11 @@ windows_task:
prepare_script: ci/windows/prepare.cmd
build_script: ci/windows/build.cmd
test_script: ci/windows/test.cmd
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_OR_WINDOWS
env:
ZEEK_CI_CPUS: 8
# Give verbose error output on a test failure.
CTEST_OUTPUT_ON_FAILURE: 1
<< : *BUILDS_ONLY_IF_TEMPLATE
# Container images
@@ -724,18 +586,22 @@ arm64_container_image_docker_builder:
env:
CIRRUS_ARCH: arm64
<< : *DOCKER_BUILD_TEMPLATE
<< : *ONLY_IF_RELEASE_TAG_NIGHTLY
<< : *SKIP_TASK_ON_PR
amd64_container_image_docker_builder:
env:
CIRRUS_ARCH: amd64
<< : *DOCKER_BUILD_TEMPLATE
<< : *ONLY_IF_PR_MASTER_RELEASE_NIGHTLY
<< : *SKIP_IF_PR_NOT_FULL_OR_CLUSTER_TEST
<< : *SKIP_TASK_ON_PR
container_image_manifest_docker_builder:
cpu: 1
<< : *ONLY_IF_RELEASE_TAG_NIGHTLY
# Push master builds to zeek/zeek-dev, or tagged release branches to zeek/zeek
only_if: >
( $CIRRUS_CRON == '' ) &&
( $CIRRUS_REPO_FULL_NAME == 'zeek/zeek' &&
( $CIRRUS_BRANCH == 'master' ||
$CIRRUS_TAG =~ 'v[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$' ) )
env:
DOCKER_USERNAME: ENCRYPTED[!505b3dee552a395730a7e79e6aab280ffbe1b84ec62ae7616774dfefe104e34f896d2e20ce3ad701f338987c13c33533!]
DOCKER_PASSWORD: ENCRYPTED[!6c4b2f6f0e5379ef1091719cc5d2d74c90cfd2665ac786942033d6d924597ffb95dbbc1df45a30cc9ddeec76c07ac620!]
@@ -814,7 +680,8 @@ container_image_manifest_docker_builder:
# images from the public ECR repository to stay within free-tier bounds.
public_ecr_cleanup_docker_builder:
cpu: 1
<< : *ONLY_IF_NIGHTLY
only_if: >
$CIRRUS_CRON == '' && $CIRRUS_REPO_FULL_NAME == 'zeek/zeek' && $CIRRUS_BRANCH == 'master'
env:
AWS_ACCESS_KEY_ID: ENCRYPTED[!eff52f6442e1bc78bce5b15a23546344df41bf519f6201924cb70c7af12db23f442c0e5f2b3687c2d856ceb11fcb8c49!]
AWS_SECRET_ACCESS_KEY: ENCRYPTED[!748bc302dd196140a5fa8e89c9efd148882dc846d4e723787d2de152eb136fa98e8dea7e6d2d6779d94f72dd3c088228!]
@@ -854,23 +721,27 @@ cluster_testing_docker_builder:
path: "testing/external/zeek-testing-cluster/.tmp/**"
depends_on:
- amd64_container_image
<< : *ONLY_IF_PR_RELEASE_AND_NIGHTLY
<< : *SKIP_IF_PR_NOT_FULL_OR_CLUSTER_TEST
<< : *SKIP_TASK_ON_PR
# Test zeekctl upon master and release pushes and also when
# a PR has a "CI: Zeekctl" or "CI: Full" label.
# a PR has a zeekctlci or fullci label.
#
# Also triggers on CIRRUS_CRON == 'zeekctl-nightly' if that is configured
# through the Cirrus Web UI.
zeekctl_debian12_task:
cpu: *CPUS
memory: *MEMORY
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_OR_ZEEKCTL
only_if: >
( $CIRRUS_CRON == 'zeekctl-nightly' ) ||
( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS =~ '.*(zeekctlci|fullci).*' ) ||
( $CIRRUS_REPO_NAME == 'zeek' && (
$CIRRUS_BRANCH == 'master' ||
$CIRRUS_BRANCH =~ 'release/.*' )
)
container:
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
<< : *RESOURCES_TEMPLATE
sync_submodules_script: git submodule update --recursive --init
always:
@@ -893,19 +764,15 @@ include_plugins_debian12_task:
cpu: *CPUS
memory: *MEMORY
container:
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
<< : *RESOURCES_TEMPLATE
sync_submodules_script: git submodule update --recursive --init
fetch_external_plugins_script:
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/zeek/zeek-perf-support.git
- cd zeek-perf-support && echo "Cloned $(git rev-parse HEAD) for $(basename $(pwd))"
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/zeek/zeek-more-hashes.git
- cd zeek-more-hashes && echo "Cloned $(git rev-parse HEAD) for $(basename $(pwd))"
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/zeek/zeek-cluster-backend-nats.git
- cd zeek-cluster-backend-nats && echo "Cloned $(git rev-parse HEAD) for $(basename $(pwd))"
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/SeisoLLC/zeek-kafka.git
- cd zeek-kafka && echo "Cloned $(git rev-parse HEAD) for $(basename $(pwd))"
always:
ccache_cache:
folder: /tmp/ccache
@@ -925,5 +792,5 @@ include_plugins_debian12_task:
on_failure:
upload_include_plugins_testing_artifacts:
path: "testing/builtin-plugins/.tmp/**"
<< : *ONLY_IF_PR_MASTER_RELEASE
<< : *SKIP_IF_PR_NOT_FULL_CI
<< : *BUILDS_ONLY_IF_TEMPLATE
<< : *SKIP_TASK_ON_PR


@@ -1,76 +1,5 @@
Checks: [-*,
Checks: '-*,
bugprone-*,
performance-*,
modernize-*,
readability-isolate-declaration,
readability-container-contains,
# Enable a very limited number of the cppcoreguidelines checkers.
# See the notes for some of the rest of them below.
cppcoreguidelines-macro-usage,
cppcoreguidelines-misleading-capture-default-by-value,
cppcoreguidelines-virtual-class-destructor,
# Skipping these temporarily because they are very noisy
-bugprone-forward-declaration-namespace,
-bugprone-narrowing-conversions,
-bugprone-unchecked-optional-access,
-performance-unnecessary-value-param,
-modernize-use-equals-default,
-modernize-use-integer-sign-comparison,
# The following cause either lots of pointless or advisory warnings
-bugprone-easily-swappable-parameters,
-bugprone-nondeterministic-pointer-iteration-order,
# bifcl generates a lot of code with double underscores in identifier names.
# ZAM uses a few identifiers that start with underscores or have
# double-underscores in their names.
-bugprone-reserved-identifier,
# bifcl generates almost every switch statement without a default case
# and so this one generates a lot of warnings.
-bugprone-switch-missing-default-case,
# These report warnings that are rather difficult to fix or are things
# we simply don't want to fix.
-bugprone-undefined-memory-manipulation,
-bugprone-pointer-arithmetic-on-polymorphic-object,
-bugprone-empty-catch,
-bugprone-exception-escape,
-bugprone-suspicious-include,
-modernize-avoid-c-arrays,
-modernize-concat-nested-namespaces,
-modernize-raw-string-literal,
-modernize-use-auto,
-modernize-use-nodiscard,
-modernize-use-trailing-return-type,
-modernize-use-designated-initializers,
# This one returns a bunch of findings in DFA and the sqlite library.
# We're unlikely to fix either of them.
-performance-no-int-to-ptr,
# These cppcoreguidelines checkers are things we should investigate
# and possibly fix, but there are so many findings that we're holding
# off doing it for now.
#cppcoreguidelines-init-variables,
#cppcoreguidelines-prefer-member-initializer,
#cppcoreguidelines-pro-type-member-init,
#cppcoreguidelines-pro-type-cstyle-cast,
#cppcoreguidelines-pro-type-static-cast-downcast,
#cppcoreguidelines-special-member-functions,
# These are features in newer versions of C++ that we don't have
# access to yet.
-modernize-use-std-format,
-modernize-use-std-print,
]
HeaderFilterRegex: '.h'
ExcludeHeaderFilterRegex: '.*(auxil|3rdparty)/.*'
SystemHeaders: false
CheckOptions:
- key: modernize-use-default-member-init.UseAssignment
value: 'true'
WarningsAsErrors: '*'
clang-analyzer-*,
performance-*'


@ -33,6 +33,3 @@ f5a76c1aedc7f8886bc6abef0dfaa8065684b1f6
# clang-format: Format JSON with clang-format
e6256446ddef5c5d5240eefff974556f2e12ac46
# analyzer/protocol: Reformat with spicy-format
d70bcd07b9b26036b16092fe950eca40e2f5a032


@@ -16,7 +16,7 @@ jobs:
generate:
permissions:
contents: write # for Git to git push
if: "github.repository == 'zeek/zeek' && contains(github.event.pull_request.labels.*.name, 'CI: Skip All') == false"
if: github.repository == 'zeek/zeek'
runs-on: ubuntu-24.04
steps:
@@ -66,14 +66,16 @@ jobs:
make \
python3 \
python3-dev \
python3-pip \
python3-pip\
sqlite3 \
swig \
zlib1g-dev
python3 -m venv ci-docs-venv
source ci-docs-venv/bin/activate
pip3 install -r doc/requirements.txt
pip3 install pre-commit
# Many distros adhere to PEP 394's recommendation for `python` =
# `python2` so this is a simple workaround until we drop Python 2
# support and explicitly use `python3` for all invocations.
sudo ln -sf /usr/bin/python3 /usr/local/bin/python
sudo pip3 install --break-system-packages -r doc/requirements.txt
sudo pip3 install --break-system-packages pre-commit
- name: ccache
uses: hendrikmuhs/ccache-action@v1.2
@@ -110,7 +112,6 @@ jobs:
- name: Generate Docs
run: |
source ci-docs-venv/bin/activate
git config --global user.name zeek-bot
git config --global user.email info@zeek.org

.gitmodules

@@ -1,6 +1,9 @@
[submodule "auxil/zeek-aux"]
path = auxil/zeek-aux
url = https://github.com/zeek/zeek-aux
[submodule "auxil/binpac"]
path = auxil/binpac
url = https://github.com/zeek/binpac
[submodule "auxil/zeekctl"]
path = auxil/zeekctl
url = https://github.com/zeek/zeekctl
@@ -10,12 +13,18 @@
[submodule "cmake"]
path = cmake
url = https://github.com/zeek/cmake
[submodule "src/3rdparty"]
path = src/3rdparty
url = https://github.com/zeek/zeek-3rdparty
[submodule "auxil/broker"]
path = auxil/broker
url = https://github.com/zeek/broker
[submodule "auxil/netcontrol-connectors"]
path = auxil/netcontrol-connectors
url = https://github.com/zeek/zeek-netcontrol
[submodule "auxil/bifcl"]
path = auxil/bifcl
url = https://github.com/zeek/bifcl
[submodule "doc"]
path = doc
url = https://github.com/zeek/zeek-docs
@@ -37,6 +46,9 @@
[submodule "auxil/zeek-client"]
path = auxil/zeek-client
url = https://github.com/zeek/zeek-client
[submodule "auxil/gen-zam"]
path = auxil/gen-zam
url = https://github.com/zeek/gen-zam
[submodule "auxil/c-ares"]
path = auxil/c-ares
url = https://github.com/c-ares/c-ares
@@ -46,6 +58,12 @@
[submodule "auxil/spicy"]
path = auxil/spicy
url = https://github.com/zeek/spicy
[submodule "auxil/filesystem"]
path = auxil/filesystem
url = https://github.com/gulrak/filesystem.git
[submodule "auxil/zeek-af_packet-plugin"]
path = auxil/zeek-af_packet-plugin
url = https://github.com/zeek/zeek-af_packet-plugin.git
[submodule "auxil/libunistd"]
path = auxil/libunistd
url = https://github.com/zeek/libunistd
@@ -63,7 +81,7 @@
url = https://github.com/zeromq/cppzmq
[submodule "src/cluster/websocket/auxil/IXWebSocket"]
path = src/cluster/websocket/auxil/IXWebSocket
url = https://github.com/machinezone/IXWebSocket
url = https://github.com/zeek/IXWebSocket.git
[submodule "auxil/expected-lite"]
path = auxil/expected-lite
url = https://github.com/martinmoene/expected-lite.git


@@ -10,7 +10,7 @@ repos:
language: python
files: '\.(h|c|cpp|cc|spicy|evt)$'
types: [file]
exclude: '^(testing/btest/(Baseline|plugins|spicy|scripts)/.*|testing/builtin-plugins/.*|src/3rdparty/.*)$'
exclude: '^(testing/btest/(Baseline|plugins|spicy|scripts)/.*|testing/builtin-plugins/.*)$'
- id: btest-command-commented
name: Check that all BTest command lines are commented out
@@ -19,26 +19,25 @@ repos:
files: '^testing/btest/.*$'
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v20.1.8
rev: v20.1.0
hooks:
- id: clang-format
types_or:
- "c"
- "c++"
- "json"
exclude: '^src/3rdparty/.*'
- repo: https://github.com/maxwinterstein/shfmt-py
rev: v3.12.0.1
rev: v3.11.0.2
hooks:
- id: shfmt
args: ["-w", "-i", "4", "-ci"]
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.12.8
rev: v0.11.4
hooks:
- id: ruff-check
args: ["--fix"]
- id: ruff
args: [--fix]
- id: ruff-format
- repo: https://github.com/cheshirekow/cmake-format-precommit
@@ -47,13 +46,14 @@ repos:
- id: cmake-format
- repo: https://github.com/crate-ci/typos
rev: v1.35.3
rev: v1.30.1
hooks:
- id: typos
exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES|scripts/base/protocols/ssl/mozilla-ca-list.zeek|src/3rdparty/.*)$'
exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES|scripts/base/protocols/ssl/mozilla-ca-list.zeek)$'
- repo: https://github.com/bbannier/spicy-format
rev: v0.26.0
rev: v0.24.2
hooks:
- id: spicy-format
exclude: '^testing/.*'
# TODO: Reformat existing large analyzers just before 8.0.
exclude: '(^testing/.*)|(protocol/ldap/.*)|(protocol/quic/.*)|(protocol/websocket/.*)'


@@ -30,15 +30,12 @@ extend-ignore-re = [
"ot->Yield\\(\\)->InternalType\\(\\)",
"switch \\( ot \\)",
"\\(ZAMOpType ot\\)",
"exat", # Redis expire at
"EXAT",
# News stuff
"SupressWeirds.*deprecated",
"\"BaR\"",
"\"xFoObar\"",
"\"FoO\"",
"Smoot",
]
extend-ignore-identifiers-re = [
@@ -56,7 +53,6 @@ extend-ignore-identifiers-re = [
"complte_flag", # Existing use in exported record in base.
"VidP(n|N)", # In SMB.
"iin", # In DNP3.
"SCN[dioux]", # sccanf fixed-width identifiers
"(ScValidatePnPService|ScSendPnPMessage)", # In DCE-RPC.
"snet", # Used as shorthand for subnet in base scripts.
"typ",
@@ -84,9 +80,6 @@ have_2nd = "have_2nd"
ot1 = "ot1"
ot2 = "ot2"
uses_seh = "uses_seh"
ect0 = "ect0"
ect1 = "ect1"
tpe = "tpe"
[default.extend-words]
caf = "caf"

CHANGES

File diff suppressed because it is too large

View file

@@ -68,8 +68,7 @@ option(INSTALL_ZEEKCTL "Install zeekctl." ${ZEEK_INSTALL_TOOLS_DEFAULT})
option(INSTALL_ZEEK_CLIENT "Install the zeek-client." ${ZEEK_INSTALL_TOOLS_DEFAULT})
option(INSTALL_ZKG "Install zkg." ${ZEEK_INSTALL_TOOLS_DEFAULT})
option(PREALLOCATE_PORT_ARRAY "Pre-allocate all ports for zeek::Val." ON)
option(ZEEK_STANDALONE "Build Zeek as stand-alone binary." ON)
option(ZEEK_ENABLE_FUZZERS "Build Zeek fuzzing targets." OFF)
option(ZEEK_STANDALONE "Build Zeek as stand-alone binary?" ON)
# Non-boolean options.
if (NOT WIN32)
@@ -90,6 +89,8 @@ set(ZEEK_ETC_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/etc"
set(CMAKE_EXPORT_COMPILE_COMMANDS ON CACHE INTERNAL
"Whether to write a JSON compile commands database")
set(ZEEK_CXX_STD cxx_std_17 CACHE STRING "The C++ standard to use.")
set(ZEEK_SANITIZERS "" CACHE STRING "Sanitizers to use when building.")
set(CPACK_SOURCE_IGNORE_FILES "" CACHE STRING "Files to be ignored by CPack")
@@ -192,36 +193,18 @@ if (MSVC)
# TODO: This is disabled for now because there are a bunch of known
# compiler warnings on Windows that we don't have good fixes for.
#set(WERROR_FLAG "/WX")
#set(WNOERROR_FLAG "/WX:NO")
#set(WERROR_FLAG "/WX")
endif ()
# Always build binpac in static mode if building on Windows
set(BUILD_STATIC_BINPAC true)
else ()
include(GNUInstallDirs)
if (BUILD_WITH_WERROR)
set(WERROR_FLAG "-Werror")
set(WNOERROR_FLAG "-Wno-error")
# With versions >=13.0 GCC gained `-Warray-bounds` which reports false
# positives, see e.g., https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111273.
if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13.0)
list(APPEND WERROR_FLAG "-Wno-error=array-bounds")
endif ()
# With versions >=11.0 GCC is returning false positives for -Wrestrict. See
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100366. It's more prevalent
# building with -std=c++20.
if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 11.0)
list(APPEND WERROR_FLAG "-Wno-error=restrict")
endif ()
endif ()
endif ()
include(cmake/CommonCMakeConfig.cmake)
include(cmake/FindClangTidy.cmake)
include(cmake/CheckCompilerArch.cmake)
include(cmake/RequireCXXStd.cmake)
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
@@ -250,7 +233,7 @@ set(ZEEK_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
# zeek-plugin-create-package.sh. Needed by ZeekPluginConfig.cmake.in.
set(ZEEK_PLUGIN_SCRIPTS_PATH "${PROJECT_SOURCE_DIR}/cmake")
# Our C++ base target for propagating compiler and linker flags. Note: for
# Our C++17 base target for propagating compiler and linker flags. Note: for
# now, we only use it for passing library dependencies around.
add_library(zeek_internal INTERFACE)
add_library(Zeek::Internal ALIAS zeek_internal)
@@ -369,7 +352,7 @@ endfunction ()
find_package(Threads REQUIRED)
# Interface library for propagating extra flags and include paths to dynamically
# loaded plugins. Also propagates include paths and c++ standard mode on the install
# loaded plugins. Also propagates include paths and C++17 mode on the install
# interface.
add_library(zeek_dynamic_plugin_base INTERFACE)
target_include_directories(
@@ -396,14 +379,14 @@ endfunction ()
add_zeek_dynamic_plugin_build_interface_include_directories(
${PROJECT_SOURCE_DIR}/src/include
${PROJECT_SOURCE_DIR}/tools/binpac/lib
${PROJECT_SOURCE_DIR}/auxil/binpac/lib
${PROJECT_SOURCE_DIR}/auxil/broker/libbroker
${PROJECT_SOURCE_DIR}/auxil/paraglob/include
${PROJECT_SOURCE_DIR}/auxil/prometheus-cpp/core/include
${PROJECT_SOURCE_DIR}/auxil/expected-lite/include
${CMAKE_BINARY_DIR}/src
${CMAKE_BINARY_DIR}/src/include
${CMAKE_BINARY_DIR}/tools/binpac/lib
${CMAKE_BINARY_DIR}/auxil/binpac/lib
${CMAKE_BINARY_DIR}/auxil/broker/libbroker
${CMAKE_BINARY_DIR}/auxil/prometheus-cpp/core/include)
@@ -666,7 +649,6 @@ if (ENABLE_DEBUG)
set(VERSION_C_IDENT "${VERSION_C_IDENT}_debug")
target_compile_definitions(zeek_internal INTERFACE DEBUG)
target_compile_definitions(zeek_dynamic_plugin_base INTERFACE DEBUG)
set(SPICYZ_FLAGS "-d" CACHE STRING "Additional flags to pass to spicyz for builtin analyzers")
endif ()
if (NOT BINARY_PACKAGING_MODE)
@@ -883,35 +865,46 @@ endif ()
set(PY_MOD_INSTALL_DIR ${py_mod_install_dir} CACHE STRING "Installation path for Python modules"
FORCE)
# BinPAC uses the same 'ENABLE_STATIC_ONLY' variable to define whether
# to build statically. Save a local copy so it can be set based on the
# configure flag before we add the subdirectory.
set(ENABLE_STATIC_ONLY_SAVED ${ENABLE_STATIC_ONLY})
if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/auxil/binpac/CMakeLists.txt)
if (BUILD_STATIC_BINPAC)
set(ENABLE_STATIC_ONLY true)
set(ENABLE_STATIC_ONLY_SAVED ${ENABLE_STATIC_ONLY})
if (MSVC)
set(BUILD_STATIC_BINPAC true)
endif ()
if (BUILD_STATIC_BINPAC)
set(ENABLE_STATIC_ONLY true)
endif ()
add_subdirectory(auxil/binpac)
set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED})
# FIXME: avoid hard-coding a path for multi-config generator support. See the
# TODO in ZeekPluginConfig.cmake.in.
set(BINPAC_EXE_PATH "${CMAKE_BINARY_DIR}/auxil/binpac/src/binpac${CMAKE_EXECUTABLE_SUFFIX}")
endif ()
add_subdirectory(tools/binpac)
set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED})
# FIXME: avoid hard-coding a path for multi-config generator support. See the
# TODO in ZeekPluginConfig.cmake.in.
set(BINPAC_EXE_PATH "${CMAKE_BINARY_DIR}/tools/binpac/src/binpac${CMAKE_EXECUTABLE_SUFFIX}")
set(_binpac_exe_path "included")
# Need to call find_package so it sets up the include paths used by plugin builds.
find_package(BinPAC REQUIRED)
# Add an alias (used by our plugin setup).
add_executable(Zeek::BinPAC ALIAS binpac)
add_subdirectory(tools/bifcl)
add_executable(Zeek::BifCl ALIAS bifcl)
# FIXME: avoid hard-coding a path for multi-config generator support. See the
# TODO in ZeekPluginConfig.cmake.in.
set(BIFCL_EXE_PATH "${CMAKE_BINARY_DIR}/tools/bifcl/bifcl${CMAKE_EXECUTABLE_SUFFIX}")
set(_bifcl_exe_path "included")
if (NOT BIFCL_EXE_PATH)
add_subdirectory(auxil/bifcl)
add_executable(Zeek::BifCl ALIAS bifcl)
# FIXME: avoid hard-coding a path for multi-config generator support. See the
# TODO in ZeekPluginConfig.cmake.in.
set(BIFCL_EXE_PATH "${CMAKE_BINARY_DIR}/auxil/bifcl/bifcl${CMAKE_EXECUTABLE_SUFFIX}")
set(_bifcl_exe_path "included")
else ()
add_executable(Zeek::BifCl IMPORTED)
set_property(TARGET Zeek::BifCl PROPERTY IMPORTED_LOCATION "${BIFCL_EXE_PATH}")
set(_bifcl_exe_path "BIFCL_EXE_PATH")
endif ()
add_subdirectory(tools/gen-zam)
if (NOT GEN_ZAM_EXE_PATH)
add_subdirectory(auxil/gen-zam)
endif ()
if (ENABLE_JEMALLOC)
if (${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
@@ -1016,7 +1009,6 @@ if (NOT DISABLE_SPICY)
set(Python3_EXECUTABLE ${Python_EXECUTABLE} CACHE STRING "Python3_EXECUTABLE hint")
endif ()
set(SPICY_ENABLE_TESTS OFF)
add_subdirectory(auxil/spicy)
include(ConfigureSpicyBuild) # set some options different for building Spicy
@@ -1055,6 +1047,9 @@ include(BuiltInSpicyAnalyzer)
include_directories(BEFORE ${PCAP_INCLUDE_DIR} ${BIND_INCLUDE_DIR} ${BinPAC_INCLUDE_DIR}
${ZLIB_INCLUDE_DIR} ${JEMALLOC_INCLUDE_DIR})
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/filesystem/include/ghc
DESTINATION include/zeek/3rdparty/)
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/prometheus-cpp/core/include/prometheus
DESTINATION include/zeek/3rdparty/prometheus-cpp/include)
@@ -1064,8 +1059,15 @@ install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/auxil/prometheus-cpp/core/include/
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/expected-lite/include/nonstd
DESTINATION include/zeek/3rdparty/)
# Create 3rdparty/ghc within the build directory so that the include for
# "zeek/3rdparty/ghc/filesystem.hpp" works within the build tree.
execute_process(COMMAND "${CMAKE_COMMAND}" -E make_directory
"${CMAKE_CURRENT_BINARY_DIR}/3rdparty/")
execute_process(
COMMAND
"${CMAKE_COMMAND}" -E create_symlink
"${CMAKE_CURRENT_SOURCE_DIR}/auxil/filesystem/include/ghc"
"${CMAKE_CURRENT_BINARY_DIR}/3rdparty/ghc")
# Do the same for nonstd.
execute_process(
@@ -1080,16 +1082,18 @@ set(USE_GEOIP false)
find_package(LibMMDB)
if (LIBMMDB_FOUND)
set(USE_GEOIP true)
include_directories(BEFORE SYSTEM ${LibMMDB_INCLUDE_DIR})
include_directories(BEFORE ${LibMMDB_INCLUDE_DIR})
list(APPEND OPTLIBS ${LibMMDB_LIBRARY})
endif ()
set(USE_KRB5 false)
find_package(LibKrb5)
if (LIBKRB5_FOUND)
set(USE_KRB5 true)
include_directories(BEFORE SYSTEM ${LibKrb5_INCLUDE_DIR})
list(APPEND OPTLIBS ${LibKrb5_LIBRARY})
if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
find_package(LibKrb5)
if (LIBKRB5_FOUND)
set(USE_KRB5 true)
include_directories(BEFORE ${LibKrb5_INCLUDE_DIR})
list(APPEND OPTLIBS ${LibKrb5_LIBRARY})
endif ()
endif ()
set(HAVE_PERFTOOLS false)
@@ -1121,7 +1125,7 @@ endif ()
# dependencies which tend to be in standard system locations and thus cause the
# system OpenSSL headers to still be picked up even if one specifies
# --with-openssl (which may be common).
include_directories(BEFORE SYSTEM ${OPENSSL_INCLUDE_DIR})
include_directories(BEFORE ${OPENSSL_INCLUDE_DIR})
# Determine if libfts is external to libc, i.e. musl
find_package(FTS)
@@ -1187,6 +1191,18 @@ endif ()
# Tell the plugin code that we're building as part of the main tree.
set(ZEEK_PLUGIN_INTERNAL_BUILD true CACHE INTERNAL "" FORCE)
set(ZEEK_HAVE_AF_PACKET no)
if (${CMAKE_SYSTEM_NAME} MATCHES Linux)
if (NOT DISABLE_AF_PACKET)
if (NOT AF_PACKET_PLUGIN_PATH)
set(AF_PACKET_PLUGIN_PATH ${CMAKE_SOURCE_DIR}/auxil/zeek-af_packet-plugin)
endif ()
list(APPEND ZEEK_INCLUDE_PLUGINS ${AF_PACKET_PLUGIN_PATH})
set(ZEEK_HAVE_AF_PACKET yes)
endif ()
endif ()
set(ZEEK_HAVE_JAVASCRIPT no)
if (NOT DISABLE_JAVASCRIPT)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/auxil/zeekjs/cmake)
@@ -1206,7 +1222,6 @@ if (NOT DISABLE_JAVASCRIPT)
endif ()
endif ()
set(ZEEK_HAVE_AF_PACKET no CACHE INTERNAL "Zeek has AF_PACKET support")
set(ZEEK_HAVE_JAVASCRIPT ${ZEEK_HAVE_JAVASCRIPT} CACHE INTERNAL "Zeek has JavaScript support")
set(DEFAULT_ZEEKPATH_PATHS
@@ -1225,7 +1240,11 @@ endif ()
include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR})
execute_process(COMMAND "${CMAKE_COMMAND}" -E create_symlink "." "${CMAKE_CURRENT_BINARY_DIR}/zeek")
set(ZEEK_CONFIG_BINPAC_ROOT_DIR ${BinPAC_ROOT_DIR})
if (BinPAC_ROOT_DIR)
set(ZEEK_CONFIG_BINPAC_ROOT_DIR ${BinPAC_ROOT_DIR})
else ()
set(ZEEK_CONFIG_BINPAC_ROOT_DIR ${ZEEK_ROOT_DIR})
endif ()
if (BROKER_ROOT_DIR)
set(ZEEK_CONFIG_BROKER_ROOT_DIR ${BROKER_ROOT_DIR})
@@ -1443,6 +1462,11 @@ else ()
set(_install_btest_tools_msg "no pcaps")
endif ()
set(_binpac_exe_path "included")
if (BINPAC_EXE_PATH)
set(_binpac_exe_path ${BINPAC_EXE_PATH})
endif ()
set(_gen_zam_exe_path "included")
if (GEN_ZAM_EXE_PATH)
set(_gen_zam_exe_path ${GEN_ZAM_EXE_PATH})
@@ -1472,118 +1496,68 @@ if (ZEEK_LEGACY_ANALYZERS OR ZEEK_SKIPPED_ANALYZERS)
)
endif ()
set(_zeek_builtin_plugins "${ZEEK_BUILTIN_PLUGINS}")
if (NOT ZEEK_BUILTIN_PLUGINS)
set(_zeek_builtin_plugins "none")
endif ()
set(_zeek_fuzzing_engine "${ZEEK_FUZZING_ENGINE}")
if (NOT ZEEK_FUZZING_ENGINE)
if (ZEEK_ENABLE_FUZZERS)
# The default fuzzer used by gcc and clang is libFuzzer. This is what you get
# if you simply pass '-fsanitize=fuzzer' to the compiler.
set(_zeek_fuzzing_engine "libFuzzer")
endif ()
endif ()
## Utility method for outputting status information for features that just have a
## string representation. This can also take an optional second argument that is a
## value string to print.
function (output_summary_line what)
if ("${ARGV1}" MATCHES "^$")
message("${what}:")
return()
endif ()
set(_spaces " ")
string(LENGTH ${what} _what_length)
math(EXPR _num_spaces "25 - ${_what_length}")
string(SUBSTRING ${_spaces} 0 ${_num_spaces} _spacing)
message("${what}:${_spacing}${ARGV1}")
endfunction ()
## Utility method for outputting status information for features that have an ON/OFF
## state.
function (output_summary_bool what state)
if (${state})
output_summary_line("${what}" "ON")
else ()
output_summary_line("${what}" "OFF")
endif ()
endfunction ()
message("\n====================| Zeek Build Summary |====================\n")
output_summary_line("Build type" "${CMAKE_BUILD_TYPE}")
output_summary_line("Build dir" "${PROJECT_BINARY_DIR}")
message("")
output_summary_line("Install prefix" "${CMAKE_INSTALL_PREFIX}")
output_summary_line("Config file dir" "${ZEEK_ETC_INSTALL_DIR}")
output_summary_line("Log dir" "${ZEEK_LOG_DIR}")
output_summary_line("Plugin dir" "${ZEEK_PLUGIN_DIR}")
output_summary_line("Python module dir" "${PY_MOD_INSTALL_DIR}")
output_summary_line("Script dir" "${ZEEK_SCRIPT_INSTALL_PATH}")
output_summary_line("Spool dir" "${ZEEK_SPOOL_DIR}")
output_summary_line("State dir" "${ZEEK_STATE_DIR}")
output_summary_line("Spicy modules dir" "${ZEEK_SPICY_MODULE_PATH}")
message("")
output_summary_bool("Debug mode" ${ENABLE_DEBUG})
output_summary_bool("Unit tests" ${ENABLE_ZEEK_UNIT_TESTS})
message("")
output_summary_line("Builtin Plugins" "${_zeek_builtin_plugins}")
message("")
output_summary_line("CC" "${CMAKE_C_COMPILER}")
output_summary_line("CFLAGS" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${BuildType}}")
output_summary_line("CXX" "${CMAKE_CXX_COMPILER}")
output_summary_line("CXXFLAGS" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${BuildType}}")
output_summary_line("CPP" "${CMAKE_CXX_COMPILER}")
message("")
output_summary_bool("AF_PACKET" ${ZEEK_HAVE_AF_PACKET})
output_summary_bool("Aux. Tools" ${INSTALL_AUX_TOOLS})
output_summary_bool("BTest" ${INSTALL_BTEST})
output_summary_line("BTest tooling" ${_install_btest_tools_msg})
output_summary_bool("JavaScript" ${ZEEK_HAVE_JAVASCRIPT})
output_summary_line("Spicy" ${_spicy})
output_summary_bool("Spicy analyzers" ${USE_SPICY_ANALYZERS})
output_summary_bool("zeek-client" ${INSTALL_ZEEK_CLIENT})
output_summary_bool("ZeekControl" ${INSTALL_ZEEKCTL})
output_summary_bool("zkg" ${INSTALL_ZKG})
message("")
output_summary_bool("libmaxminddb" ${USE_GEOIP})
output_summary_bool("Kerberos" ${USE_KRB5})
output_summary_bool("gperftools" ${HAVE_PERFTOOLS})
output_summary_bool(" - tcmalloc" ${USE_PERFTOOLS_TCMALLOC})
output_summary_bool(" - debugging" ${USE_PERFTOOLS_DEBUG})
output_summary_bool("jemalloc" ${ENABLE_JEMALLOC})
message("")
output_summary_line("Cluster backends")
output_summary_bool(" - Broker" ON)
output_summary_bool(" - ZeroMQ" ${ENABLE_CLUSTER_BACKEND_ZEROMQ})
message("")
output_summary_line("Storage backends")
output_summary_bool(" - SQLite" ON)
output_summary_bool(" - Redis" ${ENABLE_STORAGE_BACKEND_REDIS})
message("")
output_summary_bool("Fuzz Targets" ${ZEEK_ENABLE_FUZZERS})
output_summary_line("Fuzz Engine" "${_zeek_fuzzing_engine}")
message("")
output_summary_line("External Tools/Linters")
output_summary_bool(" - Include What You Use" ${ENABLE_IWYU})
output_summary_bool(" - Clang-Tidy" ${ENABLE_CLANG_TIDY})
if (${_analyzer_warning})
message("${_analyzer_warning}\n")
endif ()
message("\n================================================================")
message(
"\n====================| Zeek Build Summary |===================="
"\n"
"\nBuild type: ${CMAKE_BUILD_TYPE}"
"\nBuild dir: ${PROJECT_BINARY_DIR}"
"\n"
"\nInstall prefix: ${CMAKE_INSTALL_PREFIX}"
"\nConfig file dir: ${ZEEK_ETC_INSTALL_DIR}"
"\nLog dir: ${ZEEK_LOG_DIR}"
"\nPlugin dir: ${ZEEK_PLUGIN_DIR}"
"\nPython module dir: ${PY_MOD_INSTALL_DIR}"
"\nScript dir: ${ZEEK_SCRIPT_INSTALL_PATH}"
"\nSpool dir: ${ZEEK_SPOOL_DIR}"
"\nState dir: ${ZEEK_STATE_DIR}"
"\nSpicy modules dir: ${ZEEK_SPICY_MODULE_PATH}"
"\n"
"\nDebug mode: ${ENABLE_DEBUG}"
"\nUnit tests: ${ENABLE_ZEEK_UNIT_TESTS}"
"\nBuiltin Plugins: ${ZEEK_BUILTIN_PLUGINS}"
"\n"
"\nCC: ${CMAKE_C_COMPILER}"
"\nCFLAGS: ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${BuildType}}"
"\nCXX: ${CMAKE_CXX_COMPILER}"
"\nCXXFLAGS: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${BuildType}}"
"\nCPP: ${CMAKE_CXX_COMPILER}"
"\n"
"\nAF_PACKET: ${ZEEK_HAVE_AF_PACKET}"
"\nAux. Tools: ${INSTALL_AUX_TOOLS}"
"\nBifCL: ${_bifcl_exe_path}"
"\nBinPAC: ${_binpac_exe_path}"
"\nBTest: ${INSTALL_BTEST}"
"\nBTest tooling: ${_install_btest_tools_msg}"
"\nGen-ZAM: ${_gen_zam_exe_path}"
"\nJavaScript: ${ZEEK_HAVE_JAVASCRIPT}"
"\nSpicy: ${_spicy}"
"\nSpicy analyzers: ${USE_SPICY_ANALYZERS}"
"\nzeek-client: ${INSTALL_ZEEK_CLIENT}"
"\nZeekControl: ${INSTALL_ZEEKCTL}"
"\nzkg: ${INSTALL_ZKG}"
"\n"
"\nlibmaxminddb: ${USE_GEOIP}"
"\nKerberos: ${USE_KRB5}"
"\ngperftools found: ${HAVE_PERFTOOLS}"
"\n - tcmalloc: ${USE_PERFTOOLS_TCMALLOC}"
"\n - debugging: ${USE_PERFTOOLS_DEBUG}"
"\njemalloc: ${ENABLE_JEMALLOC}"
"\n"
"\nCluster backends:"
"\n - Broker: ON"
"\n - ZeroMQ: ${ENABLE_CLUSTER_BACKEND_ZEROMQ}"
"\n"
"\nStorage backends:"
"\n - SQLite: ON"
"\n - Redis: ${ENABLE_STORAGE_BACKEND_REDIS}"
"\n"
"\nFuzz Targets: ${ZEEK_ENABLE_FUZZERS}"
"\nFuzz Engine: ${ZEEK_FUZZING_ENGINE}"
"\n"
"\nInclude What You Use: ${ENABLE_IWYU}"
"\nClang-Tidy: ${ENABLE_CLANG_TIDY}"
"${_analyzer_warning}"
"\n"
"\n================================================================\n")
include(UserChangedWarning)


@@ -533,6 +533,32 @@ POSSIBILITY OF SUCH DAMAGE.
==============================================================================
%%% auxil/filesystem
==============================================================================
Copyright (c) 2018, Steffen Schümann <s.schuemann@pobox.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================
%%% auxil/highwayhash
==============================================================================

NEWS

@@ -3,508 +3,26 @@ This document summarizes the most important changes in the current Zeek
release. For an exhaustive list of changes, see the ``CHANGES`` file
(note that submodules, such as Broker, come with their own ``CHANGES``.)
Zeek 8.1.0
==========
We would like to thank @chrisjlly, Klemens Nanni (@klemensn), and Klemens Nanni
(@klemens-ya) for their contributions to this release.
Breaking Changes
----------------
- Python 3.10 is now required for Zeek and all of its associated subprojects.
- The ``&optional`` script attribute will now error when applied to anything that's
not a record field. Previously, this would have surprising behavior.
- The BinPAC, Bifcl, and Gen-ZAM tools have all moved directly into the Zeek repo, which
should ease maintenance on them a bit. They were moved from the ``auxil`` directory to the
tools directory. Along with this, the ``--gen-zam`` argument for ``configure`` was
removed and the internal version will always be used.
- The zeek-af_packet-plugin git submodule was moved directly into the Zeek repo. This used
to live in the ``auxil`` directory, after having moved there from an external plugin.
It is now built as part of the main Zeek build whenever building on Linux.
New Functionality
-----------------
- A new TapAnalyzer class was added, allowing one to tap into all packets delivered
to child analyzers attached to session adapters.
- Two new hooks, ``Cluster::on_subscribe()`` and ``Cluster::on_unsubscribe()`` have
been added to allow observing ``Subscribe()`` and ``Unsubscribe()`` calls on
backends by Zeek scripts.
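
As a minimal sketch of a handler (the single topic-string parameter shown here
is an assumption; check the actual hook signatures in the cluster framework
before relying on it):

    hook Cluster::on_subscribe(topic: string) {
        # Runs whenever this node's backend subscribes to a topic (sketch).
        print fmt("node subscribed to %s", topic);
    }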
- The ability to control the length of strings and containers in log output was added. The
maximum length of individual log fields can be set, as well as the total length of all
string or container fields in a single log record. This feature is controlled via four
new script-level variables:
Log::default_max_field_string_bytes
Log::default_max_total_string_bytes
Log::default_max_field_container_elements
Log::default_max_total_container_elements
When one of the ``field`` limits is reached, the individual field is truncated. When one
of the ``total`` limits is reached, all further strings will be returned as empty and all
further container elements will not be output. See the documentation for those variables
for more detail.
The above variables control the truncation globally, but they can also be set for log
streams individually. This is controlled by variables with the same names that can be
set when the log stream is created.
Two new weirds were added to report the truncation: ``log_string_field_truncated`` and
``log_container_field_truncated``. New metrics were added to track how many truncations
have occurred: ``zeek_log_writer_truncated_string_fields_total`` and
``zeek_log_writer_truncated_containers_total``. The metrics are reported for each log
stream.
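
For example, to cap each logged string field at 1024 bytes globally (an
illustrative value, not a recommendation):

    redef Log::default_max_field_string_bytes = 1024;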
- The DNS analyzer now returns the set of parameters for SVCB data. It previously handled
SVCB packets, but omitted the parameters while parsing.
Changed Functionality
---------------------
- The var-extraction-uri.zeek policy does not include the path in the ``uri_vars``
field anymore.
- The ``get_current_packet_header()`` function now populates the returned record also for
fragmented IP datagrams.
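
A short sketch of inspecting the result (assuming the standard ``raw_pkt_hdr``
return type with its optional ``ip`` field):

    event new_packet(c: connection, p: pkt_hdr) {
        # The returned record is populated even for fragmented datagrams now.
        local hdr = get_current_packet_header();
        if ( hdr?$ip )
            print fmt("current packet source: %s", hdr$ip$src);
    }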
Removed Functionality
---------------------
Deprecated Functionality
------------------------
Zeek 8.0.0
==========
We would like to thank @aidans111, Anthony Verez (@netantho), Baa (@Baa14453),
Bhaskar Bhar (@bhaskarbhar), @dwhitemv25, EdKo (@ephikos), @edoardomich, Fupeng
Zhao (@AmazingPP), hendrik.schwartke@os-s.de (@hendrikschwartke), @i2z1, Jan
Grashöfer (@J-Gras), Jean-Samuel Marier, Justin Azoff (@JustinAzoff), Mario D
(@mari0d), Markus Elfring (@elfring), Peter Cullen (@pbcullen), Sean Donaghy,
Simeon Miteff (@simeonmiteff), Steve Smoot (@stevesmoot), @timo-mue,
@wojciech-graj, and Xiaochuan Ye (@XueSongTap) for their contributions to this
release.
We would like to thank ... for their contributions to this release.
Breaking Changes
----------------
- Zeek by default now depends on the availability of the ZeroMQ library for building
and running. This is in preparation of switching to the ZeroMQ-based cluster backend
by default in future Zeek versions. On an Ubuntu based system, the required system
packages are ``libzmq5``, ``libzmq3-dev`` and ``cppzmq-dev``. See the Dockerfiles
in the ``ci/`` directory for other supported platforms.
- Zeek and all of its associated submodules now require C++20-capable compilers to
build. This will let us move forward in using more modern C++ features and replace some
workarounds that we have been carrying. Minimum recommended versions of compilers are
GCC 10, Clang 8, and Visual Studio 2022.
- The ``zeek::Span`` class has been deprecated and the APIs in the telemetry subsystem
switched to use ``std::span`` instead of ``zeek::Span``. If your plugin instantiates
counter or gauge instances using the telemetry subsystem and you've previously used
``zeek::Span`` explicitly, updates may be needed.
- The code base underwent a big cleanup of #include usage, across almost all of the
files. We tested builds of all of the existing third-party packages and only noticed one
or two failures, but there is a possibility for breakage related to this cleanup.
- The ``lookup_connection()`` and ``connection_exists()`` builtin functions
now require ``conn_id`` instances as argument, rather than internally supporting
duck type matching ``conn_id``-like records.
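
Passing ``c$id``, which is already a ``conn_id``, satisfies the new
requirement. A minimal sketch:

    event connection_established(c: connection) {
        # connection_exists() now requires a real conn_id value.
        if ( connection_exists(c$id) )
            print "connection is tracked";
    }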
- Network timestamps are not added to events by default anymore. Use the following
redef line to enable them:
redef EventMetadata::add_network_timestamp = T;
The background is that event metadata has become more generic and may incur
a small overhead when enabled. There are not enough users of network timestamp
metadata to justify the complexity of treating it separately.
- The ASCII writer's ``JSON::TS_MILLIS`` timestamp format was changed to produce
signed integers. This matters for the representation of timestamps that are
before the UNIX epoch. These are now written as negative values, while previously
the negative value was interpreted as an unsigned integer, resulting in very large
timestamps, potentially causing issues for downstream consumers.
If you prefer to always have unsigned values, it's possible to revert to the previous
behavior by setting:
redef LogAscii::json_timestamps = JSON::TS_MILLIS_UNSIGNED;
- The "endpoint" label of metrics exposed via Prometheus or the ``telemetry.log``
was renamed to "node". This is done for consistency with cluster terminology:
The label values have always been the value of ``Cluster::node``, so it's more intuitive
to call it that. The "endpoint" name originated from a time when the telemetry framework
was implemented in Broker.
To revert to the "endpoint" label, you can do the following, but we strongly
suggest migrating to the new default "node" instead:
redef Telemetry::metrics_endpoint_label = "endpoint";
- The ``current_event_time()`` builtin function as well as ``Event::Time()``
and ``EventMgr::CurrentEventTime()`` now return ``-1.0`` if no timestamp
metadata is available for the current event, or if no event is being
dispatched. Previously this would've been 0.0, or the timestamp of the previously
dispatched event.
- Missing network timestamp metadata on remote events is not set to the local
network time anymore by default. This potentially hid useful debugging information
about another node not sending timestamp metadata. The old behavior can be
re-enabled as follows:
redef EventMetadata::add_missing_remote_network_timestamp = T;
- The ``IsPacketSource()`` method on ``IOSource`` was removed. It was unused
and incorrectly returned ``false`` on all packet sources.
- The ``--with-binpac`` and ``--with-bifcl`` arguments for ``configure`` are now
deprecated. Both arguments have for a long time just used the internal version of the
tooling even if a path was passed, so they were mostly useless. This may break
cross-compiling setups, where the ``binpac`` and ``bifcl`` tooling needs to run on
the host machine, but we haven't heard from anyone that this is the case with the
arguments in their currently-broken state.
- The parsing of data for the ``ssl_session_ticket_handshake`` event was fixed.
In the past, the data contained two extra bytes before the session ticket
data. The event now contains only the session ticket data. You might have to
adjust your scripts if you manually worked around this bug in the past.
New Functionality
-----------------
- Zeek now supports pluggable and customizable connection tracking. The default
behavior remains unchanged and uses a connection's five-tuple based on the
IP/port pairs and proto field. Zeek 8 ships with one additional implementation
that factors VLAN tags into the connection tracking. To switch to VLAN-aware
connection tracking:
@load frameworks/conn_key/vlan_fivetuple
By convention, additional fields used by alternative ConnKey implementations are
added into the new ``ctx`` field of ``conn_id``. The type of ``ctx`` is ``conn_id_ctx``.
The ``vlan_fivetuple`` script adds two additional fields to the ``conn_id_ctx``
record type, representing any VLAN tags involved. Accordingly, every log
using ``conn_id`` reflects the change, since ``ctx`` and the VLAN fields have
the ``&log`` attribute. The columns used for logging will be named ``id.ctx.vlan``
and ``id.ctx.inner_vlan``.
This feature does not automatically provide a notion of endpoint that
corresponds with the effective connection tuple. For example, applications tracking
endpoints by IP address do not automatically become VLAN-aware when enabling
VLAN-aware tracking.
Users may experiment with their own notion of endpoint by combining the ``orig_h``
or ``resp_h`` field of ``conn_id`` with the new ``ctx`` field. For example, tracking
the number of connections from a given host in a VLAN-aware fashion can be done
as follows:
global connection_counts: table[conn_id_ctx, addr] of count &default=0;

event new_connection(c: connection) {
    ++connection_counts[c$id$ctx, c$id$orig_h];
}
Note that this script snippet isn't VLAN-specific, yet it is VLAN-aware if the
``vlan_fivetuple`` script is loaded. In future Zeek versions, this pattern is
likely to be used to adapt base and policy scripts for more "context awareness".
Users may add their own plugins (for example via a zkg package) to provide
alternative implementations. This involves implementing a factory for
connection "keys" that factor in additional flow information. See the VLAN
implementation in the ``src/packet_analysis/protocol/ip/conn_key/vlan_fivetuple``
directory for an example.
- Added support to ZeekControl for seamlessly switching to ZeroMQ as the cluster
backend by adding the following settings to zeekctl.cfg:
ClusterBackend = ZeroMQ
UseWebSocket = 1
With the ZeroMQ cluster backend, ZeekControl requires Zeek's WebSocket API
to communicate with individual nodes for the ``print`` and ``netstats`` commands.
Setting the ``UseWebSocket`` option enables a WebSocket server on the manager
node, listening on 127.0.0.1:27759 by default (this is configurable using
the newly introduced ``WebSocketHost`` and ``WebSocketPort`` options).
The ``UseWebSocket`` option can also be used when ``ClusterBackend`` is set
to ``Broker``, but isn't strictly required there.
For ZeroMQ (or other future cluster backends), setting ``UseWebSocket`` is a
requirement, as ZeekControl does not speak the native ZeroMQ protocol to communicate
with cluster nodes for executing commands. This functionality requires the
``websockets`` Python package, version 11.0 or higher.
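For example, a zeekctl.cfg fragment that makes the listen address explicit might
look as follows (the host and port values shown are the documented defaults):
ClusterBackend = ZeroMQ
UseWebSocket = 1
WebSocketHost = 127.0.0.1
WebSocketPort = 27759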
- Cluster telemetry improvements. Zeek now exposes a configurable number of
metrics regarding outgoing and incoming cluster events. By default, the number
of events sent and received by a Zeek cluster node and any attached WebSocket
clients is tracked as four individual counters. It's possible to gather more
detailed information by adding ``Cluster::Telemetry::VERBOSE`` and
``Cluster::Telemetry::DEBUG`` to the variables ``Cluster::core_metrics`` and
``Cluster::websocket_metrics``:
redef Cluster::core_metrics += { Cluster::Telemetry::VERBOSE };
redef Cluster::websocket_metrics += { Cluster::Telemetry::DEBUG };
Configuring verbose adds metrics that are labeled with the event handler
and topic name. Configuring debug uses histogram metrics to additionally track
the distribution of the serialized event size. Additionally, when debug is selected,
outgoing events are labeled with the script location from where they were published.
- Support for the X-Application-Name HTTP header was added to the WebSocket API at
``v1/messages/json``. A WebSocket application connecting to Zeek may set the
X-Application-Name header to a descriptive identifier. The value of this header
is added to the cluster metrics as an ``app`` label. This makes it possible to
gather incoming and outgoing event metrics for a specific WebSocket application,
simply by setting the X-Application-Name header.
- The SMTP analyzer can now optionally forward the top-level RFC 822 message of
individual SMTP transactions to the file analysis framework. This can be leveraged
to extract emails from SMTP traffic to disk in the form of ``.eml`` files.
To enable this feature, set the ``SMTP::enable_rfc822_msg_file_analysis`` option
and implement an appropriate ``file_new()`` or ``file_over_new_connection()`` handler:
redef SMTP::enable_rfc822_msg_file_analysis = T;

event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) {
    if ( f$id == c$smtp$rfc822_msg_fuid )
        Files::add_analyzer(f, Files::ANALYZER_EXTRACT, [$extract_filename="email"]);
}
- Generic event metadata support. A new ``EventMetadata`` module was added that
allows registering generic event metadata types and accessing the current event's
metadata using the module's ``current()`` and ``current_all()`` functions.
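As a sketch of the access side, and assuming ``current_all()`` returns a vector of
metadata entries, all metadata attached to the current event can be inspected from
within any handler:
event new_connection(c: connection) {
    local meta = EventMetadata::current_all();
    for ( i in meta )
        print meta[i];
}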
- A new plugin hook, ``HookPublishEvent()``, has been added for intercepting the
publishing of Zeek events. This hook may be used for monitoring, modifying, or
rerouting remote events.
Plugins can implement and enable this hook by calling the following method
within their ``Configure()`` implementation:
EnableHook(HOOK_PUBLISH_EVENT)
The signature of ``HookPublishEvent()`` is as follows:
bool HookPublishEvent(zeek::cluster::Backend& backend,
                      const std::string& topic,
                      zeek::cluster::detail::Event& event);
- Zeek now includes the Redis protocol analyzer from the evantypanski/spicy-redis
project (https://github.com/evantypanski/spicy-redis). The analyzer is enabled
by default and logs Redis commands and their associated replies in ``redis.log``.
To disable the analyzer in case of issues, use the following snippet:
redef Analyzer::disabled_analyzers += {
    Analyzer::ANALYZER_REDIS,
};
- The FTP analyzer now supports explicit TLS via AUTH TLS.
- Two new script-level hooks in the Intel framework have been added.
hook indicator_inserted(indicator_value: string, indicator_type: Intel::Type)
hook indicator_removed(indicator_value: string, indicator_type: Intel::Type)
These are reliably invoked on worker and manager nodes the first time an
indicator value is inserted into the store and once it has been completely
removed from the store.
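A minimal sketch of observing store changes, assuming the hooks are referenced as
``Intel::indicator_inserted`` and ``Intel::indicator_removed``:
hook Intel::indicator_inserted(indicator_value: string, indicator_type: Intel::Type) {
    print fmt("indicator %s (%s) entered the store", indicator_value, indicator_type);
}

hook Intel::indicator_removed(indicator_value: string, indicator_type: Intel::Type) {
    print fmt("indicator %s (%s) left the store", indicator_value, indicator_type);
}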
- The ``frameworks/intel/seen`` scripts have been annotated with event groups,
and a new ``frameworks/intel/seen/manage-event-groups`` policy script was added.
The motivation is to allow Zeek distributors to load the ``intel/seen`` scripts
by default without incurring their event overhead when no Intel indicators are
loaded. Corresponding event handlers are enabled once the first Intel indicator
of a given ``Intel::Type`` is added, and disabled again when the last
indicator is removed.
Note that the ``manage-event-groups`` script interacts with the ``Intel::seen_policy``
hook: If no indicators for a given ``Intel::Type`` are loaded, the ``Intel::seen_policy``
hook will not be invoked, as the event handlers extracting indicators aren't executed.
If you rely on the ``Intel::seen_policy`` hook being invoked regardless of the
contents of the Intel store, do not load ``manage-event-groups``, or set:
redef Intel::manage_seen_event_groups = F;
- The DNS analyzer was extended to support NAPTR RRs (RFC 2915, RFC 3403).
A corresponding ``dns_NAPTR_reply`` event was added.
- A new ``get_tags_by_category`` BIF was added that returns a list of tags for a
specified plugin category. This can be used in lieu of calling ``zeek -NN`` and
parsing the output. For example, this will return the list of all analyzer plugins
currently loaded:
get_tags_by_category("ANALYZER");
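A small sketch that prints each tag, assuming the BIF returns a vector of tag values:
event zeek_init() {
    local tags = get_tags_by_category("ANALYZER");
    for ( i in tags )
        print tags[i];
}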
- A new ``conn_generic_packet_threshold_crossed`` event was introduced. The event triggers
for any IP-based session that reaches a given threshold. Multiple packet thresholds can
be defined in ``ConnThreshold::generic_packet_thresholds``. The generic thresholds refer
to the total number of packets on a connection, without taking direction into account
(i.e., the event also triggers on one-sided connections).
The event is intended as an alternative to the ``new_connection`` event that allows for
ignoring short-lived connections like DNS or scans. For example, it can be used to set
up traditional connection monitoring without introducing overhead for connections that
would never reach a larger threshold anyway.
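A minimal sketch, assuming ``generic_packet_thresholds`` is a redef-able set of packet
counts and that the event receives the connection and the crossed threshold:
redef ConnThreshold::generic_packet_thresholds += { 100 };

event conn_generic_packet_threshold_crossed(c: connection, threshold: count) {
    print fmt("%s crossed %d packets", c$uid, threshold);
}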
- Zeek now supports extracting the PPPoE session ID. The ``PacketAnalyzer::PPPoE::session_id``
BIF can be used to get the session ID of the current packet.
The ``conn/pppoe-session-id-logging.zeek`` policy script adds PPPoE session IDs to the
connection log.
- The ``get_conn_stats()`` function's return value now includes the number of packets
that have not been processed by any analyzer. Using data from ``get_conn_stats()`` and
``get_net_stats()``, it's possible to determine the number of packets that have
been received and accepted by Zeek, but eventually discarded without processing.
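A sketch of that computation; the ``ConnStats`` field name used here is an assumption
for illustration, check the ``get_conn_stats()`` documentation for the actual name:
event zeek_done() {
    local cs = get_conn_stats();
    local ns = get_net_stats();
    # Packets received and accepted by Zeek, but discarded without processing.
    print fmt("received=%d unprocessed=%d", ns$pkts_recvd, cs$packets_unprocessed);
}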
Changed Functionality
---------------------
- The ``Conn::set_conn`` function is now always run in ``new_connection``, instead of only
being run in ``connection_state_remove``.
- Logging of failed analyzers has been overhauled. ``dpd.log`` was replaced
by a new ``analyzer.log`` that presents a more unified and consistent view
of failed analyzers. The previous ``analyzer.log`` was renamed to ``analyzer_debug.log``;
see below for more details.
For protocol analyzers, ``analyzer.log`` now reports initially confirmed analyzers that
Zeek subsequently removed from the connection due to a protocol violation.
For file and packet analyzers, all errors are logged to ``analyzer.log``.
As part of this work, a new ``analyzer_failed`` event has been introduced. This event
is raised when an analyzer is removed because it raised a violation.
- ``analyzer.log`` was renamed to ``analyzer_debug.log`` and is no longer created
by default. The log file is created if the ``frameworks/analyzer/debug-logging.zeek``
policy script is loaded.
Note that the namespace for the script's options changed to
``Analyzer::DebugLogging``. Furthermore, the default options changed to enable
more detailed output by default.
- Record fields with a ``&default`` attribute are now consistently re-initialized
after such fields are deleted. Previously this only worked for constant
expressions; it has been extended to apply to arbitrary expressions.
- Publishing remote events with vector arguments that contain holes is now
rejected. The receiving side never had a chance to figure out where these
holes were. There's a chance this breaks scripts that accidentally
published vectors with holes. A reporter error is produced at runtime when
serialization of a vector with holes is attempted.
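For illustration (``my_event`` is a hypothetical event), the following now produces
a reporter error at runtime instead of silently publishing:
global my_event: event(v: vector of count);

event zeek_init() {
    local v: vector of count;
    v[0] = 1;
    v[2] = 3;  # Index 1 is a hole.
    Cluster::publish("zeek/example/topic", my_event, v);
}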
- Kerberos support on macOS has been enabled. Due to incompatibilities, the
system-provided libkrb5 is ignored, however; only versions from Homebrew are
supported and found/picked up by default. Use ``--with-krb5`` to point at a
custom libkrb5 installation.
- The ``$listen_host`` configuration for ``Cluster::listen_websocket()``'s
``WebSocketServerOptions`` was deprecated. Use the new ``$listen_addr`` field
instead.
- The ``service_violation`` field of the connection record was marked as deprecated.
Consider using the new ``failed_analyzers`` field of the connection record instead.
- ``detect-protocol.zeek`` was the last non-deprecated policy script left in
``frameworks/dpd``. It was moved to ``frameworks/analyzer/detect-protocol.zeek``.
- Running Zeek with Zeekygen for documentation extraction (``-X|--zeekygen
<cfgfile>``) now implies ``-a``, i.e., parse-only mode.
- The ``not_valid_before`` and ``not_valid_after`` times of X509 certificates are
now logged as GMT timestamps. Before, they were logged as local times, so the
output depended on the timezone your system is set to.
Similarly, the related events and the Zeek data structures all interpreted
times in X509 certificates as local times.
- The PPPoE parser now respects the size value given in the PPPoE header. Data
beyond the size given in the header will be truncated.
- Initialization of record fields whose ``&default`` attribute produces an empty
``vector``, ``table`` or ``set`` instance is now deferred until the field is
accessed, potentially improving memory usage when such fields are never accessed.
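For example, the empty set below is now only materialized once the field is first
accessed (a minimal sketch):
type Host: record {
    seen_ports: set[port] &default=set();
};

event zeek_init() {
    local h: Host;
    add h$seen_ports[80/tcp];  # The &default set is created here, on first access.
}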
Removed Functionality
---------------------
- The ``--with-bind`` argument for ``configure`` was removed. We removed the need for the
BIND library from our CMake setup in the v7.2 release, but this non-functional argument
was left behind.
- The ``--disable-archiver`` argument for ``configure`` was removed. This was deprecated
and scheduled to be removed in v7.1, but we apparently missed it during the cleanup for
that release.
Deprecated Functionality
------------------------
- The ``dpd.log`` is now deprecated and replaced by ``analyzer.log`` (see above).
``dpd.log`` is no longer created by default, but can be loaded using the
``frameworks/analyzer/deprecated-dpd-log.zeek`` policy script.
Relatedly, the ``service_violation`` field of the connection record is
deprecated and will only be present if the
``frameworks/analyzer/deprecated-dpd-log.zeek`` policy script is loaded.
- The ``protocols/http/detect-sqli.zeek`` script has been deprecated in favor of a
new ``protocols/http/detect-sql-injection.zeek`` script. The replacement places the
victim host into the ``dst`` field of a notice rather than into ``src``; the
attacker host is now placed into ``src``. Further, notices hold the first
sampled connection's UID.
Note that the ``Notice::Type`` enumeration names remain the same. You can determine
which script was used by the presence of populated ``uid`` and ``dst`` fields in the
``notice.log`` entries.
The replacement script no longer populates the ``email_body_sections`` field either.
- Using ``&default`` and ``&optional`` together on a record field has been deprecated
as it would only result in ``&default`` behavior. This will become an error starting
with Zeek 8.1.
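That is, a declaration like the following now warns and will become an error in Zeek 8.1:
type Example: record {
    # &optional has no effect here; &default always applies.
    n: count &default=0 &optional;
};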
- The ``zeek::Event()`` constructor was deprecated. Use ``event_mgr::Enqueue()``
or ``event_mgr::Dispatch()`` instead.
- Passing ``ts`` as the last argument to ``EventMgr::Enqueue()`` has been deprecated
and will lead to compile time warnings. Use ``EventMgr::Enqueue(detail::MetadataVectorPtr meta, ...)``
for populating ``meta`` accordingly.
- For plugin authors: in the core, the constructor for Connection instances has
been deprecated in favor of a new one to support pluggable connection
tuples. The ConnTuple struct, used by this deprecated Connection constructor,
is now deprecated as well.
- The ``zeek::filesystem`` namespace alias is deprecated in favor of using
``std::filesystem`` directly. Similarly, the ``ghc::filesystem`` submodule stored in
``auxil/filesystem`` has been removed and the files included from it in the Zeek
installation will no longer be installed. Builds won't warn about the deprecation of
``zeek::filesystem`` due to limitations of how we can mark deprecations in C++.
- The ``zeek::util::starts_with`` and ``zeek::util::ends_with`` functions are deprecated.
``std::string`` and ``std::string_view`` gained ``starts_with`` and ``ends_with`` methods
in C++20, and those should be used instead.
- The ``record_type_to_vector`` BIF is deprecated in favor of using the newly ordered
``record_fields`` BIF.
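A sketch of the replacement; ``record_fields`` accepts a type name (or value) and now
yields its fields in declaration order:
event zeek_init() {
    for ( name, f in record_fields("conn_id") )
        print name, f$type_name;
}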
Zeek 7.2.0
==========
@ -608,9 +126,9 @@ New Functionality
metrics are available to understand the health of each peering's buffer,
regardless of the overflow policy active. These are:
- zeek_broker_peer_buffer_messages: a gauge of the current buffer fill level,
- zeek_broker_peer_buffer_levels: a gauge of the current buffer fill level,
- zeek_broker_peer_buffer_recent_max_messages: a gauge that tracks the maximum
- zeek_broker_peer_buffer_recent_max_levels: a gauge that tracks the maximum
buffer fill level seen over the last ``Broker::buffer_stats_reset_interval`.
- zeek_broker_peer_buffer_overflows_total: a counter that tracks the number
@ -827,7 +345,7 @@ New Functionality
some updates to Zeek's internal DNS resolver due to changes in the c-ares
API. At least version v1.28.0 is now required to build Zeek.
- Python 3.9 is now required for Zeek and all of its associated subprojects.
- Python 3.9 is now required for Zeek and all of it's associated subprojects.
- IP-based connections that were previously not logged due to using an unknown
IP protocol (e.g. not TCP, UDP, or ICMP) now appear in conn.log. All conn.log
@ -918,7 +436,7 @@ New Functionality
analyzer used for processing the packet when the event is raised. The
``unknown_protocol.log`` file was extended to include this information.
- The MySQL analyzer now generates a ``mysql_change_user()`` event when the user
- The MySQL analyzer now generates a ``mysql_user_change()`` event when the user
changes mid-session via the ``COM_USER_CHANGE`` command.
- The DNS analyzer was extended to support TKEY RRs (RFC 2390). A corresponding
README
@ -3,7 +3,7 @@ The Zeek Network Security Monitor
=================================
Zeek is a powerful framework for network traffic analysis and security
monitoring.
monitoring. Follow us on Twitter at @zeekurity.
Key Features
============
@ -101,4 +101,4 @@ others.
[4] https://www.zeek.org/community/index.html
[5] https://clang.llvm.org/extra/clang-tidy/
[6] https://scan.coverity.com/projects/bro
[7] https://pvs-studio.com/en/pvs-studio/?utm_source=github&utm_medium=organic&utm_campaign=open_source
@ -15,15 +15,14 @@ traffic analysis and security monitoring.
[_Development_](#development) —
[_License_](#license)
Follow us on Twitter at [@zeekurity](https://twitter.com/zeekurity).
[![Coverage Status](https://coveralls.io/repos/github/zeek/zeek/badge.svg?branch=master)](https://coveralls.io/github/zeek/zeek?branch=master)
[![Build Status](https://img.shields.io/cirrus/github/zeek/zeek)](https://cirrus-ci.com/github/zeek/zeek)
[![Slack](https://img.shields.io/badge/slack-@zeek-brightgreen.svg?logo=slack)](https://zeek.org/slack)
[![Discourse](https://img.shields.io/discourse/status?server=https%3A%2F%2Fcommunity.zeek.org)](https://community.zeek.org)
[![Mastodon](https://img.shields.io/badge/mastodon-@zeek@infosec.exchange-brightgreen.svg?logo=mastodon)](https://infosec.exchange/@zeek)
[![Bluesky](https://img.shields.io/badge/bluesky-@zeek-brightgreen.svg?logo=bluesky)](https://bsky.app/profile/zeek.org)
</h4>
@ -1 +1 @@
8.1.0-dev.626
8.0.0-dev.0
auxil/bifcl Submodule
@ -0,0 +1 @@
Subproject commit 49e956cd278ad0ca72040536ff606f4bb8d4224f
auxil/binpac Submodule
@ -0,0 +1 @@
Subproject commit 894afb64d954f6858ba9101061e56be93b621aca
@ -1 +1 @@
Subproject commit 06d491943f4bee6c2d1e17a5c7c31836d725273d
Subproject commit 5b6cbb8c2d9124aa1fb0bea5799433138dc64cf9
@ -1 +1 @@
Subproject commit 8c0fbfd74325b6c9be022a98bcd414b6f103d09e
Subproject commit 1092e9c03ca62c16fd3d9065117f708630ec2573
auxil/filesystem Submodule
@ -0,0 +1 @@
Subproject commit 72a76d774e4c7c605141fd6d11c33cc211209ed9
auxil/gen-zam Submodule
@ -0,0 +1 @@
Subproject commit f113c5f3220263eca87c3ffaafae43fda3295ae3
@ -1 +1 @@
Subproject commit ea30540c77679ced3ce7886199384e8743628921
Subproject commit 10d93cff9fd6c8d8c3e0bae58312aed470843ff8
@ -1 +1 @@
Subproject commit 7e3670aa1f6ab7623a87ff1e770f7f6b5a1c59f1
Subproject commit d2bfec929540c1fec5d1d45f0bcee3cff1eb7fa5
@ -1 +1 @@
Subproject commit ad301651ad0a7426757f8bc94cfc8e8cd98451a8
Subproject commit ab6aff89296d11363427beab34f88258c0abd467
@ -1 +1 @@
Subproject commit 4505c4323283b56ea59935210e105da26ab7bb0b
Subproject commit 45ce017874aac9ffabac0ddc4d016f1747804234
@ -1 +1 @@
Subproject commit 7635e113080be6fc20cb308636c8c38565c95c8a
Subproject commit e15e0bd959a03d06822ae76b53eef6181daf01a2
@ -0,0 +1 @@
Subproject commit a3fe59b3f1ded5c3461995134b66c6db182fa56f
@ -1 +1 @@
Subproject commit 9a51ce1940a808aaad253077905c2b34f15f1e08
Subproject commit ee706c54e665dab92a54253f934d2acf1f79137d
@ -1 +1 @@
Subproject commit 16849ca3ec2f8637e3f8ef8ee27e2c279724387f
Subproject commit 4440c7a05ba4be229ac88d70e8f4eef2465afc50
@ -1 +1 @@
Subproject commit 485abcad45daeea6d09680e5fc7d29e97d2e3fbe
Subproject commit a824eedf2fdd28298f09d96ed10c7c74802dc8e4
@ -1 +1 @@
Subproject commit e5985abfffc1ef5ead3a0bab196fa5d86bc5276f
Subproject commit 614380100480b6b4ddcf8d868119865d1f97abad
@ -2,7 +2,7 @@ FROM alpine:latest
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20250306
RUN apk add --no-cache \
bash \
@ -1,49 +0,0 @@
FROM quay.io/centos/centos:stream10
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
# dnf config-manager isn't available at first, and
# we need it to install the CRB repo below.
RUN dnf -y install 'dnf-command(config-manager)'
# What used to be powertools is now called "CRB".
# We need it for some of the packages installed below.
# https://docs.fedoraproject.org/en-US/epel/
RUN dnf config-manager --set-enabled crb
RUN dnf -y install \
https://dl.fedoraproject.org/pub/epel/epel-release-latest-10.noarch.rpm
# The --nobest flag is hopefully temporary. Without it we currently hit
# package versioning conflicts around OpenSSL.
RUN dnf -y --nobest install \
bison \
ccache \
cmake \
cppzmq-devel \
diffutils \
flex \
gcc \
gcc-c++ \
git \
jq \
libpcap-devel \
make \
openssl \
openssl-devel \
procps-ng \
python3 \
python3-devel \
python3-pip\
sqlite \
swig \
tar \
which \
zlib-devel \
&& dnf clean all && rm -rf /var/cache/dnf
# Set the crypto policy to allow SHA-1 certificates - which we have in our tests
RUN dnf -y --nobest install crypto-policies-scripts && update-crypto-policies --set LEGACY
RUN pip3 install websockets junit2html
@ -2,7 +2,7 @@ FROM quay.io/centos/centos:stream9
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20241024
# dnf config-manager isn't available at first, and
# we need it to install the CRB repo below.
@ -34,9 +34,9 @@ RUN dnf -y --nobest install \
openssl \
openssl-devel \
procps-ng \
python3.13 \
python3.13-devel \
python3.13-pip\
python3 \
python3-devel \
python3-pip\
sqlite \
swig \
tar \
@ -47,8 +47,4 @@ RUN dnf -y --nobest install \
# Set the crypto policy to allow SHA-1 certificates - which we have in our tests
RUN dnf -y --nobest install crypto-policies-scripts && update-crypto-policies --set LEGACY
# Override the default python3.9 installation paths with 3.13
RUN alternatives --install /usr/bin/python3 python3 /usr/bin/python3.13 10
RUN alternatives --install /usr/bin/pip3 pip3 /usr/bin/pip3.13 10
RUN pip3 install websockets junit2html
@ -1,36 +1,32 @@
FROM debian:13
FROM debian:11
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20241024
RUN apt-get update && apt-get -y install \
bison \
bsdmainutils \
ccache \
cmake \
cppzmq-dev \
curl \
dnsmasq \
flex \
g++ \
gcc \
git \
jq \
libkrb5-dev \
libnats-dev \
libnode-dev \
libpcap-dev \
librdkafka-dev \
libssl-dev \
libuv1-dev \
libzmq3-dev \
make \
python3 \
python3-dev \
python3-pip\
python3-websockets \
sqlite3 \
swig \
wget \
@ -39,6 +35,4 @@ RUN apt-get update && apt-get -y install \
&& apt autoclean \
&& rm -rf /var/lib/apt/lists/*
# Debian trixie really doesn't like using pip to install system wide stuff, but
# doesn't seem there's a python3-junit2html package, so not sure what we'd break.
RUN pip3 install --break-system-packages junit2html
RUN pip3 install websockets junit2html
@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20241024
RUN apt-get update && apt-get -y install \
bison \
@ -1,8 +1,8 @@
FROM fedora:42
FROM fedora:40
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20241024
RUN dnf -y install \
bison \
@ -10,9 +10,8 @@ RUN dnf -y install \
cmake \
cppzmq-devel \
diffutils \
findutils \
dnsmasq \
flex \
gawk \
gcc \
gcc-c++ \
git \
@ -23,14 +22,12 @@ RUN dnf -y install \
openssl \
openssl-devel \
procps-ng \
python3 \
python3-devel \
python3-pip\
sqlite \
swig \
which \
zlib-devel \
crypto-policies-scripts \
&& dnf clean all && rm -rf /var/cache/dnf
RUN pip3 install websockets junit2html
@ -2,7 +2,7 @@ FROM fedora:41
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20250203
RUN dnf -y install \
bison \
@ -7,7 +7,7 @@ set -x
brew update
brew upgrade cmake
brew install cppzmq openssl@3 python@3 swig bison flex ccache libmaxminddb dnsmasq krb5
brew install cppzmq openssl@3 python@3 swig bison flex ccache libmaxminddb dnsmasq
which python3
python3 --version
@ -2,7 +2,7 @@ FROM opensuse/leap:15.6
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20241024
RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.6:Update/standard/openSUSE:Leap:15.6:Update.repo \
&& zypper refresh \
@ -2,7 +2,7 @@ FROM opensuse/tumbleweed
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20250311
# Remove the repo-openh264 repository, it caused intermittent issues
# and we should not be needing any packages from it.
@ -1,27 +0,0 @@
#!/bin/sh
zypper refresh
zypper patch -y --with-update --with-optional
LATEST_VERSION=$(zypper search -n ${ZEEK_CI_COMPILER} |
awk -F "|" "match(\$2, / ${ZEEK_CI_COMPILER}([0-9]{2})[^-]/, a) {print a[1]}" |
sort | tail -1)
echo "Installing ${ZEEK_CI_COMPILER} ${LATEST_VERSION}"
zypper install -y "${ZEEK_CI_COMPILER}${LATEST_VERSION}"
if [ "${ZEEK_CI_COMPILER}" == "gcc" ]; then
zypper install -y "${ZEEK_CI_COMPILER}${LATEST_VERSION}-c++"
fi
update-alternatives --install /usr/bin/cc cc "/usr/bin/${ZEEK_CI_COMPILER}-${LATEST_VERSION}" 100
update-alternatives --set cc "/usr/bin/${ZEEK_CI_COMPILER}-${LATEST_VERSION}"
if [ "${ZEEK_CI_COMPILER}" == "gcc" ]; then
update-alternatives --install /usr/bin/c++ c++ "/usr/bin/g++-${LATEST_VERSION}" 100
update-alternatives --set c++ "/usr/bin/g++-${LATEST_VERSION}"
else
update-alternatives --install /usr/bin/c++ c++ "/usr/bin/clang++-${LATEST_VERSION}" 100
update-alternatives --set c++ "/usr/bin/clang++-${LATEST_VERSION}"
fi
@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20241024
RUN apt-get update && apt-get -y install \
bc \
@ -4,16 +4,15 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20241024
RUN apt-get update && apt-get -y install \
bc \
bison \
bsdmainutils \
ccache \
clang-19 \
clang++-19 \
clang-tidy-19 \
clang-18 \
clang++-18 \
cmake \
cppzmq-dev \
curl \
@ -32,9 +31,7 @@ RUN apt-get update && apt-get -y install \
make \
python3 \
python3-dev \
python3-git \
python3-pip \
python3-semantic-version \
redis-server \
ruby \
sqlite3 \
@ -50,10 +47,6 @@ RUN apt-get update && apt-get -y install \
RUN pip3 install --break-system-packages websockets junit2html
RUN gem install coveralls-lcov
# Ubuntu installs clang versions with the binaries having the version number
# appended. Create a symlink for clang-tidy so cmake finds it correctly.
RUN update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-19 1000
# Download a newer pre-built ccache version that recognizes -fprofile-update=atomic
# which is used when building with --coverage.
#
@ -1,10 +1,10 @@
FROM ubuntu:25.04
FROM ubuntu:24.10
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20241115
RUN apt-get update && apt-get -y install \
bc \
@ -28,7 +28,7 @@ cd $build_dir
export ZEEK_SEED_FILE=$source_dir/testing/btest/random.seed
function run_zeek {
ZEEK_ALLOW_INIT_ERRORS=1 zeek -X $conf_file zeekygen
ZEEK_ALLOW_INIT_ERRORS=1 zeek -X $conf_file zeekygen >/dev/null
if [ $? -ne 0 ]; then
echo "Failed running zeek with zeekygen config file $conf_file" >&2
@ -5,7 +5,7 @@ SHELL [ "powershell" ]
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION=20250905
ENV DOCKERFILE_VERSION 20230801
RUN Set-ExecutionPolicy Unrestricted -Force
@ -14,8 +14,8 @@ RUN [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePoin
iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
# Install prerequisites
RUN choco install -y --no-progress visualstudio2022buildtools --version=117.14.1
RUN choco install -y --no-progress visualstudio2022-workload-vctools --version=1.0.0 --package-parameters '--add Microsoft.VisualStudio.Component.VC.ATLMFC'
RUN choco install -y --no-progress visualstudio2019buildtools --version=16.11.11.0
RUN choco install -y --no-progress visualstudio2019-workload-vctools --version=1.0.0 --package-parameters '--add Microsoft.VisualStudio.Component.VC.ATLMFC'
RUN choco install -y --no-progress sed
RUN choco install -y --no-progress winflexbison3
RUN choco install -y --no-progress msysgit
@ -30,4 +30,4 @@ RUN mkdir C:\build
WORKDIR C:\build
# This entry point starts the developer command prompt and launches the PowerShell shell.
ENTRYPOINT ["C:\\Program Files (x86)\\Microsoft Visual Studio\\2022\\BuildTools\\Common7\\Tools\\VsDevCmd.bat", "-arch=x64", "&&", "powershell.exe", "-NoLogo", "-ExecutionPolicy", "Unrestricted"]
ENTRYPOINT ["C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\BuildTools\\Common7\\Tools\\VsDevCmd.bat", "-arch=x64", "&&", "powershell.exe", "-NoLogo", "-ExecutionPolicy", "Unrestricted"]
@ -2,7 +2,7 @@
:: cmd current shell. This path is hard coded to the one on the CI image, but
:: can be adjusted if running builds locally. Unfortunately, the initial path
:: isn't in the environment so we have to hardcode the whole path.
call "c:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x86_amd64
call "c:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x86_amd64
mkdir build
cd build
@ -1,5 +1,5 @@
:: See build.cmd for documentation on this call.
call "c:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x86_amd64
call "c:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x86_amd64
cd build
cmake
@ -1 +1 @@
Subproject commit d51c6990446cf70cb9c01bca17dad171a1db05d3
Subproject commit fd0696f9077933660f7da5f81978e86b3e967647
@ -2,9 +2,10 @@
#pragma once
constexpr char ZEEK_SCRIPT_INSTALL_PATH[] = "@ZEEK_SCRIPT_INSTALL_PATH@";
constexpr char ZEEK_PLUGIN_INSTALL_PATH[] = "@ZEEK_PLUGIN_DIR@";
constexpr char DEFAULT_ZEEKPATH[] = "@DEFAULT_ZEEKPATH@";
constexpr char ZEEK_SPICY_MODULE_PATH[] = "@ZEEK_SPICY_MODULE_PATH@";
constexpr char ZEEK_SPICY_LIBRARY_PATH[] = "@ZEEK_SPICY_LIBRARY_PATH@";
constexpr char ZEEK_SPICY_DATA_PATH[] = "@ZEEK_SPICY_DATA_PATH@";
#define ZEEK_SCRIPT_INSTALL_PATH "@ZEEK_SCRIPT_INSTALL_PATH@"
#define BRO_PLUGIN_INSTALL_PATH "@ZEEK_PLUGIN_DIR@"
#define ZEEK_PLUGIN_INSTALL_PATH "@ZEEK_PLUGIN_DIR@"
#define DEFAULT_ZEEKPATH "@DEFAULT_ZEEKPATH@"
#define ZEEK_SPICY_MODULE_PATH "@ZEEK_SPICY_MODULE_PATH@"
#define ZEEK_SPICY_LIBRARY_PATH "@ZEEK_SPICY_LIBRARY_PATH@"
#define ZEEK_SPICY_DATA_PATH "@ZEEK_SPICY_DATA_PATH@"
@ -1,6 +1,4 @@
// See the file "COPYING" in the main distribution directory for copyright.
// NOLINTBEGIN(modernize-macro-to-enum)
// NOLINTBEGIN(cppcoreguidelines-macro-usage)
#pragma once
@ -308,6 +306,3 @@
/* compiled with Spicy support */
#cmakedefine HAVE_SPICY
// NOLINTEND(cppcoreguidelines-macro-usage)
// NOLINTEND(modernize-macro-to-enum)
configure vendored
@ -90,9 +90,16 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--disable-zkg don't install zkg
Required Packages in Non-Standard Locations:
--with-bifcl=PATH path to Zeek BIF compiler executable
(useful for cross-compiling)
--with-bind=PATH path to BIND install root
--with-binpac=PATH path to BinPAC executable
(useful for cross-compiling)
--with-bison=PATH path to bison executable
--with-broker=PATH path to Broker install root
(Zeek uses an embedded version by default)
--with-gen-zam=PATH path to Gen-ZAM code generator
(Zeek uses an embedded version by default)
--with-flex=PATH path to flex executable
--with-libkqueue=PATH path to libkqueue install root
(Zeek uses an embedded version by default)
@ -321,6 +328,9 @@ while [ $# -ne 0 ]; do
--disable-af-packet)
append_cache_entry DISABLE_AF_PACKET BOOL true
;;
--disable-archiver)
has_disable_archiver=1
;;
--disable-auxtools)
append_cache_entry INSTALL_AUX_TOOLS BOOL false
;;
@ -361,9 +371,15 @@ while [ $# -ne 0 ]; do
--disable-zkg)
append_cache_entry INSTALL_ZKG BOOL false
;;
--with-bifcl=*)
append_cache_entry BIFCL_EXE_PATH PATH $optarg
;;
--with-bind=*)
append_cache_entry BIND_ROOT_DIR PATH $optarg
;;
--with-binpac=*)
append_cache_entry BINPAC_EXE_PATH PATH $optarg
;;
--with-bison=*)
append_cache_entry BISON_EXECUTABLE PATH $optarg
;;
@ -376,6 +392,9 @@ while [ $# -ne 0 ]; do
--with-flex=*)
append_cache_entry FLEX_EXECUTABLE PATH $optarg
;;
--with-gen-zam=*)
append_cache_entry GEN_ZAM_EXE_PATH PATH $optarg
;;
--with-geoip=*)
append_cache_entry LibMMDB_ROOT_DIR PATH $optarg
;;
@ -491,3 +510,8 @@ eval ${cmake} 2>&1
echo "# This is the command used to configure this build" >config.status
echo $command >>config.status
chmod u+x config.status
if [ $has_disable_archiver -eq 1 ]; then
echo
echo "NOTE: The --disable-archiver argument no longer has any effect and will be removed in v7.1. zeek-archiver is now part of zeek-aux, so consider --disable-auxtools instead."
fi
doc
@ -1 +1 @@
Subproject commit 8f38ae2fd563314393eb1ca58c827d26e9966520
Subproject commit 858dd108b10a7d88852e01dc0134d6c0032f3c60
@ -1,7 +1,7 @@
# See the file "COPYING" in the main distribution directory for copyright.
# Layer to build Zeek.
FROM debian:13-slim
FROM debian:bookworm-slim
# Make the shell split commands in the log so we can determine reasons for
# failures more easily.
@ -16,7 +16,6 @@ RUN echo 'Acquire::https::timeout "180";' >> /etc/apt/apt.conf.d/99-timeouts
# Configure system for build.
RUN apt-get -q update \
&& apt-get upgrade -q -y \
&& apt-get install -q -y --no-install-recommends \
bind9 \
bison \
@ -37,7 +36,7 @@ RUN apt-get -q update \
libz-dev \
make \
python3-minimal \
python3-dev \
python3.11-dev \
swig \
ninja-build \
python3-pip \
@ -1,7 +1,7 @@
# See the file "COPYING" in the main distribution directory for copyright.
# Final layer containing all artifacts.
FROM debian:13-slim
FROM debian:bookworm-slim
# Make the shell split commands in the log so we can determine reasons for
# failures more easily.
@ -15,21 +15,18 @@ RUN echo 'Acquire::http::timeout "180";' > /etc/apt/apt.conf.d/99-timeouts
RUN echo 'Acquire::https::timeout "180";' >> /etc/apt/apt.conf.d/99-timeouts
RUN apt-get -q update \
&& apt-get upgrade -q -y \
&& apt-get install -q -y --no-install-recommends \
ca-certificates \
git \
jq \
libmaxminddb0 \
libnode115 \
libnode108 \
libpcap0.8 \
libpython3.13 \
libpython3.11 \
libssl3 \
libuv1 \
libz1 \
libzmq5 \
net-tools \
procps \
python3-git \
python3-minimal \
python3-semantic-version \
@ -60,13 +60,13 @@ const pe_mime_types = { "application/x-dosexec" };
event zeek_init() &priority=5
{
Files::register_for_mime_types(Files::ANALYZER_PE, pe_mime_types);
Log::create_stream(LOG, Log::Stream($columns=Info, $ev=log_pe, $path="pe", $policy=log_policy));
Log::create_stream(LOG, [$columns=Info, $ev=log_pe, $path="pe", $policy=log_policy]);
}
hook set_file(f: fa_file) &priority=5
{
if ( ! f?$pe )
f$pe = PE::Info($ts=f$info$ts, $id=f$id);
f$pe = [$ts=f$info$ts, $id=f$id];
}
event pe_dos_header(f: fa_file, h: PE::DOSHeader) &priority=5
@ -40,7 +40,7 @@ export {
event zeek_init() &priority=5
{
Log::create_stream(LOG, Log::Stream($columns=Info, $ev=log_ocsp, $path="ocsp", $policy=log_policy));
Log::create_stream(LOG, [$columns=Info, $ev=log_ocsp, $path="ocsp", $policy=log_policy]);
Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response");
}
@ -105,29 +105,6 @@ export {
## Event for accessing logged records.
global log_x509: event(rec: Info);
## The maximum number of bytes that a single string field can contain when
## logging. If a string reaches this limit, the log output for the field will be
## truncated. Setting this to zero disables the limiting.
##
## .. zeek:see:: Log::default_max_field_string_bytes
const default_max_field_string_bytes = Log::default_max_field_string_bytes &redef;
## The maximum number of elements a single container field can contain when
## logging. If a container reaches this limit, the log output for the field will
## be truncated. Setting this to zero disables the limiting.
##
## .. zeek:see:: Log::default_max_field_container_elements
const default_max_field_container_elements = 500 &redef;
## The maximum total number of container elements a record may log. This is the
## sum of all container elements logged for the record. If this limit is reached,
## all further containers will be logged as empty containers. If the limit is
## reached while processing a container, the container will be truncated in the
## output. Setting this to zero disables the limiting.
##
## .. zeek:see:: Log::default_max_total_container_elements
const default_max_total_container_elements = 1500 &redef;
}
global known_log_certs_with_broker: set[LogCertHash] &create_expire=relog_known_certificates_after &backend=Broker::MEMORY;
@ -140,12 +117,7 @@ redef record Files::Info += {
event zeek_init() &priority=5
{
# x509 can have some very large certificates and very large sets of URIs. Expand the log size filters
# so that we're not truncating those.
Log::create_stream(X509::LOG, Log::Stream($columns=Info, $ev=log_x509, $path="x509", $policy=log_policy,
$max_field_string_bytes=X509::default_max_field_string_bytes,
$max_field_container_elements=X509::default_max_field_container_elements,
$max_total_container_elements=X509::default_max_total_container_elements));
Log::create_stream(X509::LOG, [$columns=Info, $ev=log_x509, $path="x509", $policy=log_policy]);
# We use MIME types internally to distinguish between user and CA certificates.
# The first certificate in a connection always gets tagged as user-cert, all
@ -195,7 +167,7 @@ event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certifi
{
local der_cert = x509_get_certificate_string(cert_ref);
local fp = hash_function(der_cert);
f$info$x509 = X509::Info($ts=f$info$ts, $fingerprint=fp, $certificate=cert, $handle=cert_ref);
f$info$x509 = [$ts=f$info$ts, $fingerprint=fp, $certificate=cert, $handle=cert_ref];
if ( f$info$mime_type == "application/x-x509-user-cert" )
f$info$x509$host_cert = T;
if ( f$is_orig )
@ -253,3 +225,4 @@ event file_state_remove(f: fa_file) &priority=5
Log::write(LOG, f$info$x509);
}
@ -1,12 +1,35 @@
##! Disables analyzers if protocol violations occur, and adds service information
##! to connection log.
@load ./main
##! Activates port-independent protocol detection and selectively disables
##! analyzers if protocol violations occur.
module DPD;
export {
## Analyzers which you don't want to remove on violations.
## Add the DPD logging stream identifier.
redef enum Log::ID += { LOG };
## A default logging policy hook for the stream.
global log_policy: Log::PolicyHook;
## The record type defining the columns to log in the DPD logging stream.
type Info: record {
## Timestamp for when protocol analysis failed.
ts: time &log;
## Connection unique ID.
uid: string &log;
## Connection ID containing the 4-tuple which identifies endpoints.
id: conn_id &log;
## Transport protocol for the violation.
proto: transport_proto &log;
## The analyzer that generated the violation.
analyzer: string &log;
## The textual reason for the analysis failure.
failure_reason: string &log;
};
## Deprecated, please see https://github.com/zeek/zeek/pull/4200 for details
option max_violations: table[Analyzer::Tag] of count = table() &deprecated="Remove in v8.1: This has become non-functional in Zeek 7.2, see PR #4200" &default = 5;
## Analyzers which you don't want to throw
option ignore_violations: set[Analyzer::Tag] = set();
## Ignore violations which go this many bytes into the connection.
@ -22,12 +45,17 @@ export {
}
redef record connection += {
## The set of protocol analyzers that were removed due to a protocol
## violation after the same analyzer had previously been confirmed.
failed_analyzers: set[string] &default=set() &ordered;
dpd: Info &optional;
## The set of services (analyzers) for which Zeek has observed a
## violation after the same service had previously been confirmed.
service_violation: set[string] &default=set() &ordered;
};
# Add confirmed protocol analyzers to conn.log service field
event zeek_init() &priority=5
{
Log::create_stream(DPD::LOG, [$columns=Info, $path="dpd", $policy=log_policy]);
}
event analyzer_confirmation_info(atype: AllAnalyzers::Tag, info: AnalyzerConfirmationInfo) &priority=10
{
if ( ! is_protocol_analyzer(atype) && ! is_packet_analyzer(atype) )
@ -41,11 +69,9 @@ event analyzer_confirmation_info(atype: AllAnalyzers::Tag, info: AnalyzerConfirm
add c$service[analyzer];
}
# Remove failed analyzers from service field and add them to c$failed_analyzers
# Low priority to allow other handlers to check if the analyzer was confirmed
event analyzer_failed(ts: time, atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo) &priority=-5
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo) &priority=10
{
if ( ! is_protocol_analyzer(atype) )
if ( ! is_protocol_analyzer(atype) && ! is_packet_analyzer(atype) )
return;
if ( ! info?$c )
@ -64,21 +90,35 @@ event analyzer_failed(ts: time, atype: AllAnalyzers::Tag, info: AnalyzerViolatio
# if statement is separate, to allow repeated removal of service, in case there are several
# confirmation and violation events
if ( analyzer !in c$failed_analyzers )
add c$failed_analyzers[analyzer];
if ( analyzer in c$service_violation )
return;
# add "-service" to the list of services on removal due to violation, if analyzer was confirmed before
if ( track_removed_services_in_connection && Analyzer::name(atype) in c$service )
add c$service_violation[analyzer];
local dpd: Info;
dpd$ts = network_time();
dpd$uid = c$uid;
dpd$id = c$id;
dpd$proto = get_port_transport_proto(c$id$orig_p);
dpd$analyzer = analyzer;
# Encode data into the reason if there's any as done for the old
# analyzer_violation event, previously.
local reason = info$reason;
if ( info?$data )
{
local rname = cat("-", Analyzer::name(atype));
if ( rname !in c$service )
add c$service[rname];
local ellipsis = |info$data| > 40 ? "..." : "";
local data = info$data[0:40];
reason = fmt("%s [%s%s]", reason, data, ellipsis);
}
dpd$failure_reason = reason;
c$dpd = dpd;
}
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo ) &priority=5
{
if ( ! is_protocol_analyzer(atype) )
if ( ! is_protocol_analyzer(atype) && ! is_packet_analyzer(atype) )
return;
if ( ! info?$c || ! info?$aid )
@ -93,17 +133,29 @@ event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationI
if ( ignore_violations_after > 0 && size > ignore_violations_after )
return;
# analyzer already was removed or connection finished
# let's still log this.
if ( lookup_connection_analyzer_id(c$id, atype) == 0 )
{
event analyzer_failed(network_time(), atype, info);
return;
}
local disabled = disable_analyzer(c$id, aid, F);
# If analyzer was disabled, send failed event
if ( disabled )
event analyzer_failed(network_time(), atype, info);
# add "-service" to the list of services on removal due to violation, if analyzer was confirmed before
if ( track_removed_services_in_connection && disabled && Analyzer::name(atype) in c$service )
{
local rname = cat("-", Analyzer::name(atype));
if ( rname !in c$service )
add c$service[rname];
}
}
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo ) &priority=-5
{
if ( ! is_protocol_analyzer(atype) && ! is_packet_analyzer(atype) )
return;
if ( ! info?$c )
return;
if ( info$c?$dpd )
{
Log::write(DPD::LOG, info$c$dpd);
delete info$c$dpd;
}
}
@ -1,6 +1,8 @@
##! Logging analyzer violations into analyzer.log
##! Logging analyzer confirmations and violations into analyzer.log
@load base/frameworks/config
@load base/frameworks/logging
@load ./main
module Analyzer::Logging;
@ -9,10 +11,16 @@ export {
## Add the analyzer logging stream identifier.
redef enum Log::ID += { LOG };
## A default logging policy hook for the stream.
global log_policy: Log::PolicyHook;
## The record type defining the columns to log in the analyzer logging stream.
type Info: record {
## Timestamp of the violation.
## Timestamp of confirmation or violation.
ts: time &log;
## What caused this log entry to be produced. This can
## currently be "violation" or "confirmation".
cause: string &log;
## The kind of analyzer involved. Currently "packet", "file"
## or "protocol".
analyzer_kind: string &log;
@ -23,64 +31,163 @@ export {
uid: string &log &optional;
## File UID if available.
fuid: string &log &optional;
## Connection identifier if available.
## Connection identifier if available
id: conn_id &log &optional;
## Transport protocol for the violation, if available.
proto: transport_proto &log &optional;
## Failure or violation reason, if available.
failure_reason: string &log;
failure_reason: string &log &optional;
## Data causing failure or violation if available. Truncated
## to :zeek:see:`Analyzer::Logging::failure_data_max_size`.
failure_data: string &log &optional;
};
## Enable logging of analyzer violations and optionally confirmations
## when :zeek:see:`Analyzer::Logging::include_confirmations` is set.
option enable = T;
## Enable analyzer_confirmation. They are usually less interesting
## outside of development of analyzers or troubleshooting scenarios.
## Setting this option may also generate multiple log entries per
## connection, minimally one for each conn.log entry with a populated
## service field.
option include_confirmations = F;
## Enable tracking of analyzers getting disabled. This is mostly
## interesting for troubleshooting of analyzers in DPD scenarios.
## Setting this option may also generate multiple log entries per
## connection.
option include_disabling = F;
## If a violation contains information about the data causing it,
## include at most this many bytes of it in the log.
option failure_data_max_size = 40;
## An event that can be handled to access the :zeek:type:`Analyzer::Logging::Info`
## record as it is sent on to the logging framework.
global log_analyzer: event(rec: Info);
## A default logging policy hook for the stream.
global log_policy: Log::PolicyHook;
## Set of analyzers for which to not log confirmations or violations.
option ignore_analyzers: set[AllAnalyzers::Tag] = set();
}
event zeek_init() &priority=5
{
Log::create_stream(LOG, Log::Stream($columns=Info, $path="analyzer", $ev=log_analyzer, $policy=log_policy));
Log::create_stream(LOG, [$columns=Info, $path="analyzer", $policy=log_policy,
$event_groups=set("Analyzer::Logging")]);
local enable_handler = function(id: string, new_value: bool): bool {
if ( new_value )
Log::enable_stream(LOG);
else
Log::disable_stream(LOG);
return new_value;
};
Option::set_change_handler("Analyzer::Logging::enable", enable_handler);
local include_confirmations_handler = function(id: string, new_value: bool): bool {
if ( new_value )
enable_event_group("Analyzer::Logging::include_confirmations");
else
disable_event_group("Analyzer::Logging::include_confirmations");
return new_value;
};
Option::set_change_handler("Analyzer::Logging::include_confirmations",
include_confirmations_handler);
local include_disabling_handler = function(id: string, new_value: bool): bool {
if ( new_value )
enable_event_group("Analyzer::Logging::include_disabling");
else
disable_event_group("Analyzer::Logging::include_disabling");
return new_value;
};
Option::set_change_handler("Analyzer::Logging::include_disabling",
include_disabling_handler);
# Call the handlers directly with the current values to avoid config
# framework interactions like creating entries in config.log.
enable_handler("Analyzer::Logging::enable", Analyzer::Logging::enable);
include_confirmations_handler("Analyzer::Logging::include_confirmations",
Analyzer::Logging::include_confirmations);
include_disabling_handler("Analyzer::Logging::include_disabling",
Analyzer::Logging::include_disabling);
}
function log_analyzer_failure(ts: time, atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo)
function analyzer_kind(atype: AllAnalyzers::Tag): string
{
if ( is_protocol_analyzer(atype) )
return "protocol";
else if ( is_packet_analyzer(atype) )
return "packet";
else if ( is_file_analyzer(atype) )
return "file";
Reporter::warning(fmt("Unknown kind of analyzer %s", atype));
return "unknown";
}
function populate_from_conn(rec: Info, c: connection)
{
rec$id = c$id;
rec$uid = c$uid;
}
function populate_from_file(rec: Info, f: fa_file)
{
rec$fuid = f$id;
# If the confirmation didn't have a connection, but the
# fa_file object has exactly one, use it.
if ( ! rec?$uid && f?$conns && |f$conns| == 1 )
{
for ( _, c in f$conns )
{
rec$id = c$id;
rec$uid = c$uid;
}
}
}
event analyzer_confirmation_info(atype: AllAnalyzers::Tag, info: AnalyzerConfirmationInfo) &group="Analyzer::Logging::include_confirmations"
{
if ( atype in ignore_analyzers )
return;
local rec = Info(
$ts=ts,
$analyzer_kind=Analyzer::kind(atype),
$ts=network_time(),
$cause="confirmation",
$analyzer_kind=analyzer_kind(atype),
$analyzer_name=Analyzer::name(atype),
$failure_reason=info$reason
);
if ( info?$c )
{
rec$id = info$c$id;
rec$uid = info$c$uid;
rec$proto = get_port_transport_proto(info$c$id$orig_p);
}
populate_from_conn(rec, info$c);
if ( info?$f )
{
rec$fuid = info$f$id;
# If the confirmation didn't have a connection, but the
# fa_file object has exactly one, use it.
if ( ! rec?$uid && info$f?$conns && |info$f$conns| == 1 )
{
for ( _, c in info$f$conns )
{
rec$id = c$id;
rec$uid = c$uid;
}
}
}
populate_from_file(rec, info$f);
Log::write(LOG, rec);
}
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo) &priority=6
{
if ( atype in ignore_analyzers )
return;
local rec = Info(
$ts=network_time(),
$cause="violation",
$analyzer_kind=analyzer_kind(atype),
$analyzer_name=Analyzer::name(atype),
$failure_reason=info$reason,
);
if ( info?$c )
populate_from_conn(rec, info$c);
if ( info?$f )
populate_from_file(rec, info$f);
if ( info?$data )
{
@ -93,31 +200,19 @@ function log_analyzer_failure(ts: time, atype: AllAnalyzers::Tag, info: Analyzer
Log::write(LOG, rec);
}
# event currently is only raised for protocol analyzers; we do not fail packet and file analyzers
event analyzer_failed(ts: time, atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo)
hook Analyzer::disabling_analyzer(c: connection, atype: AllAnalyzers::Tag, aid: count) &priority=-1000 &group="Analyzer::Logging::include_disabling"
{
if ( ! is_protocol_analyzer(atype) )
if ( atype in ignore_analyzers )
return;
if ( ! info?$c )
return;
local rec = Info(
$ts=network_time(),
$cause="disabled",
$analyzer_kind=analyzer_kind(atype),
$analyzer_name=Analyzer::name(atype),
);
# log only for previously confirmed service that did not already log violation
# note that analyzers can fail repeatedly in some circumstances - e.g. when they
# are re-attached by the dynamic protocol detection due to later data.
local analyzer_name = Analyzer::name(atype);
if ( analyzer_name !in info$c$service || analyzer_name in info$c$failed_analyzers )
return;
populate_from_conn(rec, c);
log_analyzer_failure(ts, atype, info);
Log::write(LOG, rec);
}
# log packet and file analyzers here separately
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo )
{
if ( is_protocol_analyzer(atype) )
return;
log_analyzer_failure(network_time(), atype, info);
}
@ -88,15 +88,6 @@ export {
## Returns: The analyzer name corresponding to the tag.
global name: function(tag: Analyzer::Tag) : string;
## Translates an analyzer type to a string with the analyzer's type.
##
## Possible values are "protocol", "packet", "file", or "unknown".
##
## tag: The analyzer tag.
##
## Returns: The analyzer kind corresponding to the tag.
global kind: function(tag: Analyzer::Tag) : string;
## Check whether the given analyzer name exists.
##
## This can be used before calling :zeek:see:`Analyzer::get_tag` to
@ -172,23 +163,6 @@ export {
##
## This set can be added to via :zeek:see:`redef`.
global requested_analyzers: set[AllAnalyzers::Tag] = {} &redef;
## Event that is raised when an analyzer raised a service violation and was
## removed.
##
## The event is also raised if the analyzer already was no longer active by
## the time that the violation was handled - so if it happens at the very
## end of a connection.
##
## Currently this event is only raised for protocol analyzers, as packet
## and file analyzers are never actively removed/disabled.
##
## ts: time at which the violation occurred
##
## atype: atype: The analyzer tag, such as ``Analyzer::ANALYZER_HTTP``.
##
##info: Details about the violation. This record should include a :zeek:type:`connection`
global analyzer_failed: event(ts: time, atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo);
}
@load base/bif/analyzer.bif
@ -272,19 +246,6 @@ function name(atype: AllAnalyzers::Tag) : string
return __name(atype);
}
function kind(atype: AllAnalyzers::Tag): string
{
if ( is_protocol_analyzer(atype) )
return "protocol";
else if ( is_packet_analyzer(atype) )
return "packet";
else if ( is_file_analyzer(atype) )
return "file";
Reporter::warning(fmt("Unknown kind of analyzer %s", atype));
return "unknown";
}
function has_tag(name: string): bool
{
return __has_tag(name);

View file

@ -47,17 +47,17 @@ export {
event zeek_init() &priority=5
{
Log::create_stream(Broker::LOG, Log::Stream($columns=Info, $path="broker", $policy=log_policy));
Log::create_stream(Broker::LOG, [$columns=Info, $path="broker", $policy=log_policy]);
}
function log_status(ev: string, endpoint: EndpointInfo, msg: string)
{
local r: Info;
r = Broker::Info($ts = network_time(),
$ev = ev,
$ty = STATUS,
$message = msg);
r = [$ts = network_time(),
$ev = ev,
$ty = STATUS,
$message = msg];
if ( endpoint?$network )
r$peer = endpoint$network;
@ -87,10 +87,10 @@ event Broker::error(code: ErrorCode, msg: string)
ev = subst_string(ev, "_", "-");
ev = to_lower(ev);
Log::write(Broker::LOG, Info($ts = network_time(),
Log::write(Broker::LOG, [$ts = network_time(),
$ev = ev,
$ty = ERROR,
$message = msg));
$message = msg]);
Reporter::error(fmt("Broker error (%s): %s", code, msg));
}
@ -115,8 +115,8 @@ event Broker::internal_log_event(lvl: LogSeverityLevel, id: string, description:
severity = Broker::DEBUG_EVENT;
break;
}
Log::write(Broker::LOG, Info($ts = network_time(),
Log::write(Broker::LOG, [$ts = network_time(),
$ty = severity,
$ev = id,
$message = description));
$message = description]);
}

View file

@ -28,7 +28,7 @@ export {
## Default address on which to listen for WebSocket connections.
##
## .. zeek:see:: Cluster::listen_websocket
## .. zeek:see:: Broker::listen_websocket
const default_listen_address_websocket = getenv("ZEEK_DEFAULT_LISTEN_ADDRESS") &redef;
## Default interval to retry connecting to a peer if it cannot be made to
@ -69,6 +69,11 @@ export {
## all peers.
const ssl_keyfile = "" &redef;
## The number of buffered messages at the Broker/CAF layer after which
## a subscriber considers itself congested (i.e., to tune the congestion
## control mechanisms).
const congestion_queue_size = 200 &redef &deprecated="Remove in v8.1. Non-functional since v5.0";
## The max number of log entries per log stream to batch together when
## sending log messages to a remote logger.
const log_batch_size = 400 &redef;
@ -314,6 +319,27 @@ export {
p: port &default = default_port,
retry: interval &default = default_listen_retry): port;
## Listen for remote connections using WebSocket.
##
## a: an address string on which to accept connections, e.g.
## "127.0.0.1". An empty string refers to INADDR_ANY.
##
## p: the TCP port to listen on. The value 0 means that the OS should choose
## the next available free port.
##
## retry: If non-zero, retries listening at regular intervals if the port cannot be
## acquired immediately. 0 disables retries. If the
## ZEEK_DEFAULT_LISTEN_RETRY environment variable is set (as a number
## of seconds), it overrides any value given here.
##
## Returns: the bound port or 0/? on failure.
##
## .. zeek:see:: Broker::status
global listen_websocket: function(a: string &default = default_listen_address_websocket,
p: port &default = default_port_websocket,
retry: interval &default = default_listen_retry): port
&deprecated="Remove in v8.1. Switch to Cluster::listen_websocket() instead.";
## Initiate a remote connection.
##
## a: an address to connect to, e.g. "localhost" or "127.0.0.1".
@ -424,6 +450,29 @@ export {
##
## Returns: true if a new event forwarding/subscription is now registered.
global forward: function(topic_prefix: string): bool;
## Automatically send an event to any interested peers whenever it is
## locally dispatched. (For example, using "event my_event(...);" in a
## script.)
##
## topic: a topic string associated with the event message.
## Peers advertise interest by registering a subscription to some
## prefix of this topic name.
##
## ev: a Zeek event value.
##
## Returns: true if automatic event sending is now enabled.
global auto_publish: function(topic: string, ev: any): bool &deprecated="Remove in v8.1. Switch to explicit Cluster::publish() calls. Auto-publish won't work with all cluster backends.";
## Stop automatically sending an event to peers upon local dispatch.
##
## topic: a topic originally given to :zeek:see:`Broker::auto_publish`.
##
## ev: an event originally given to :zeek:see:`Broker::auto_publish`.
##
## Returns: true if automatic events will not occur for the topic/event
## pair.
global auto_unpublish: function(topic: string, ev: any): bool &deprecated="Remove in v8.1. See Broker::auto_publish()";
}
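The auto_publish deprecation points to explicit publishes. A sketch of the replacement pattern, with a made-up topic and event; the idea is to call Cluster::publish at the point where the event is raised locally:

global my_event: event(n: count);

event my_event(n: count)
	{
	# Instead of Broker::auto_publish("zeek/my/topic", my_event),
	# publish explicitly when the event is raised locally.
	Cluster::publish("zeek/my/topic", my_event, n);
	}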
@load base/bif/comm.bif
@ -465,6 +514,31 @@ function listen(a: string, p: port, retry: interval): port
return bound;
}
event retry_listen_websocket(a: string, p: port, retry: interval)
{
@pragma push ignore-deprecations
listen_websocket(a, p, retry);
@pragma pop ignore-deprecations
}
function listen_websocket(a: string, p: port, retry: interval): port
{
local bound = __listen(a, p, Broker::WEBSOCKET);
if ( bound == 0/tcp )
{
local e = getenv("ZEEK_DEFAULT_LISTEN_RETRY");
if ( e != "" )
retry = double_to_interval(to_double(e));
if ( retry != 0secs )
schedule retry { retry_listen_websocket(a, p, retry) };
}
return bound;
}
function peer(a: string, p: port, retry: interval): bool
{
return __peer(a, p, retry);
@ -519,3 +593,13 @@ function unsubscribe(topic_prefix: string): bool
{
return __unsubscribe(topic_prefix);
}
function auto_publish(topic: string, ev: any): bool
{
return __auto_publish(topic, ev);
}
function auto_unpublish(topic: string, ev: any): bool
{
return __auto_unpublish(topic, ev);
}

View file

@ -1,7 +1,6 @@
# Load the core cluster support.
@load ./main
@load ./pools
@load ./telemetry
@if ( Cluster::is_enabled() )

View file

@ -5,13 +5,13 @@
module Cluster;
global broker_backpressure_disconnects_cf = Telemetry::register_counter_family(Telemetry::MetricOpts(
global broker_backpressure_disconnects_cf = Telemetry::register_counter_family([
$prefix="zeek",
$name="broker-backpressure-disconnects",
$unit="",
$label_names=vector("peer"),
$help_text="Number of Broker peerings dropped due to a neighbor falling behind in message I/O",
));
]);
event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string)
{

View file

@ -7,13 +7,13 @@ module Cluster;
## This gauge tracks the current number of locally queued messages in each
## Broker peering's send buffer. The "peer" label identifies the remote side of
## the peering, containing a Zeek cluster node name.
global broker_peer_buffer_messages_gf = Telemetry::register_gauge_family(Telemetry::MetricOpts(
global broker_peer_buffer_messages_gf = Telemetry::register_gauge_family([
$prefix="zeek",
$name="broker-peer-buffer-messages",
$unit="",
$label_names=vector("peer"),
$help_text="Number of messages queued in Broker's send buffers",
));
]);
## This gauge tracks recent maximum queue lengths for each Broker peering's send
## buffer. Most of the time the send buffers are nearly empty, so this gauge
@ -23,82 +23,47 @@ global broker_peer_buffer_messages_gf = Telemetry::register_gauge_family(Telemet
## observed message. That is, Zeek keeps a timestamp of when the window started,
## and once it notices that the interval has passed, it moves the start of the
## window to current time.
global broker_peer_buffer_recent_max_messages_gf = Telemetry::register_gauge_family(Telemetry::MetricOpts(
global broker_peer_buffer_recent_max_messages_gf = Telemetry::register_gauge_family([
$prefix="zeek",
$name="broker-peer-buffer-recent-max-messages",
$unit="",
$label_names=vector("peer"),
$help_text="Maximum number of messages recently queued in Broker's send buffers",
));
]);
## This counter tracks for each Broker peering the number of times its send
## buffer has overflowed. For the "disconnect" policy this can at most be 1,
## since Broker stops the peering at this time. For the "drop_oldest" and
## "drop_newest" policies (see :zeek:see:`Broker:peer_overflow_policy`) the count
## instead reflects the number of messages lost.
global broker_peer_buffer_overflows_cf = Telemetry::register_counter_family(Telemetry::MetricOpts(
global broker_peer_buffer_overflows_cf = Telemetry::register_counter_family([
$prefix="zeek",
$name="broker-peer-buffer-overflows",
$unit="",
$label_names=vector("peer"),
$help_text="Number of overflows in Broker's send buffers",
));
# A helper to track overflow counts over past peerings as well as the current
# one. The peer_id field allows us to identify when the counter has reset: a
# Broker ID different from the one on file means it's a new peering.
type EpochData: record {
peer_id: string;
num_overflows: count &default=0;
num_past_overflows: count &default=0;
};
# This maps from a cluster node name to its EpochData.
global peering_epoch_data: table[string] of EpochData;
]);
hook Telemetry::sync()
{
local peers = Broker::peering_stats();
local nn: NamedNode;
local labels: vector of string;
local ed: EpochData;
for ( peer_id, stats in peers )
for ( peer, stats in peers )
{
# Translate the Broker IDs to Zeek-level node names. We skip
# telemetry for peers where this mapping fails, i.e. ones for
# connections to external systems.
nn = nodeid_to_node(peer_id);
nn = nodeid_to_node(peer);
if ( |nn$name| == 0 )
next;
labels = vector(nn$name);
Telemetry::gauge_family_set(broker_peer_buffer_messages_gf,
labels, stats$num_queued);
Telemetry::gauge_family_set(broker_peer_buffer_recent_max_messages_gf,
labels, stats$max_queued_recently);
if ( nn$name !in peering_epoch_data )
peering_epoch_data[nn$name] = EpochData($peer_id=peer_id);
ed = peering_epoch_data[nn$name];
if ( peer_id != ed$peer_id )
if ( |nn$name| > 0 )
{
# A new peering. Ensure that we account for overflows in
# past ones. There is a risk here that we might have
# missed a peering altogether if we scrape infrequently,
# but re-peering should be a rare event.
ed$peer_id = peer_id;
ed$num_past_overflows += ed$num_overflows;
Telemetry::gauge_family_set(broker_peer_buffer_messages_gf,
vector(nn$name), stats$num_queued);
Telemetry::gauge_family_set(broker_peer_buffer_recent_max_messages_gf,
vector(nn$name), stats$max_queued_recently);
Telemetry::counter_family_set(broker_peer_buffer_overflows_cf,
vector(nn$name), stats$num_overflows);
}
ed$num_overflows = stats$num_overflows;
Telemetry::counter_family_set(broker_peer_buffer_overflows_cf,
labels, ed$num_past_overflows + ed$num_overflows);
}
}

View file

@ -85,9 +85,6 @@ export {
## is incremented when the maximum queue size is reached.
const default_websocket_max_event_queue_size = 32 &redef;
## The default ping interval for WebSocket clients.
const default_websocket_ping_interval = 5 sec &redef;
## Setting a default dir will, for persistent backends that have not
## been given an explicit file path via :zeek:see:`Cluster::stores`,
## automatically create a path within this dir that is based on the name of
@ -362,16 +359,12 @@ export {
## WebSocket server options to pass to :zeek:see:`Cluster::listen_websocket`.
type WebSocketServerOptions: record {
## The address to listen on; cannot be used together with ``listen_host``.
listen_addr: addr &optional;
## The host address to listen on.
listen_host: string;
## The port the WebSocket server is supposed to listen on.
listen_port: port;
## The maximum event queue size for this server.
max_event_queue_size: count &default=default_websocket_max_event_queue_size;
## Ping interval to use. A WebSocket client not responding to
## the pings will be disconnected. Set to a negative value to
## disable pings. Subsecond intervals are currently not supported.
ping_interval: interval &default=default_websocket_ping_interval;
## The TLS options used for this WebSocket server. By default,
## TLS is disabled. See also :zeek:see:`Cluster::WebSocketTLSOptions`.
tls_options: WebSocketTLSOptions &default=WebSocketTLSOptions();
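The ping_interval field only exists on the side of the diff that declares default_websocket_ping_interval. An illustrative options value on that side, with made-up host and port:

local ws_opts = Cluster::WebSocketServerOptions($listen_host="0.0.0.0",
                                                $listen_port=8080/tcp,
                                                $ping_interval=10 sec);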
@ -396,23 +389,7 @@ export {
type EndpointInfo: record {
id: string;
network: NetworkInfo;
## The value of the X-Application-Name HTTP header, if any.
application_name: string &optional;
};
## A hook invoked for every :zeek:see:`Cluster::subscribe` call.
##
## Breaking from this hook has no effect.
##
## topic: The topic string as given to :zeek:see:`Cluster::subscribe`.
global on_subscribe: hook(topic: string);
## A hook invoked for every :zeek:see:`Cluster::unsubscribe` call.
##
## Breaking from this hook has no effect.
##
## topic: The topic string as given to :zeek:see:`Cluster::unsubscribe`.
global on_unsubscribe: hook(topic: string);
}
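Both hooks are purely observational, since breaking from them has no effect. A minimal handler sketch:

hook Cluster::on_subscribe(topic: string)
	{
	print fmt("subscribed: %s", topic);
	}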
# Needs declaration of Cluster::Event type.
@ -504,7 +481,7 @@ function nodeid_to_node(id: string): NamedNode
return NamedNode($name=name, $node=n);
}
return NamedNode($name="", $node=Node($node_type=NONE, $ip=0.0.0.0));
return NamedNode($name="", $node=[$node_type=NONE, $ip=0.0.0.0]);
}
event Cluster::hello(name: string, id: string) &priority=10
@ -584,7 +561,7 @@ event zeek_init() &priority=5
terminate();
}
Log::create_stream(Cluster::LOG, Log::Stream($columns=Info, $path="cluster", $policy=log_policy));
Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster", $policy=log_policy]);
}
function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo
@ -666,7 +643,7 @@ function create_store(name: string, persistent: bool &default=F): Cluster::Store
function log(msg: string)
{
Log::write(Cluster::LOG, Info($ts = network_time(), $node = node, $message = msg));
Log::write(Cluster::LOG, [$ts = network_time(), $node = node, $message = msg]);
}
function init(): bool
@ -689,26 +666,17 @@ function listen_websocket(options: WebSocketServerOptions): bool
return Cluster::__listen_websocket(options);
}
function format_endpoint_info(ei: EndpointInfo): string
{
local s = fmt("'%s' (%s:%d)", ei$id, ei$network$address, ei$network$bound_port);
if ( ei?$application_name )
s += fmt(" application_name=%s", ei$application_name);
return s;
}
event websocket_client_added(endpoint: EndpointInfo, subscriptions: string_vec)
{
local msg = fmt("WebSocket client %s subscribed to %s",
format_endpoint_info(endpoint), subscriptions);
local msg = fmt("WebSocket client '%s' (%s:%d) subscribed to %s",
endpoint$id, endpoint$network$address, endpoint$network$bound_port, subscriptions);
Cluster::log(msg);
}
event websocket_client_lost(endpoint: EndpointInfo, code: count, reason: string)
event websocket_client_lost(endpoint: EndpointInfo)
{
local msg = fmt("WebSocket client %s gone with code %d%s",
format_endpoint_info(endpoint), code,
|reason| > 0 ? fmt(" and reason '%s'", reason) : "");
local msg = fmt("WebSocket client '%s' (%s:%d) gone",
endpoint$id, endpoint$network$address, endpoint$network$bound_port);
Cluster::log(msg);
}

View file

@ -42,7 +42,7 @@ function __init_cluster_nodes(): bool
if ( endp$role in rolemap )
typ = rolemap[endp$role];
cnode = Cluster::Node($node_type=typ, $ip=endp$host, $p=endp$p);
cnode = [$node_type=typ, $ip=endp$host, $p=endp$p];
if ( |manager_name| > 0 && cnode$node_type != Cluster::MANAGER )
cnode$manager = manager_name;
if ( endp?$metrics_port )

View file

@ -1,39 +0,0 @@
## Module for cluster telemetry.
module Cluster::Telemetry;
export {
type Type: enum {
## Creates counter metrics for incoming and for outgoing
## events without labels.
INFO,
## Creates counter metrics for incoming and outgoing events
## labeled with handler and normalized topic names.
VERBOSE,
## Creates histogram metrics using the serialized message size
## for events, labeled by topic, handler and script location
## (outgoing only).
DEBUG,
};
## The telemetry types to enable for the core backend.
const core_metrics: set[Type] = {
INFO,
} &redef;
## The telemetry types to enable for WebSocket backends.
const websocket_metrics: set[Type] = {
INFO,
} &redef;
## Table used for normalizing topic names that contain random parts.
## Map to an empty string to skip recording a specific metric
## completely.
const topic_normalizations: table[pattern] of string = {
[/^zeek\/cluster\/nodeid\/.*/] = "zeek/cluster/nodeid/__normalized__",
} &ordered &redef;
## For the DEBUG metrics, the histogram buckets to use.
const message_size_bounds: vector of double = {
10.0, 50.0, 100.0, 500.0, 1000.0, 5000.0, 10000.0, 50000.0,
} &redef;
}
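On the side of the diff that still ships this module, topic_normalizations is &redef and &ordered, so deployments can append their own patterns; mapping to an empty string skips the metric, per the comment above. An illustrative redef (the pattern is made up):

redef Cluster::Telemetry::topic_normalizations += {
	[/^zeek\/storage\/.*/] = "zeek/storage/__normalized__",
};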

View file

@ -40,14 +40,14 @@ event zeek_init() &priority=5
return;
for ( fi in config_files )
Input::add_table(Input::TableDescription($reader=Input::READER_CONFIG,
Input::add_table([$reader=Input::READER_CONFIG,
$mode=Input::REREAD,
$source=fi,
$name=cat("config-", fi),
$idx=ConfigItem,
$val=ConfigItem,
$want_record=F,
$destination=current_config));
$destination=current_config]);
}
event InputConfig::new_value(name: string, source: string, id: string, value: any)
@ -67,11 +67,11 @@ function read_config(filename: string)
local iname = cat("config-oneshot-", filename);
Input::add_event(Input::EventDescription($reader=Input::READER_CONFIG,
Input::add_event([$reader=Input::READER_CONFIG,
$mode=Input::MANUAL,
$source=filename,
$name=iname,
$fields=EventFields,
$ev=config_line));
$ev=config_line]);
Input::remove(iname);
}

View file

@ -153,7 +153,7 @@ function config_option_changed(ID: string, new_value: any, location: string): an
event zeek_init() &priority=10
{
Log::create_stream(LOG, Log::Stream($columns=Info, $ev=log_config, $path="config", $policy=log_policy));
Log::create_stream(LOG, [$columns=Info, $ev=log_config, $path="config", $policy=log_policy]);
# Limit logging to the manager - everyone else just feeds off it.
@if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )

View file

@ -341,7 +341,7 @@ global analyzer_add_callbacks: table[Files::Tag] of function(f: fa_file, args: A
event zeek_init() &priority=5
{
Log::create_stream(Files::LOG, Log::Stream($columns=Info, $ev=log_files, $path="files", $policy=log_policy));
Log::create_stream(Files::LOG, [$columns=Info, $ev=log_files, $path="files", $policy=log_policy]);
}
function set_info(f: fa_file)

View file

@ -105,30 +105,10 @@ event Intel::insert_indicator(item: Intel::Item) &priority=5
Intel::_insert(item, F);
}
function invoke_indicator_hook(store: MinDataStore, h: hook(v: string, t: Intel::Type))
{
for ( a in store$host_data )
hook h(cat(a), Intel::ADDR);
for ( sn in store$subnet_data )
hook h(cat(sn), Intel::SUBNET);
for ( [indicator_value, indicator_type] in store$string_data )
hook h(indicator_value, indicator_type);
}
# Handling of a complete MinDataStore snapshot
#
# Invoke the removed and inserted hooks using the old and new min data store
# instances, respectively. The way this event is used, the original
# min_data_store should essentially be empty.
event new_min_data_store(store: MinDataStore)
{
invoke_indicator_hook(min_data_store, Intel::indicator_removed);
min_data_store = store;
invoke_indicator_hook(min_data_store, Intel::indicator_inserted);
}
@endif

View file

@ -68,13 +68,13 @@ event zeek_init() &priority=5
if ( |path_prefix| > 0 && sub_bytes(a_file, 0, 1) != "/" )
source = cat(rstrip(path_prefix, "/"), "/", a_file);
Input::add_event(Input::EventDescription($source=source,
$reader=Input::READER_ASCII,
$mode=Input::REREAD,
$name=cat("intel-", a_file),
$fields=Intel::Item,
$ev=Intel::read_entry,
$error_ev=Intel::read_error));
Input::add_event([$source=source,
$reader=Input::READER_ASCII,
$mode=Input::REREAD,
$name=cat("intel-", a_file),
$fields=Intel::Item,
$ev=Intel::read_entry,
$error_ev=Intel::read_error]);
}
}
}

View file

@ -207,35 +207,6 @@ export {
## item: The intel item that should be inserted.
global filter_item: hook(item: Intel::Item);
## This hook is invoked when a new indicator has been inserted into
## the min data store for the first time.
##
## Calls to :zeek:see:`Intel::seen` with a matching indicator value
## and type will result in matches.
##
## Subsequent inserts of the same indicator type and value do not
## invoke this hook. Breaking from this hook has no effect.
##
## indicator: The indicator value.
##
## indicator_type: The indicator type.
##
## .. zeek:see:: Intel::indicator_removed
global indicator_inserted: hook(indicator: string, indicator_type: Type);
## This hook is invoked when an indicator has been removed from
## the min data store.
##
## After this hook runs, :zeek:see:`Intel::seen` for the indicator
## will not return any matches. Breaking from this hook has no effect.
##
## indicator: The indicator value.
##
## indicator_type: The indicator type.
##
## .. zeek:see:: Intel::indicator_inserted
global indicator_removed: hook(indicator: string, indicator_type: Type);
global log_intel: event(rec: Info);
}
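A sketch of consuming the two hooks documented above, e.g. to mirror min data store changes elsewhere (the print is a placeholder):

hook Intel::indicator_inserted(indicator: string, indicator_type: Intel::Type)
	{
	print fmt("new indicator %s (%s)", indicator, indicator_type);
	}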
@ -280,7 +251,7 @@ global min_data_store: MinDataStore &redef;
event zeek_init() &priority=5
{
Log::create_stream(LOG, Log::Stream($columns=Info, $ev=log_intel, $path="intel", $policy=log_policy));
Log::create_stream(LOG, [$columns=Info, $ev=log_intel, $path="intel", $policy=log_policy]);
}
# Function that abstracts expiration of different types.
@ -289,7 +260,7 @@ function expire_item(indicator: string, indicator_type: Type, metas: set[MetaDat
if ( hook item_expired(indicator, indicator_type, metas) )
return item_expiration;
else
remove(Item($indicator=indicator, $indicator_type=indicator_type, $meta=MetaData($source="")), T);
remove([$indicator=indicator, $indicator_type=indicator_type, $meta=[$source=""]], T);
return 0 sec;
}
@ -536,44 +507,18 @@ function _insert(item: Item, first_dispatch: bool &default = T)
# All intelligence is case insensitive at the moment.
local lower_indicator = to_lower(item$indicator);
# Track if the indicator was inserted into the min_data_store.
# It's tempting to just use is_new above, but it seems that only works
# correctly on a worker if the manager never spuriously sends an
# Intel::insert_item(), so better to determine this locally based
# on the actual contents of the min_data_store.
local inserted = F;
local inserted_value = "";
# Insert indicator into MinDataStore (might exist already).
switch ( item$indicator_type )
{
case ADDR:
local host = to_addr(item$indicator);
if ( host !in min_data_store$host_data )
{
inserted = T;
inserted_value = cat(host);
}
add min_data_store$host_data[host];
break;
case SUBNET:
local net = to_subnet(item$indicator);
if ( net !in min_data_store$subnet_data )
{
inserted = T;
inserted_value = cat(net);
}
add min_data_store$subnet_data[net];
break;
default:
if ( [lower_indicator, item$indicator_type] !in min_data_store$string_data )
{
inserted = T;
inserted_value = lower_indicator;
}
add min_data_store$string_data[lower_indicator, item$indicator_type];
break;
}
@ -588,9 +533,6 @@ function _insert(item: Item, first_dispatch: bool &default = T)
# Announce a (possibly) new item if this is the first dispatch and
# we know it is new or have to assume that on a worker.
event Intel::new_item(item);
if ( inserted )
hook Intel::indicator_inserted(inserted_value, item$indicator_type);
}
function insert(item: Item)
@ -690,43 +632,18 @@ function remove(item: Item, purge_indicator: bool)
# Handling of indicator removal in minimal data stores.
event remove_indicator(item: Item)
{
local removed = F;
local removed_value = "";
switch ( item$indicator_type )
{
case ADDR:
local host = to_addr(item$indicator);
if ( host in min_data_store$host_data )
{
removed = T;
removed_value = cat(host);
}
delete min_data_store$host_data[host];
break;
case SUBNET:
local net = to_subnet(item$indicator);
if ( net in min_data_store$subnet_data )
{
removed = T;
removed_value = cat(net);
}
delete min_data_store$subnet_data[net];
break;
default:
local indicator_value = to_lower(item$indicator);
if ( [indicator_value, item$indicator_type] in min_data_store$string_data )
{
removed = T;
removed_value = indicator_value;
}
delete min_data_store$string_data[indicator_value, item$indicator_type];
delete min_data_store$string_data[to_lower(item$indicator), item$indicator_type];
break;
}
if ( removed )
hook Intel::indicator_removed(removed_value, item$indicator_type);
}

View file

@ -422,30 +422,10 @@ export {
## .. :zeek:see:`Log::default_max_delay_queue_size`
## .. :zeek:see:`Log::set_max_delay_queue_size`
max_delay_queue_size: count &default=default_max_delay_queue_size;
## Maximum string size for a field in a log record from this stream.
##
## .. :zeek:see:`Log::default_max_field_string_bytes`
max_field_string_bytes: count &default=Log::default_max_field_string_bytes;
## Maximum total string size in a log record from this stream.
##
## .. :zeek:see:`Log::default_max_total_string_bytes`
max_total_string_bytes: count &default=Log::default_max_total_string_bytes;
## Maximum number of container elements for a field in a log record from this stream.
##
## .. :zeek:see:`Log::default_max_field_container_elements`
max_field_container_elements: count &default=Log::default_max_field_container_elements;
## Maximum total container elements in a log record from this stream.
##
## .. :zeek:see:`Log::default_max_total_container_elements`
max_total_container_elements: count &default=Log::default_max_total_container_elements;
};
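These per-stream caps exist only on the side of the diff that declares the Log::default_max_* constants. An illustrative stream creation overriding one of them - the module, stream ID, and record type are hypothetical:

event zeek_init()
	{
	Log::create_stream(My::LOG, Log::Stream($columns=My::Info, $path="my",
	                                        $max_field_string_bytes=1024));
	}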
## Sentinel value for indicating that a filter was not found when looked up.
const no_filter = Filter($name="<not found>");
const no_filter: Filter = [$name="<not found>"];
## Creates a new logging stream with the default filter.
##
@ -1017,7 +997,7 @@ function flush(id: ID): bool
function add_default_filter(id: ID) : bool
{
return add_filter(id, Filter($name="default"));
return add_filter(id, [$name="default"]);
}
function remove_default_filter(id: ID) : bool
@ -1028,7 +1008,7 @@ function remove_default_filter(id: ID) : bool
event zeek_init() &priority=5
{
if ( print_to_log != REDIRECT_NONE )
Log::create_stream(PRINTLOG, Log::Stream($columns=PrintLogInfo, $ev=log_print, $path=print_log_path));
Log::create_stream(PRINTLOG, [$columns=PrintLogInfo, $ev=log_print, $path=print_log_path]);
}
function empty_post_delay_cb(rec: any, id: ID): bool {

View file

@ -7,9 +7,9 @@
##! names is printed out as meta information, with no "# fields" prepended; no
##! other meta data gets included in that mode. Example filter using this::
##!
##! local f = Log::Filter($name = "my-filter",
##! $writer = Log::WRITER_ASCII,
##! $config = table(["tsv"] = "T"));
##! local f: Log::Filter = [$name = "my-filter",
##! $writer = Log::WRITER_ASCII,
##! $config = table(["tsv"] = "T")];
##!
module LogAscii;

View file

@ -59,13 +59,13 @@ export {
event zeek_init() &priority=5
{
Log::create_stream(NetControl::DROP_LOG, Log::Stream($columns=DropInfo, $ev=log_netcontrol_drop, $path="netcontrol_drop", $policy=log_policy_drop));
Log::create_stream(NetControl::DROP_LOG, [$columns=DropInfo, $ev=log_netcontrol_drop, $path="netcontrol_drop", $policy=log_policy_drop]);
}
function drop_connection(c: conn_id, t: interval, location: string &default="") : string
{
local e = Entity($ty=CONNECTION, $conn=c);
local r = Rule($ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location);
local e: Entity = [$ty=CONNECTION, $conn=c];
local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location];
if ( ! hook NetControl::drop_rule_policy(r) )
return "";
@ -88,8 +88,8 @@ function drop_connection(c: conn_id, t: interval, location: string &default="")
function drop_address(a: addr, t: interval, location: string &default="") : string
{
local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a));
local r = Rule($ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location);
local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)];
local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location];
if ( ! hook NetControl::drop_rule_policy(r) )
return "";

View file

@ -383,7 +383,7 @@ global rule_entities: table[Entity, RuleType] of Rule;
event zeek_init() &priority=5
{
Log::create_stream(NetControl::LOG, Log::Stream($columns=Info, $ev=log_netcontrol, $path="netcontrol", $policy=log_policy));
Log::create_stream(NetControl::LOG, [$columns=Info, $ev=log_netcontrol, $path="netcontrol", $policy=log_policy]);
}
function entity_to_info(info: Info, e: Entity)
@ -489,22 +489,22 @@ function rule_to_info(info: Info, r: Rule)
function log_msg(msg: string, p: PluginState)
{
Log::write(LOG, Info($ts=network_time(), $category=MESSAGE, $msg=msg, $plugin=p$plugin$name(p)));
Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg, $plugin=p$plugin$name(p)]);
}
function log_error(msg: string, p: PluginState)
{
Log::write(LOG, Info($ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)));
Log::write(LOG, [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)]);
}
function log_msg_no_plugin(msg: string)
{
Log::write(LOG, Info($ts=network_time(), $category=MESSAGE, $msg=msg));
Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg]);
}
function log_rule(r: Rule, cmd: string, state: InfoState, p: PluginState, msg: string &default="")
{
local info = Info($ts=network_time());
local info: Info = [$ts=network_time()];
info$category = RULE;
info$cmd = cmd;
info$state = state;
@ -519,14 +519,14 @@ function log_rule(r: Rule, cmd: string, state: InfoState, p: PluginState, msg: s
function log_rule_error(r: Rule, msg: string, p: PluginState)
{
local info = Info($ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p));
local info: Info = [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)];
rule_to_info(info, r);
Log::write(LOG, info);
}
function log_rule_no_plugin(r: Rule, state: InfoState, msg: string)
{
local info = Info($ts=network_time());
local info: Info = [$ts=network_time()];
info$category = RULE;
info$state = state;
info$msg = msg;
@ -538,16 +538,16 @@ function log_rule_no_plugin(r: Rule, state: InfoState, msg: string)
function whitelist_address(a: addr, t: interval, location: string &default="") : string
{
local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a));
local r = Rule($ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location);
local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)];
local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location];
return add_rule(r);
}
function whitelist_subnet(s: subnet, t: interval, location: string &default="") : string
{
local e = Entity($ty=ADDRESS, $ip=s);
local r = Rule($ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location);
local e: Entity = [$ty=ADDRESS, $ip=s];
local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location];
return add_rule(r);
}
@ -561,8 +561,8 @@ function redirect_flow(f: flow_id, out_port: count, t: interval, location: strin
$dst_h=addr_to_subnet(f$dst_h),
$dst_p=f$dst_p
);
local e = Entity($ty=FLOW, $flow=flow);
local r = Rule($ty=REDIRECT, $target=FORWARD, $entity=e, $expire=t, $location=location, $out_port=out_port);
local e: Entity = [$ty=FLOW, $flow=flow];
local r: Rule = [$ty=REDIRECT, $target=FORWARD, $entity=e, $expire=t, $location=location, $out_port=out_port];
return add_rule(r);
}
@ -570,19 +570,19 @@ function redirect_flow(f: flow_id, out_port: count, t: interval, location: strin
function quarantine_host(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string
{
local orules: vector of string = vector();
local edrop = Entity($ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected)));
local rdrop = Rule($ty=DROP, $target=FORWARD, $entity=edrop, $expire=t, $location=location);
local edrop: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected))];
local rdrop: Rule = [$ty=DROP, $target=FORWARD, $entity=edrop, $expire=t, $location=location];
orules += add_rule(rdrop);
local todnse = Entity($ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(dns), $dst_p=53/udp));
local todnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(dns), $dst_p=53/udp)];
local todnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=todnse, $expire=t, $location=location, $mod=FlowMod($dst_h=quarantine), $priority=+5);
orules += add_rule(todnsr);
local fromdnse = Entity($ty=FLOW, $flow=Flow($src_h=addr_to_subnet(dns), $src_p=53/udp, $dst_h=addr_to_subnet(infected)));
local fromdnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(dns), $src_p=53/udp, $dst_h=addr_to_subnet(infected))];
local fromdnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=fromdnse, $expire=t, $location=location, $mod=FlowMod($src_h=dns), $priority=+5);
orules += add_rule(fromdnsr);
local wle = Entity($ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(quarantine), $dst_p=80/tcp));
local wle: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(quarantine), $dst_p=80/tcp)];
local wlr = Rule($ty=WHITELIST, $target=FORWARD, $entity=wle, $expire=t, $location=location, $priority=+5);
orules += add_rule(wlr);

View file

@ -303,7 +303,7 @@ function create_acld(config: AcldConfig) : PluginState
add netcontrol_acld_topics[config$acld_topic];
local host = cat(config$acld_host);
local p = PluginState($acld_config=config, $plugin=acld_plugin, $acld_id=netcontrol_acld_current_id);
local p: PluginState = [$acld_config=config, $plugin=acld_plugin, $acld_id=netcontrol_acld_current_id];
if ( [config$acld_port, host] in netcontrol_acld_peers )
Reporter::warning(fmt("Peer %s:%s was added to NetControl acld plugin twice.", host, config$acld_port));

View file

@ -117,7 +117,7 @@ global debug_plugin = Plugin(
function create_debug(do_something: bool, name: string) : PluginState
{
local p = PluginState($plugin=debug_plugin);
local p: PluginState = [$plugin=debug_plugin];
# FIXME: Why's the default not working?
p$config = table();
@ -132,7 +132,7 @@ function create_debug(do_something: bool, name: string) : PluginState
function create_debug_error(name: string) : PluginState
{
local p = copy(PluginState($plugin=debug_plugin));
local p: PluginState = copy([$plugin=debug_plugin]);
p$config["name"] = name;
p$config["all"] = "1";
p$plugin$add_rule = debug_add_rule_error;
@ -141,7 +141,7 @@ function create_debug_error(name: string) : PluginState
function create_debug_exists(name: string) : PluginState
{
local p = copy(PluginState($plugin=debug_plugin));
local p: PluginState = copy([$plugin=debug_plugin]);
p$config["name"] = name;
p$config["all"] = "1";
p$plugin$add_rule = debug_add_rule_exists;

View file

@ -447,7 +447,7 @@ global openflow_plugin = Plugin(
function create_openflow(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState
{
local p = PluginState($plugin=openflow_plugin, $of_controller=controller, $of_config=config);
local p: PluginState = [$plugin=openflow_plugin, $of_controller=controller, $of_config=config];
return p;
}

View file

@ -106,7 +106,7 @@ global packetfilter_plugin = Plugin(
function create_packetfilter() : PluginState
{
local p = PluginState($plugin=packetfilter_plugin);
local p: PluginState = [$plugin=packetfilter_plugin];
return p;
}

View file

@ -40,7 +40,7 @@ export {
event zeek_init() &priority=5
{
Log::create_stream(NetControl::SHUNT, Log::Stream($columns=ShuntInfo, $ev=log_netcontrol_shunt, $path="netcontrol_shunt", $policy=log_policy_shunt));
Log::create_stream(NetControl::SHUNT, [$columns=ShuntInfo, $ev=log_netcontrol_shunt, $path="netcontrol_shunt", $policy=log_policy_shunt]);
}
function shunt_flow(f: flow_id, t: interval, location: string &default="") : string
@ -51,8 +51,8 @@ function shunt_flow(f: flow_id, t: interval, location: string &default="") : str
$dst_h=addr_to_subnet(f$dst_h),
$dst_p=f$dst_p
);
local e = Entity($ty=FLOW, $flow=flow);
local r = Rule($ty=DROP, $target=MONITOR, $entity=e, $expire=t, $location=location);
local e: Entity = [$ty=FLOW, $flow=flow];
local r: Rule = [$ty=DROP, $target=MONITOR, $entity=e, $expire=t, $location=location];
local id = add_rule(r);

View file

@ -102,9 +102,9 @@ event zeek_init()
# This replaces the standard non-pretty-printing filter.
Log::add_filter(Notice::ALARM_LOG,
Log::Filter($name="alarm-mail", $writer=Log::WRITER_NONE,
$interv=Log::default_mail_alarms_interval,
$postprocessor=pp_postprocessor));
[$name="alarm-mail", $writer=Log::WRITER_NONE,
$interv=Log::default_mail_alarms_interval,
$postprocessor=pp_postprocessor]);
}
hook notice(n: Notice::Info) &priority=-5

View file

@ -381,16 +381,16 @@ function log_mailing_postprocessor(info: Log::RotationInfo): bool
event zeek_init() &priority=5
{
Log::create_stream(Notice::LOG, Log::Stream($columns=Info, $ev=log_notice, $path="notice", $policy=log_policy));
Log::create_stream(Notice::LOG, [$columns=Info, $ev=log_notice, $path="notice", $policy=log_policy]);
Log::create_stream(Notice::ALARM_LOG, Log::Stream($columns=Notice::Info, $path="notice_alarm", $policy=log_policy_alarm));
Log::create_stream(Notice::ALARM_LOG, [$columns=Notice::Info, $path="notice_alarm", $policy=log_policy_alarm]);
# If Zeek is configured for mailing notices, set up mailing for alarms.
# Make sure that this alarm log is also output as text so that it can
# be packaged up and emailed later.
if ( ! reading_traces() && mail_dest != "" )
Log::add_filter(Notice::ALARM_LOG,
Log::Filter($name="alarm-mail", $path="alarm-mail", $writer=Log::WRITER_ASCII,
$interv=24hrs, $postprocessor=log_mailing_postprocessor));
[$name="alarm-mail", $path="alarm-mail", $writer=Log::WRITER_ASCII,
$interv=24hrs, $postprocessor=log_mailing_postprocessor]);
}
function email_headers(subject_desc: string, dest: string): string

View file

@ -52,7 +52,7 @@ export {
## The peer that originated this weird. This is helpful in
## cluster deployments if a particular cluster node is having
## trouble to help identify which node is having trouble.
peer: string &log &default=peer_description;
peer: string &log &optional &default=peer_description;
## The source of the weird. When reported by an analyzer, this
## should be the name of the analyzer.
@ -318,7 +318,7 @@ const notice_actions = {
event zeek_init() &priority=5
{
Log::create_stream(Weird::LOG, Log::Stream($columns=Info, $ev=log_weird, $path="weird", $policy=log_policy));
Log::create_stream(Weird::LOG, [$columns=Info, $ev=log_weird, $path="weird", $policy=log_policy]);
}
function flow_id_string(src: addr, dst: addr): string

View file

@ -50,12 +50,12 @@ export {
event zeek_init() &priority=5
{
Log::create_stream(OpenFlow::LOG, Log::Stream($columns=Info, $ev=log_openflow, $path="openflow", $policy=log_policy));
Log::create_stream(OpenFlow::LOG, [$columns=Info, $ev=log_openflow, $path="openflow", $policy=log_policy]);
}
function log_flow_mod(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
{
Log::write(LOG, Info($ts=network_time(), $dpid=state$log_dpid, $match=match, $flow_mod=flow_mod));
Log::write(OpenFlow::LOG, [$ts=network_time(), $dpid=state$log_dpid, $match=match, $flow_mod=flow_mod]);
if ( state$log_success_event )
event OpenFlow::flow_mod_success(state$_name, match, flow_mod);

View file

@ -175,7 +175,7 @@ event filter_change_tracking()
event zeek_init() &priority=5
{
Log::create_stream(PacketFilter::LOG, Log::Stream($columns=Info, $path="packet_filter", $policy=log_policy));
Log::create_stream(PacketFilter::LOG, [$columns=Info, $path="packet_filter", $policy=log_policy]);
# Preverify the capture and restrict filters to give more granular failure messages.
for ( id, cf in capture_filters )
@ -303,9 +303,9 @@ function install(): bool
local error_string : string;
if ( state == Pcap::fatal )
{
NOTICE(Notice::Info($note=Compile_Failure,
$msg=fmt("Compiling packet filter failed"),
$sub=tmp_filter));
NOTICE([$note=Compile_Failure,
$msg=fmt("Compiling packet filter failed"),
$sub=tmp_filter]);
error_string = fmt("Bad pcap filter '%s': %s", tmp_filter,
Pcap::get_filter_state_string(DefaultPcapFilter));
@ -326,8 +326,8 @@ function install(): bool
}
local diff = current_time()-ts;
if ( diff > max_filter_compile_time )
NOTICE(Notice::Info($note=Too_Long_To_Compile_Filter,
$msg=fmt("A BPF filter is taking longer than %0.1f seconds to compile", diff)));
NOTICE([$note=Too_Long_To_Compile_Filter,
$msg=fmt("A BPF filter is taking longer than %0.1f seconds to compile", diff)]);
# Set it to the current filter if it passed precompiling
current_filter = tmp_filter;
@ -350,9 +350,9 @@ function install(): bool
info$success = F;
info$failure_reason = Pcap::get_filter_state_string(DefaultPcapFilter);
NOTICE(Notice::Info($note=Install_Failure,
$msg=fmt("Installing packet filter failed"),
$sub=current_filter));
NOTICE([$note=Install_Failure,
$msg=fmt("Installing packet filter failed"),
$sub=current_filter]);
}
if ( reading_live_traffic() || reading_traces() )

View file

@ -24,10 +24,10 @@ event net_stats_update(last_stat: NetStats)
{
local new_recvd = ns$pkts_recvd - last_stat$pkts_recvd;
local new_link = ns$pkts_link - last_stat$pkts_link;
NOTICE(Notice::Info($note=Dropped_Packets,
$msg=fmt("%d packets dropped after filtering, %d received%s",
new_dropped, new_recvd + new_dropped,
new_link != 0 ? fmt(", %d on link", new_link) : "")));
NOTICE([$note=Dropped_Packets,
$msg=fmt("%d packets dropped after filtering, %d received%s",
new_dropped, new_recvd + new_dropped,
new_link != 0 ? fmt(", %d on link", new_link) : "")]);
}
schedule stats_collection_interval { net_stats_update(ns) };

View file

@ -40,20 +40,20 @@ export {
event zeek_init() &priority=5
{
Log::create_stream(Reporter::LOG, Log::Stream($columns=Info, $path="reporter", $policy=log_policy));
Log::create_stream(Reporter::LOG, [$columns=Info, $path="reporter", $policy=log_policy]);
}
event reporter_info(t: time, msg: string, location: string) &priority=-5
{
Log::write(Reporter::LOG, Info($ts=t, $level=INFO, $message=msg, $location=location));
Log::write(Reporter::LOG, [$ts=t, $level=INFO, $message=msg, $location=location]);
}
event reporter_warning(t: time, msg: string, location: string) &priority=-5
{
Log::write(Reporter::LOG, Info($ts=t, $level=WARNING, $message=msg, $location=location));
Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]);
}
event reporter_error(t: time, msg: string, location: string) &priority=-5
{
Log::write(Reporter::LOG, Info($ts=t, $level=ERROR, $message=msg, $location=location));
Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]);
}

View file

@ -145,14 +145,14 @@ global did_sig_log: set[string] &read_expire = 1 hr;
event zeek_init() &priority=5
{
Log::create_stream(Signatures::LOG, Log::Stream($columns=Info, $ev=log_signature, $path="signatures", $policy=log_policy));
Log::create_stream(Signatures::LOG, [$columns=Info, $ev=log_signature, $path="signatures", $policy=log_policy]);
}
event sig_summary(orig: addr, id: string, msg: string)
{
NOTICE(Notice::Info($note=Signature_Summary, $src=orig,
$msg=fmt("%s: %s", orig, msg),
$n=count_per_orig[orig,id]));
NOTICE([$note=Signature_Summary, $src=orig,
$msg=fmt("%s: %s", orig, msg),
$n=count_per_orig[orig,id]]);
}
event signature_match(state: signature_state, msg: string, data: string)
@ -189,16 +189,16 @@ event signature_match(state: signature_state, msg: string, data: string)
if ( action != SIG_QUIET && action != SIG_COUNT_PER_RESP )
{
local info = Info($ts=network_time(),
$note=Sensitive_Signature,
$uid=state$conn$uid,
$src_addr=src_addr,
$src_port=src_port,
$dst_addr=dst_addr,
$dst_port=dst_port,
$event_msg=fmt("%s: %s", src_addr, msg),
$sig_id=sig_id,
$sub_msg=data);
local info: Info = [$ts=network_time(),
$note=Sensitive_Signature,
$uid=state$conn$uid,
$src_addr=src_addr,
$src_port=src_port,
$dst_addr=dst_addr,
$dst_port=dst_port,
$event_msg=fmt("%s: %s", src_addr, msg),
$sig_id=sig_id,
$sub_msg=data];
Log::write(Signatures::LOG, info);
}
@ -211,12 +211,12 @@ event signature_match(state: signature_state, msg: string, data: string)
local dst = state$conn$id$resp_h;
if ( ++count_per_resp[dst,sig_id] in count_thresholds )
{
NOTICE(Notice::Info($note=Count_Signature, $conn=state$conn,
$msg=msg,
$n=count_per_resp[dst,sig_id],
$sub=fmt("%d matches of signature %s on host %s",
count_per_resp[dst,sig_id],
sig_id, dst)));
NOTICE([$note=Count_Signature, $conn=state$conn,
$msg=msg,
$n=count_per_resp[dst,sig_id],
$sub=fmt("%d matches of signature %s on host %s",
count_per_resp[dst,sig_id],
sig_id, dst)]);
}
}
@ -241,10 +241,10 @@ event signature_match(state: signature_state, msg: string, data: string)
}
if ( notice )
NOTICE(Notice::Info($note=Sensitive_Signature,
$conn=state$conn, $src=src_addr,
$dst=dst_addr, $msg=fmt("%s: %s", src_addr, msg),
$sub=data));
NOTICE([$note=Sensitive_Signature,
$conn=state$conn, $src=src_addr,
$dst=dst_addr, $msg=fmt("%s: %s", src_addr, msg),
$sub=data]);
if ( action == SIG_FILE_BUT_NO_SCAN || action == SIG_SUMMARY )
return;
@ -273,12 +273,12 @@ event signature_match(state: signature_state, msg: string, data: string)
orig, sig_id, hcount);
Log::write(Signatures::LOG,
Info($ts=network_time(), $note=Multiple_Sig_Responders,
$src_addr=orig, $sig_id=sig_id, $event_msg=msg,
$host_count=hcount, $sub_msg=horz_scan_msg));
[$ts=network_time(), $note=Multiple_Sig_Responders,
$src_addr=orig, $sig_id=sig_id, $event_msg=msg,
$host_count=hcount, $sub_msg=horz_scan_msg]);
NOTICE(Notice::Info($note=Multiple_Sig_Responders, $src=orig,
$msg=msg, $n=hcount, $sub=horz_scan_msg));
NOTICE([$note=Multiple_Sig_Responders, $src=orig,
$msg=msg, $n=hcount, $sub=horz_scan_msg]);
last_hthresh[orig] = hcount;
}
@ -290,16 +290,16 @@ event signature_match(state: signature_state, msg: string, data: string)
orig, vcount, resp);
Log::write(Signatures::LOG,
Info($ts=network_time(),
$note=Multiple_Signatures,
$src_addr=orig,
$dst_addr=resp, $sig_id=sig_id, $sig_count=vcount,
$event_msg=fmt("%s different signatures triggered", vcount),
$sub_msg=vert_scan_msg));
[$ts=network_time(),
$note=Multiple_Signatures,
$src_addr=orig,
$dst_addr=resp, $sig_id=sig_id, $sig_count=vcount,
$event_msg=fmt("%s different signatures triggered", vcount),
$sub_msg=vert_scan_msg]);
NOTICE(Notice::Info($note=Multiple_Signatures, $src=orig, $dst=resp,
$msg=fmt("%s different signatures triggered", vcount),
$n=vcount, $sub=vert_scan_msg));
NOTICE([$note=Multiple_Signatures, $src=orig, $dst=resp,
$msg=fmt("%s different signatures triggered", vcount),
$n=vcount, $sub=vert_scan_msg]);
last_vthresh[orig] = vcount;
}

View file

@ -126,7 +126,7 @@ export {
event zeek_init() &priority=5
{
Log::create_stream(Software::LOG, Log::Stream($columns=Info, $ev=log_software, $path="software", $policy=log_policy));
Log::create_stream(Software::LOG, [$columns=Info, $ev=log_software, $path="software", $policy=log_policy]);
}
type Description: record {
@ -163,7 +163,7 @@ function parse(unparsed_version: string): Description
else
v = Version($major=extract_count(vs));
return Description($version=v, $unparsed_version=unparsed_version, $name=software_name);
return [$version=v, $unparsed_version=unparsed_version, $name=software_name];
}
}
else
@ -236,7 +236,7 @@ function parse(unparsed_version: string): Description
}
}
return Description($version=v, $unparsed_version=unparsed_version, $name=alternate_names[software_name]);
return [$version=v, $unparsed_version=unparsed_version, $name=alternate_names[software_name]];
}
global parse_cache: table[string] of Description &read_expire=65secs;
@ -269,13 +269,13 @@ function parse_mozilla(unparsed_version: string): Description
{
software_name = "MSIE";
if ( /Trident\/4\.0/ in unparsed_version )
v = Version($major=8,$minor=0);
v = [$major=8,$minor=0];
else if ( /Trident\/5\.0/ in unparsed_version )
v = Version($major=9,$minor=0);
v = [$major=9,$minor=0];
else if ( /Trident\/6\.0/ in unparsed_version )
v = Version($major=10,$minor=0);
v = [$major=10,$minor=0];
else if ( /Trident\/7\.0/ in unparsed_version )
v = Version($major=11,$minor=0);
v = [$major=11,$minor=0];
else
{
parts = split_string_all(unparsed_version, /MSIE [0-9]{1,2}\.*[0-9]*b?[0-9]*/);
@ -373,7 +373,7 @@ function parse_mozilla(unparsed_version: string): Description
v = parse(parts[1])$version;
}
return Description($version=v, $unparsed_version=unparsed_version, $name=software_name);
return [$version=v, $unparsed_version=unparsed_version, $name=software_name];
}

View file

@ -8,8 +8,8 @@ export {
event max_file_depth_exceeded(f: fa_file, args: Files::AnalyzerArgs, limit: count)
{
NOTICE(Notice::Info(
$note=Spicy::Spicy_Max_File_Depth_Exceeded,
$msg=fmt("Maximum file depth exceeded for file %s", f$id)
));
NOTICE([
$note=Spicy::Spicy_Max_File_Depth_Exceeded,
$msg=fmt("Maximum file depth exceeded for file %s", f$id)
]);
}

View file

@ -81,46 +81,30 @@ export {
function open_backend(btype: Storage::Backend, options: Storage::BackendOptions,
key_type: any, val_type: any): Storage::OperationResult
{
if ( options$forced_sync )
return Storage::Sync::__open_backend(btype, options, key_type, val_type);
else
return Storage::Async::__open_backend(btype, options, key_type, val_type);
return Storage::Async::__open_backend(btype, options, key_type, val_type);
}
function close_backend(backend: opaque of Storage::BackendHandle)
: Storage::OperationResult
{
if ( Storage::is_forced_sync(backend) )
return Storage::Sync::__close_backend(backend);
else
return Storage::Async::__close_backend(backend);
return Storage::Async::__close_backend(backend);
}
function put(backend: opaque of Storage::BackendHandle, args: Storage::PutArgs)
: Storage::OperationResult
{
if ( Storage::is_forced_sync(backend) )
return Storage::Sync::__put(backend, args$key, args$value, args$overwrite,
args$expire_time);
else
return Storage::Async::__put(backend, args$key, args$value, args$overwrite,
args$expire_time);
return Storage::Async::__put(backend, args$key, args$value, args$overwrite,
args$expire_time);
}
function get(backend: opaque of Storage::BackendHandle, key: any)
: Storage::OperationResult
{
if ( Storage::is_forced_sync(backend) )
return Storage::Sync::__get(backend, key);
else
return Storage::Async::__get(backend, key);
return Storage::Async::__get(backend, key);
}
function erase(backend: opaque of Storage::BackendHandle, key: any)
: Storage::OperationResult
{
if ( Storage::is_forced_sync(backend) )
return Storage::Sync::__erase(backend, key);
else
return Storage::Async::__erase(backend, key);
return Storage::Async::__erase(backend, key);
}

View file

@ -3,9 +3,6 @@
module Storage;
export {
# Default value for the BackendOptions::forced_sync field.
const default_forced_sync: bool = F &redef;
## Base record for backend options that can be passed to
## :zeek:see:`Storage::Async::open_backend` and
## :zeek:see:`Storage::Sync::open_backend`. Backend plugins can redef this record
@ -13,11 +10,6 @@ export {
type BackendOptions: record {
## The serializer used for converting Zeek data.
serializer: Storage::Serializer &default=Storage::STORAGE_SERIALIZER_JSON;
## Sets the backend into forced-synchronous mode. All operations will run
## in synchronous mode, even if the async functions are called. This
## should generally only be set to ``T`` during testing.
forced_sync : bool &default=Storage::default_forced_sync;
};
## Record for passing arguments to :zeek:see:`Storage::Async::put` and
@ -37,7 +29,4 @@ export {
## backend.
expire_time: interval &default=0sec;
};
# The histogram buckets to use for operation latency metrics, in seconds.
const latency_metric_bounds: vector of double = { 0.001, 0.01, 0.1, 1.0, } &redef;
}

View file

@ -312,7 +312,7 @@ event zeek_init() &priority=100000
function init_resultval(r: Reducer): ResultVal
{
local rv = ResultVal($begin=network_time(), $end=network_time());
local rv: ResultVal = [$begin=network_time(), $end=network_time()];
hook init_resultval_hook(r, rv);
return rv;
}

View file

@ -54,7 +54,7 @@ hook register_observe_plugins()
if ( r$num_last_elements > 0 )
{
if ( ! rv?$last_elements )
rv$last_elements = Queue::init(Queue::Settings($max_len=r$num_last_elements));
rv$last_elements = Queue::init([$max_len=r$num_last_elements]);
Queue::put(rv$last_elements, obs);
}
});
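The Queue usage above follows the module's generic init/put pattern; a standalone sketch:

event zeek_init()
	{
	local q = Queue::init(Queue::Settings($max_len=5));
	Queue::put(q, "observation");
	print Queue::len(q);
	}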

View file

@ -245,6 +245,16 @@ export {
label_values: labels_vector,
measurement: double): bool;
## Interval at which the :zeek:see:`Telemetry::sync` hook is invoked.
##
## By default, the hook is invoked on demand; setting this option to
## a positive interval allows it to be invoked regularly, too. Regular
## invocations are relative to Zeek's network time.
##
## Note that on-demand hook invocation will happen even if this
## is set.
option sync_interval = 0sec &deprecated="Remove in 8.1. If you require regular sync invocation, do so explicitly in a scheduled event.";
## Collect all counter and gauge metrics matching the given *name* and *prefix*.
##
## For histogram metrics, use :zeek:see:`Telemetry::collect_histogram_metrics` instead.
@ -286,12 +296,12 @@ function register_counter_family(opts: MetricOpts): CounterFamily
}
# Fallback Counter returned when there are issues with the labels.
global error_counter_cf = register_counter_family(MetricOpts(
global error_counter_cf = register_counter_family([
$prefix="zeek",
$name="telemetry_counter_usage_error",
$unit="",
$help_text="This counter is returned when label usage for counters is wrong. Check reporter.log if non-zero."
));
]);
function counter_with(cf: CounterFamily, label_values: labels_vector): Counter
{
@ -345,12 +355,12 @@ function register_gauge_family(opts: MetricOpts): GaugeFamily
}
# Fallback Gauge returned when there are issues with the label usage.
global error_gauge_cf = register_gauge_family(MetricOpts(
global error_gauge_cf = register_gauge_family([
$prefix="zeek",
$name="telemetry_gauge_usage_error",
$unit="",
$help_text="This gauge is returned when label usage for gauges is wrong. Check reporter.log if non-zero."
));
]);
function gauge_with(gf: GaugeFamily, label_values: labels_vector): Gauge
{
@ -414,13 +424,13 @@ function register_histogram_family(opts: MetricOpts): HistogramFamily
}
# Fallback Histogram when there are issues with the labels.
global error_histogram_hf = register_histogram_family(MetricOpts(
global error_histogram_hf = register_histogram_family([
$prefix="zeek",
$name="telemetry_histogram_usage_error",
$unit="",
$help_text="This histogram is returned when label usage for histograms is wrong. Check reporter.log if non-zero.",
$bounds=vector(1.0)
));
]);
function histogram_with(hf: HistogramFamily, label_values: labels_vector): Histogram
{
@ -455,18 +465,31 @@ function collect_histogram_metrics(prefix: string, name: string): vector of Hist
return Telemetry::__collect_histogram_metrics(prefix, name);
}
event run_sync_hook()
{
hook Telemetry::sync();
@pragma push ignore-deprecations
schedule sync_interval { run_sync_hook() };
@pragma pop ignore-deprecations
}
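With sync_interval deprecated, the same effect can be had in user code; a minimal replacement sketch with a made-up event name and interval:

global my_sync_tick: event();

event my_sync_tick()
	{
	hook Telemetry::sync();
	schedule 15 sec { my_sync_tick() };
	}

event zeek_init()
	{
	schedule 15 sec { my_sync_tick() };
	}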
# Expose the Zeek version as Prometheus style info metric
global version_gauge_family = Telemetry::register_gauge_family(Telemetry::MetricOpts(
global version_gauge_family = Telemetry::register_gauge_family([
$prefix="zeek",
$name="version_info",
$unit="",
$help_text="The Zeek version",
$label_names=vector("version_number", "major", "minor", "patch", "commit",
"beta", "debug","version_string")
));
]);
event zeek_init()
{
@pragma push ignore-deprecations
if ( sync_interval > 0sec )
schedule sync_interval { run_sync_hook() };
@pragma pop ignore-deprecations
local v = Version::info;
local labels = vector(cat(v$version_number),
cat(v$major), cat(v$minor), cat(v$patch),

View file

@ -15,13 +15,6 @@ export {
## HTTP. The default value means Zeek won't expose the port.
const metrics_port = 0/unknown &redef;
## Every metric automatically receives a label with the following name
## and the metrics_endpoint_name as value to identify the originating
## cluster node.
## The label was previously hard-coded as "endpoint", and that's why
## the variable is called the way it is, but "node" is the better label.
const metrics_endpoint_label = "node" &redef;
## ID for the metrics exporter. This is used as the 'endpoint' label
## value when exporting data to Prometheus. In a cluster setup, this
## defaults to the name of the node in the cluster configuration.

Some files were not shown because too many files have changed in this diff.