mirror of
https://github.com/zeek/zeek.git
synced 2025-10-03 15:18:20 +00:00
Compare commits
No commits in common. "master" and "v8.0.0-dev" have entirely different histories.
master
...
v8.0.0-dev
2492 changed files with 110757 additions and 436563 deletions
447
.cirrus.yml
447
.cirrus.yml
|
@ -18,8 +18,6 @@ spicy_ssl_config: &SPICY_SSL_CONFIG --build-type=release --disable-broker-tests
|
||||||
asan_sanitizer_config: &ASAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=address --enable-fuzzers --enable-coverage --ccache --enable-werror
|
asan_sanitizer_config: &ASAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=address --enable-fuzzers --enable-coverage --ccache --enable-werror
|
||||||
ubsan_sanitizer_config: &UBSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=undefined --enable-fuzzers --ccache --enable-werror
|
ubsan_sanitizer_config: &UBSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=undefined --enable-fuzzers --ccache --enable-werror
|
||||||
tsan_sanitizer_config: &TSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=thread --enable-fuzzers --ccache --enable-werror
|
tsan_sanitizer_config: &TSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=thread --enable-fuzzers --ccache --enable-werror
|
||||||
macos_config: &MACOS_CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror --with-krb5=/opt/homebrew/opt/krb5
|
|
||||||
clang_tidy_config: &CLANG_TIDY_CONFIG --build-type=debug --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install --ccache --enable-werror --enable-clang-tidy
|
|
||||||
|
|
||||||
resources_template: &RESOURCES_TEMPLATE
|
resources_template: &RESOURCES_TEMPLATE
|
||||||
cpu: *CPUS
|
cpu: *CPUS
|
||||||
|
@ -35,7 +33,6 @@ macos_environment: &MACOS_ENVIRONMENT
|
||||||
ZEEK_CI_BTEST_JOBS: 12
|
ZEEK_CI_BTEST_JOBS: 12
|
||||||
# No permission to write to default location of /zeek
|
# No permission to write to default location of /zeek
|
||||||
CIRRUS_WORKING_DIR: /tmp/zeek
|
CIRRUS_WORKING_DIR: /tmp/zeek
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *MACOS_CONFIG
|
|
||||||
|
|
||||||
freebsd_resources_template: &FREEBSD_RESOURCES_TEMPLATE
|
freebsd_resources_template: &FREEBSD_RESOURCES_TEMPLATE
|
||||||
cpu: 8
|
cpu: 8
|
||||||
|
@ -48,108 +45,48 @@ freebsd_environment: &FREEBSD_ENVIRONMENT
|
||||||
ZEEK_CI_CPUS: 8
|
ZEEK_CI_CPUS: 8
|
||||||
ZEEK_CI_BTEST_JOBS: 8
|
ZEEK_CI_BTEST_JOBS: 8
|
||||||
|
|
||||||
only_if_pr_master_release: &ONLY_IF_PR_MASTER_RELEASE
|
builds_only_if_template: &BUILDS_ONLY_IF_TEMPLATE
|
||||||
|
# Rules for skipping builds:
|
||||||
|
# - Do not run builds for anything that's cron triggered
|
||||||
|
# - Don't do darwin builds on zeek-security repo because they use up a ton of compute credits.
|
||||||
|
# - Always build PRs, but not if they come from dependabot
|
||||||
|
# - Always build master and release/* builds from the main repo
|
||||||
only_if: >
|
only_if: >
|
||||||
|
( $CIRRUS_CRON == '' ) &&
|
||||||
|
( ( $CIRRUS_PR != '' && $CIRRUS_BRANCH !=~ 'dependabot/.*' ) ||
|
||||||
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
|
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
|
||||||
( $CIRRUS_CRON != 'weekly' ) &&
|
(
|
||||||
( $CIRRUS_PR != '' ||
|
|
||||||
$CIRRUS_BRANCH == 'master' ||
|
$CIRRUS_BRANCH == 'master' ||
|
||||||
$CIRRUS_BRANCH =~ 'release/.*'
|
$CIRRUS_BRANCH =~ 'release/.*'
|
||||||
)
|
)
|
||||||
)
|
) )
|
||||||
|
|
||||||
only_if_pr_master_release_nightly: &ONLY_IF_PR_MASTER_RELEASE_NIGHTLY
|
skip_task_on_pr: &SKIP_TASK_ON_PR
|
||||||
|
# Skip this task on PRs if it does not have the fullci label,
|
||||||
|
# it continues to run for direct pushes to master/release.
|
||||||
|
skip: >
|
||||||
|
! ( $CIRRUS_PR == '' || $CIRRUS_PR_LABELS =~ '.*fullci.*' )
|
||||||
|
|
||||||
|
zam_skip_task_on_pr: &ZAM_SKIP_TASK_ON_PR
|
||||||
|
# Skip this task on PRs unless it has the `fullci` or `zamci` label
|
||||||
|
# or files in src/script_opt/** were modified.
|
||||||
|
# It continues to run for direct pushes to master/release, as
|
||||||
|
# CIRRUS_PR will be empty.
|
||||||
|
skip: >
|
||||||
|
! ( $CIRRUS_PR == '' || $CIRRUS_PR_LABELS =~ '.*fullci.*' || $CIRRUS_PR_LABELS =~ '.*zamci.*' || changesInclude('src/script_opt/**') )
|
||||||
|
|
||||||
|
benchmark_only_if_template: &BENCHMARK_ONLY_IF_TEMPLATE
|
||||||
|
# only_if condition for cron-triggered benchmarking tests.
|
||||||
|
# These currently do not run for release/.*
|
||||||
only_if: >
|
only_if: >
|
||||||
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
|
( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
|
||||||
( $CIRRUS_CRON != 'weekly' ) &&
|
( $CIRRUS_CRON == 'benchmark-nightly' ||
|
||||||
( $CIRRUS_PR != '' ||
|
$CIRRUS_PR_LABELS =~ '.*fullci.*' ||
|
||||||
$CIRRUS_BRANCH == 'master' ||
|
$CIRRUS_PR_LABELS =~ '.*benchmark.*' )
|
||||||
$CIRRUS_BRANCH =~ 'release/.*' ||
|
|
||||||
( $CIRRUS_CRON == 'nightly' && $CIRRUS_BRANCH == 'master' )
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
only_if_pr_release_and_nightly: &ONLY_IF_PR_RELEASE_AND_NIGHTLY
|
|
||||||
only_if: >
|
|
||||||
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
|
|
||||||
( $CIRRUS_CRON != 'weekly' ) &&
|
|
||||||
( $CIRRUS_PR != '' ||
|
|
||||||
$CIRRUS_BRANCH =~ 'release/.*' ||
|
|
||||||
( $CIRRUS_CRON == 'nightly' && $CIRRUS_BRANCH == 'master' )
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
only_if_pr_nightly: &ONLY_IF_PR_NIGHTLY
|
|
||||||
only_if: >
|
|
||||||
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
|
|
||||||
( $CIRRUS_CRON != 'weekly' ) &&
|
|
||||||
( $CIRRUS_PR != '' ||
|
|
||||||
( $CIRRUS_CRON == 'nightly' && $CIRRUS_BRANCH == 'master' )
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
only_if_release_tag_nightly: &ONLY_IF_RELEASE_TAG_NIGHTLY
|
|
||||||
only_if: >
|
|
||||||
( ( $CIRRUS_REPO_NAME == 'zeek' ) &&
|
|
||||||
( $CIRRUS_CRON != 'weekly' ) &&
|
|
||||||
( ( $CIRRUS_BRANCH =~ 'release/.*' && $CIRRUS_TAG =~ 'v[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$' ) ||
|
|
||||||
( $CIRRUS_CRON == 'nightly' && $CIRRUS_BRANCH == 'master' )
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
only_if_nightly: &ONLY_IF_NIGHTLY
|
|
||||||
only_if: >
|
|
||||||
( ( $CIRRUS_REPO_NAME == 'zeek' ) &&
|
|
||||||
( $CIRRUS_CRON == 'nightly' && $CIRRUS_BRANCH == 'master' )
|
|
||||||
)
|
|
||||||
|
|
||||||
only_if_weekly: &ONLY_IF_WEEKLY
|
|
||||||
only_if: >
|
|
||||||
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
|
|
||||||
( $CIRRUS_CRON == 'weekly' && $CIRRUS_BRANCH == 'master' )
|
|
||||||
)
|
|
||||||
|
|
||||||
skip_if_pr_skip_all: &SKIP_IF_PR_SKIP_ALL
|
|
||||||
skip: >
|
|
||||||
( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
|
|
||||||
|
|
||||||
skip_if_pr_not_full_ci: &SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
skip: >
|
|
||||||
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: Full.*") ||
|
|
||||||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
|
|
||||||
)
|
|
||||||
|
|
||||||
skip_if_pr_not_full_or_benchmark: &SKIP_IF_PR_NOT_FULL_OR_BENCHMARK
|
|
||||||
skip: >
|
|
||||||
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: (Full|Benchmark).*" ) ||
|
|
||||||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
|
|
||||||
)
|
|
||||||
|
|
||||||
skip_if_pr_not_full_or_cluster_test: &SKIP_IF_PR_NOT_FULL_OR_CLUSTER_TEST
|
|
||||||
skip: >
|
|
||||||
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: (Full|Cluster Test).*" ) ||
|
|
||||||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
|
|
||||||
)
|
|
||||||
|
|
||||||
skip_if_pr_not_full_or_zam: &SKIP_IF_PR_NOT_FULL_OR_ZAM
|
|
||||||
skip: >
|
|
||||||
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: (Full|ZAM).*" ) ||
|
|
||||||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
|
|
||||||
)
|
|
||||||
|
|
||||||
skip_if_pr_not_full_or_zeekctl: &SKIP_IF_PR_NOT_FULL_OR_ZEEKCTL
|
|
||||||
skip: >
|
|
||||||
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: (Full|Zeekctl).*" ) ||
|
|
||||||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
|
|
||||||
)
|
|
||||||
|
|
||||||
skip_if_pr_not_full_or_windows: &SKIP_IF_PR_NOT_FULL_OR_WINDOWS
|
|
||||||
skip: >
|
|
||||||
( ( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*CI: (Full|Windows).*" ) ||
|
|
||||||
( $CIRRUS_PR_LABELS =~ ".*CI: Skip All.*" )
|
|
||||||
)
|
|
||||||
|
|
||||||
ci_template: &CI_TEMPLATE
|
ci_template: &CI_TEMPLATE
|
||||||
|
<< : *BUILDS_ONLY_IF_TEMPLATE
|
||||||
|
|
||||||
# Default timeout is 60 minutes, Cirrus hard limit is 120 minutes for free
|
# Default timeout is 60 minutes, Cirrus hard limit is 120 minutes for free
|
||||||
# tasks, so may as well ask for full time.
|
# tasks, so may as well ask for full time.
|
||||||
timeout_in: 120m
|
timeout_in: 120m
|
||||||
|
@ -193,7 +130,6 @@ ci_template: &CI_TEMPLATE
|
||||||
|
|
||||||
env:
|
env:
|
||||||
CIRRUS_WORKING_DIR: /zeek
|
CIRRUS_WORKING_DIR: /zeek
|
||||||
CIRRUS_LOG_TIMESTAMP: true
|
|
||||||
ZEEK_CI_CPUS: *CPUS
|
ZEEK_CI_CPUS: *CPUS
|
||||||
ZEEK_CI_BTEST_JOBS: *BTEST_JOBS
|
ZEEK_CI_BTEST_JOBS: *BTEST_JOBS
|
||||||
ZEEK_CI_BTEST_RETRIES: *BTEST_RETRIES
|
ZEEK_CI_BTEST_RETRIES: *BTEST_RETRIES
|
||||||
|
@ -238,88 +174,27 @@ env:
|
||||||
# Linux EOL timelines: https://linuxlifecycle.com/
|
# Linux EOL timelines: https://linuxlifecycle.com/
|
||||||
# Fedora (~13 months): https://fedoraproject.org/wiki/Fedora_Release_Life_Cycle
|
# Fedora (~13 months): https://fedoraproject.org/wiki/Fedora_Release_Life_Cycle
|
||||||
|
|
||||||
fedora42_task:
|
|
||||||
container:
|
|
||||||
# Fedora 42 EOL: Around May 2026
|
|
||||||
dockerfile: ci/fedora-42/Dockerfile
|
|
||||||
<< : *RESOURCES_TEMPLATE
|
|
||||||
<< : *CI_TEMPLATE
|
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_SKIP_ALL
|
|
||||||
env:
|
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *BINARY_CONFIG
|
|
||||||
|
|
||||||
fedora41_task:
|
fedora41_task:
|
||||||
container:
|
container:
|
||||||
# Fedora 41 EOL: Around Nov 2025
|
# Fedora 41 EOL: Around Nov 2025
|
||||||
dockerfile: ci/fedora-41/Dockerfile
|
dockerfile: ci/fedora-41/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
fedora40_task:
|
||||||
|
container:
|
||||||
|
# Fedora 40 EOL: Around May 2025
|
||||||
|
dockerfile: ci/fedora-40/Dockerfile
|
||||||
|
<< : *RESOURCES_TEMPLATE
|
||||||
|
<< : *CI_TEMPLATE
|
||||||
|
<< : *SKIP_TASK_ON_PR
|
||||||
|
|
||||||
centosstream9_task:
|
centosstream9_task:
|
||||||
container:
|
container:
|
||||||
# Stream 9 EOL: 31 May 2027
|
# Stream 9 EOL: Around Dec 2027
|
||||||
dockerfile: ci/centos-stream-9/Dockerfile
|
dockerfile: ci/centos-stream-9/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
|
|
||||||
centosstream10_task:
|
|
||||||
container:
|
|
||||||
# Stream 10 EOL: 01 January 2030
|
|
||||||
dockerfile: ci/centos-stream-10/Dockerfile
|
|
||||||
<< : *RESOURCES_TEMPLATE
|
|
||||||
<< : *CI_TEMPLATE
|
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
|
|
||||||
debian13_task:
|
|
||||||
container:
|
|
||||||
# Debian 13 (trixie) EOL: TBD
|
|
||||||
dockerfile: ci/debian-13/Dockerfile
|
|
||||||
<< : *RESOURCES_TEMPLATE
|
|
||||||
<< : *CI_TEMPLATE
|
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
|
|
||||||
arm_debian13_task:
|
|
||||||
arm_container:
|
|
||||||
# Debian 13 (trixie) EOL: TBD
|
|
||||||
dockerfile: ci/debian-13/Dockerfile
|
|
||||||
<< : *RESOURCES_TEMPLATE
|
|
||||||
<< : *CI_TEMPLATE
|
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_SKIP_ALL
|
|
||||||
|
|
||||||
debian13_static_task:
|
|
||||||
container:
|
|
||||||
# Just use a recent/common distro to run a static compile test.
|
|
||||||
# Debian 13 (trixie) EOL: TBD
|
|
||||||
dockerfile: ci/debian-13/Dockerfile
|
|
||||||
<< : *RESOURCES_TEMPLATE
|
|
||||||
<< : *CI_TEMPLATE
|
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
env:
|
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *STATIC_CONFIG
|
|
||||||
|
|
||||||
debian13_binary_task:
|
|
||||||
container:
|
|
||||||
# Just use a recent/common distro to run binary mode compile test.
|
|
||||||
# As of 2024-03, the used configure flags are equivalent to the flags
|
|
||||||
# that we use to create binary packages.
|
|
||||||
# Just use a recent/common distro to run a static compile test.
|
|
||||||
# Debian 13 (trixie) EOL: TBD
|
|
||||||
dockerfile: ci/debian-13/Dockerfile
|
|
||||||
<< : *RESOURCES_TEMPLATE
|
|
||||||
<< : *CI_TEMPLATE
|
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
env:
|
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *BINARY_CONFIG
|
|
||||||
|
|
||||||
debian12_task:
|
debian12_task:
|
||||||
container:
|
container:
|
||||||
|
@ -327,8 +202,48 @@ debian12_task:
|
||||||
dockerfile: ci/debian-12/Dockerfile
|
dockerfile: ci/debian-12/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
arm_debian12_task:
|
||||||
|
arm_container:
|
||||||
|
# Debian 12 (bookworm) EOL: TBD
|
||||||
|
dockerfile: ci/debian-12/Dockerfile
|
||||||
|
<< : *RESOURCES_TEMPLATE
|
||||||
|
<< : *CI_TEMPLATE
|
||||||
|
env:
|
||||||
|
ZEEK_CI_CONFIGURE_FLAGS: *NO_SPICY_CONFIG
|
||||||
|
|
||||||
|
debian12_static_task:
|
||||||
|
container:
|
||||||
|
# Just use a recent/common distro to run a static compile test.
|
||||||
|
# Debian 12 (bookworm) EOL: TBD
|
||||||
|
dockerfile: ci/debian-12/Dockerfile
|
||||||
|
<< : *RESOURCES_TEMPLATE
|
||||||
|
<< : *CI_TEMPLATE
|
||||||
|
<< : *SKIP_TASK_ON_PR
|
||||||
|
env:
|
||||||
|
ZEEK_CI_CONFIGURE_FLAGS: *STATIC_CONFIG
|
||||||
|
|
||||||
|
debian12_binary_task:
|
||||||
|
container:
|
||||||
|
# Just use a recent/common distro to run binary mode compile test.
|
||||||
|
# As of 2024-03, the used configure flags are equivalent to the flags
|
||||||
|
# that we use to create binary packages.
|
||||||
|
# Just use a recent/common distro to run a static compile test.
|
||||||
|
# Debian 12 (bookworm) EOL: TBD
|
||||||
|
dockerfile: ci/debian-12/Dockerfile
|
||||||
|
<< : *RESOURCES_TEMPLATE
|
||||||
|
<< : *CI_TEMPLATE
|
||||||
|
<< : *SKIP_TASK_ON_PR
|
||||||
|
env:
|
||||||
|
ZEEK_CI_CONFIGURE_FLAGS: *BINARY_CONFIG
|
||||||
|
|
||||||
|
debian11_task:
|
||||||
|
container:
|
||||||
|
# Debian 11 EOL: June 2026
|
||||||
|
dockerfile: ci/debian-11/Dockerfile
|
||||||
|
<< : *RESOURCES_TEMPLATE
|
||||||
|
<< : *CI_TEMPLATE
|
||||||
|
<< : *SKIP_TASK_ON_PR
|
||||||
|
|
||||||
opensuse_leap_15_6_task:
|
opensuse_leap_15_6_task:
|
||||||
container:
|
container:
|
||||||
|
@ -336,8 +251,6 @@ opensuse_leap_15_6_task:
|
||||||
dockerfile: ci/opensuse-leap-15.6/Dockerfile
|
dockerfile: ci/opensuse-leap-15.6/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
|
|
||||||
opensuse_tumbleweed_task:
|
opensuse_tumbleweed_task:
|
||||||
container:
|
container:
|
||||||
|
@ -346,63 +259,30 @@ opensuse_tumbleweed_task:
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
prepare_script: ./ci/opensuse-tumbleweed/prepare.sh
|
prepare_script: ./ci/opensuse-tumbleweed/prepare.sh
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
# << : *SKIP_TASK_ON_PR
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
|
|
||||||
weekly_current_gcc_task:
|
ubuntu24_10_task:
|
||||||
container:
|
container:
|
||||||
# Opensuse Tumbleweed has no EOL
|
# Ubuntu 24.10 EOL: 2025-07-30
|
||||||
dockerfile: ci/opensuse-tumbleweed/Dockerfile
|
dockerfile: ci/ubuntu-24.10/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
|
||||||
prepare_script: ./ci/opensuse-tumbleweed/prepare-weekly.sh
|
|
||||||
<< : *CI_TEMPLATE
|
|
||||||
<< : *ONLY_IF_WEEKLY
|
|
||||||
env:
|
|
||||||
ZEEK_CI_COMPILER: gcc
|
|
||||||
|
|
||||||
weekly_current_clang_task:
|
|
||||||
container:
|
|
||||||
# Opensuse Tumbleweed has no EOL
|
|
||||||
dockerfile: ci/opensuse-tumbleweed/Dockerfile
|
|
||||||
<< : *RESOURCES_TEMPLATE
|
|
||||||
prepare_script: ./ci/opensuse-tumbleweed/prepare-weekly.sh
|
|
||||||
<< : *CI_TEMPLATE
|
|
||||||
<< : *ONLY_IF_WEEKLY
|
|
||||||
env:
|
|
||||||
ZEEK_CI_COMPILER: clang
|
|
||||||
|
|
||||||
ubuntu25_04_task:
|
|
||||||
container:
|
|
||||||
# Ubuntu 25.04 EOL: 2026-01-31
|
|
||||||
dockerfile: ci/ubuntu-25.04/Dockerfile
|
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
<< : *SKIP_TASK_ON_PR
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
|
|
||||||
ubuntu24_04_task:
|
ubuntu24_task:
|
||||||
container:
|
container:
|
||||||
# Ubuntu 24.04 EOL: Jun 2029
|
# Ubuntu 24.04 EOL: Jun 2029
|
||||||
dockerfile: ci/ubuntu-24.04/Dockerfile
|
dockerfile: ci/ubuntu-24.04/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_SKIP_ALL
|
|
||||||
env:
|
|
||||||
ZEEK_CI_CREATE_ARTIFACT: 1
|
|
||||||
upload_binary_artifacts:
|
|
||||||
path: build.tgz
|
|
||||||
benchmark_script: ./ci/benchmark.sh
|
|
||||||
|
|
||||||
# Same as above, but running the ZAM tests instead of the regular tests.
|
# Same as above, but running the ZAM tests instead of the regular tests.
|
||||||
ubuntu24_04_zam_task:
|
ubuntu24_zam_task:
|
||||||
container:
|
container:
|
||||||
# Ubuntu 24.04 EOL: Jun 2029
|
# Ubuntu 24.04 EOL: Jun 2029
|
||||||
dockerfile: ci/ubuntu-24.04/Dockerfile
|
dockerfile: ci/ubuntu-24.04/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_OR_ZAM
|
|
||||||
env:
|
env:
|
||||||
ZEEK_CI_SKIP_UNIT_TESTS: 1
|
ZEEK_CI_SKIP_UNIT_TESTS: 1
|
||||||
ZEEK_CI_SKIP_EXTERNAL_BTESTS: 1
|
ZEEK_CI_SKIP_EXTERNAL_BTESTS: 1
|
||||||
|
@ -411,41 +291,44 @@ ubuntu24_04_zam_task:
|
||||||
ZEEK_CI_BTEST_JOBS: 3
|
ZEEK_CI_BTEST_JOBS: 3
|
||||||
|
|
||||||
# Same as above, but using Clang and libc++
|
# Same as above, but using Clang and libc++
|
||||||
ubuntu24_04_clang_libcpp_task:
|
ubuntu24_clang_libcpp_task:
|
||||||
container:
|
container:
|
||||||
# Ubuntu 24.04 EOL: Jun 2029
|
# Ubuntu 24.04 EOL: Jun 2029
|
||||||
dockerfile: ci/ubuntu-24.04/Dockerfile
|
dockerfile: ci/ubuntu-24.04/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
<< : *SKIP_TASK_ON_PR
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
env:
|
env:
|
||||||
CC: clang-19
|
CC: clang-18
|
||||||
CXX: clang++-19
|
CXX: clang++-18
|
||||||
CXXFLAGS: -stdlib=libc++
|
CXXFLAGS: -stdlib=libc++
|
||||||
|
|
||||||
ubuntu24_04_clang_tidy_task:
|
ubuntu22_task:
|
||||||
container:
|
container:
|
||||||
# Ubuntu 24.04 EOL: Jun 2029
|
# Ubuntu 22.04 EOL: June 2027
|
||||||
dockerfile: ci/ubuntu-24.04/Dockerfile
|
dockerfile: ci/ubuntu-22.04/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
env:
|
env:
|
||||||
CC: clang-19
|
ZEEK_CI_CREATE_ARTIFACT: 1
|
||||||
CXX: clang++-19
|
upload_binary_artifacts:
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *CLANG_TIDY_CONFIG
|
path: build.tgz
|
||||||
|
benchmark_script: ./ci/benchmark.sh
|
||||||
|
# Run on PRs, merges to master and release/.* and benchmark-nightly cron.
|
||||||
|
only_if: >
|
||||||
|
( $CIRRUS_PR != '' && $CIRRUS_BRANCH !=~ 'dependabot/.*' ) ||
|
||||||
|
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
|
||||||
|
$CIRRUS_BRANCH == 'master' ||
|
||||||
|
$CIRRUS_BRANCH =~ 'release/.*' ||
|
||||||
|
$CIRRUS_CRON == 'benchmark-nightly' )
|
||||||
|
|
||||||
# Also enable Spicy SSL for this
|
# Also enable Spicy SSL for this
|
||||||
ubuntu24_04_spicy_task:
|
ubuntu22_spicy_task:
|
||||||
container:
|
container:
|
||||||
# Ubuntu 24.04 EOL: Jun 2029
|
# Ubuntu 22.04 EOL: April 2027
|
||||||
dockerfile: ci/ubuntu-24.04/Dockerfile
|
dockerfile: ci/ubuntu-22.04/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_OR_BENCHMARK
|
|
||||||
env:
|
env:
|
||||||
ZEEK_CI_CREATE_ARTIFACT: 1
|
ZEEK_CI_CREATE_ARTIFACT: 1
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *SPICY_SSL_CONFIG
|
ZEEK_CI_CONFIGURE_FLAGS: *SPICY_SSL_CONFIG
|
||||||
|
@ -453,33 +336,26 @@ ubuntu24_04_spicy_task:
|
||||||
upload_binary_artifacts:
|
upload_binary_artifacts:
|
||||||
path: build.tgz
|
path: build.tgz
|
||||||
benchmark_script: ./ci/benchmark.sh
|
benchmark_script: ./ci/benchmark.sh
|
||||||
|
<< : *BENCHMARK_ONLY_IF_TEMPLATE
|
||||||
|
|
||||||
ubuntu24_04_spicy_head_task:
|
ubuntu22_spicy_head_task:
|
||||||
container:
|
container:
|
||||||
# Ubuntu 24.04 EOL: Jun 2029
|
# Ubuntu 22.04 EOL: April 2027
|
||||||
dockerfile: ci/ubuntu-24.04/Dockerfile
|
dockerfile: ci/ubuntu-22.04/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE_NIGHTLY
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_OR_BENCHMARK
|
|
||||||
env:
|
env:
|
||||||
ZEEK_CI_CREATE_ARTIFACT: 1
|
ZEEK_CI_CREATE_ARTIFACT: 1
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *SPICY_SSL_CONFIG
|
|
||||||
# Pull auxil/spicy to the latest head version. May or may not build.
|
# Pull auxil/spicy to the latest head version. May or may not build.
|
||||||
ZEEK_CI_PREBUILD_COMMAND: 'cd auxil/spicy && git fetch && git reset --hard origin/main && git submodule update --init --recursive'
|
ZEEK_CI_PREBUILD_COMMAND: 'cd auxil/spicy && git fetch && git reset --hard origin/main && git submodule update --init --recursive'
|
||||||
spicy_install_analyzers_script: ./ci/spicy-install-analyzers.sh
|
spicy_install_analyzers_script: ./ci/spicy-install-analyzers.sh
|
||||||
upload_binary_artifacts:
|
upload_binary_artifacts:
|
||||||
path: build.tgz
|
path: build.tgz
|
||||||
benchmark_script: ./ci/benchmark.sh
|
benchmark_script: ./ci/benchmark.sh
|
||||||
|
# Don't run this job on release branches. It tests against spicy HEAD, which
|
||||||
ubuntu22_04_task:
|
# will frequently require other fixes that won't be in a release branch.
|
||||||
container:
|
skip: $CIRRUS_BRANCH =~ 'release/.*'
|
||||||
# Ubuntu 22.04 EOL: June 2027
|
<< : *BENCHMARK_ONLY_IF_TEMPLATE
|
||||||
dockerfile: ci/ubuntu-22.04/Dockerfile
|
|
||||||
<< : *RESOURCES_TEMPLATE
|
|
||||||
<< : *CI_TEMPLATE
|
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
|
|
||||||
alpine_task:
|
alpine_task:
|
||||||
container:
|
container:
|
||||||
|
@ -489,8 +365,6 @@ alpine_task:
|
||||||
dockerfile: ci/alpine/Dockerfile
|
dockerfile: ci/alpine/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
|
|
||||||
# Cirrus only supports the following macos runner currently, selecting
|
# Cirrus only supports the following macos runner currently, selecting
|
||||||
# anything else automatically upgrades to this one.
|
# anything else automatically upgrades to this one.
|
||||||
|
@ -503,8 +377,6 @@ macos_sequoia_task:
|
||||||
image: ghcr.io/cirruslabs/macos-runner:sequoia
|
image: ghcr.io/cirruslabs/macos-runner:sequoia
|
||||||
prepare_script: ./ci/macos/prepare.sh
|
prepare_script: ./ci/macos/prepare.sh
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_SKIP_ALL
|
|
||||||
<< : *MACOS_ENVIRONMENT
|
<< : *MACOS_ENVIRONMENT
|
||||||
|
|
||||||
# FreeBSD EOL timelines: https://www.freebsd.org/security/#sup
|
# FreeBSD EOL timelines: https://www.freebsd.org/security/#sup
|
||||||
|
@ -516,8 +388,6 @@ freebsd14_task:
|
||||||
|
|
||||||
prepare_script: ./ci/freebsd/prepare.sh
|
prepare_script: ./ci/freebsd/prepare.sh
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_SKIP_ALL
|
|
||||||
<< : *FREEBSD_ENVIRONMENT
|
<< : *FREEBSD_ENVIRONMENT
|
||||||
|
|
||||||
freebsd13_task:
|
freebsd13_task:
|
||||||
|
@ -528,8 +398,7 @@ freebsd13_task:
|
||||||
|
|
||||||
prepare_script: ./ci/freebsd/prepare.sh
|
prepare_script: ./ci/freebsd/prepare.sh
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
<< : *SKIP_TASK_ON_PR
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
<< : *FREEBSD_ENVIRONMENT
|
<< : *FREEBSD_ENVIRONMENT
|
||||||
|
|
||||||
asan_sanitizer_task:
|
asan_sanitizer_task:
|
||||||
|
@ -539,8 +408,6 @@ asan_sanitizer_task:
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
|
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_SKIP_ALL
|
|
||||||
test_fuzzers_script: ./ci/test-fuzzers.sh
|
test_fuzzers_script: ./ci/test-fuzzers.sh
|
||||||
coverage_script: ./ci/upload-coverage.sh
|
coverage_script: ./ci/upload-coverage.sh
|
||||||
env:
|
env:
|
||||||
|
@ -557,8 +424,6 @@ asan_sanitizer_zam_task:
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
|
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_NIGHTLY
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_OR_ZAM
|
|
||||||
env:
|
env:
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *ASAN_SANITIZER_CONFIG
|
ZEEK_CI_CONFIGURE_FLAGS: *ASAN_SANITIZER_CONFIG
|
||||||
ASAN_OPTIONS: detect_leaks=1:detect_odr_violation=0
|
ASAN_OPTIONS: detect_leaks=1:detect_odr_violation=0
|
||||||
|
@ -567,6 +432,7 @@ asan_sanitizer_zam_task:
|
||||||
ZEEK_CI_BTEST_EXTRA_ARGS: -a zam
|
ZEEK_CI_BTEST_EXTRA_ARGS: -a zam
|
||||||
# Use a lower number of jobs due to OOM issues with ZAM tasks
|
# Use a lower number of jobs due to OOM issues with ZAM tasks
|
||||||
ZEEK_CI_BTEST_JOBS: 3
|
ZEEK_CI_BTEST_JOBS: 3
|
||||||
|
<< : *ZAM_SKIP_TASK_ON_PR
|
||||||
|
|
||||||
ubsan_sanitizer_task:
|
ubsan_sanitizer_task:
|
||||||
container:
|
container:
|
||||||
|
@ -575,12 +441,11 @@ ubsan_sanitizer_task:
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
|
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_NIGHTLY
|
<< : *SKIP_TASK_ON_PR
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
test_fuzzers_script: ./ci/test-fuzzers.sh
|
test_fuzzers_script: ./ci/test-fuzzers.sh
|
||||||
env:
|
env:
|
||||||
CC: clang-19
|
CC: clang-18
|
||||||
CXX: clang++-19
|
CXX: clang++-18
|
||||||
CXXFLAGS: -DZEEK_DICT_DEBUG
|
CXXFLAGS: -DZEEK_DICT_DEBUG
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *UBSAN_SANITIZER_CONFIG
|
ZEEK_CI_CONFIGURE_FLAGS: *UBSAN_SANITIZER_CONFIG
|
||||||
ZEEK_TAILORED_UB_CHECKS: 1
|
ZEEK_TAILORED_UB_CHECKS: 1
|
||||||
|
@ -592,11 +457,9 @@ ubsan_sanitizer_zam_task:
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
|
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_NIGHTLY
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_OR_ZAM
|
|
||||||
env:
|
env:
|
||||||
CC: clang-19
|
CC: clang-18
|
||||||
CXX: clang++-19
|
CXX: clang++-18
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *UBSAN_SANITIZER_CONFIG
|
ZEEK_CI_CONFIGURE_FLAGS: *UBSAN_SANITIZER_CONFIG
|
||||||
ZEEK_TAILORED_UB_CHECKS: 1
|
ZEEK_TAILORED_UB_CHECKS: 1
|
||||||
UBSAN_OPTIONS: print_stacktrace=1
|
UBSAN_OPTIONS: print_stacktrace=1
|
||||||
|
@ -605,6 +468,7 @@ ubsan_sanitizer_zam_task:
|
||||||
ZEEK_CI_BTEST_EXTRA_ARGS: -a zam
|
ZEEK_CI_BTEST_EXTRA_ARGS: -a zam
|
||||||
# Use a lower number of jobs due to OOM issues with ZAM tasks
|
# Use a lower number of jobs due to OOM issues with ZAM tasks
|
||||||
ZEEK_CI_BTEST_JOBS: 3
|
ZEEK_CI_BTEST_JOBS: 3
|
||||||
|
<< : *ZAM_SKIP_TASK_ON_PR
|
||||||
|
|
||||||
tsan_sanitizer_task:
|
tsan_sanitizer_task:
|
||||||
container:
|
container:
|
||||||
|
@ -613,11 +477,10 @@ tsan_sanitizer_task:
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
|
|
||||||
<< : *CI_TEMPLATE
|
<< : *CI_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_NIGHTLY
|
<< : *SKIP_TASK_ON_PR
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
|
||||||
env:
|
env:
|
||||||
CC: clang-19
|
CC: clang-18
|
||||||
CXX: clang++-19
|
CXX: clang++-18
|
||||||
ZEEK_CI_CONFIGURE_FLAGS: *TSAN_SANITIZER_CONFIG
|
ZEEK_CI_CONFIGURE_FLAGS: *TSAN_SANITIZER_CONFIG
|
||||||
ZEEK_CI_DISABLE_SCRIPT_PROFILING: 1
|
ZEEK_CI_DISABLE_SCRIPT_PROFILING: 1
|
||||||
# If this is defined directly in the environment, configure fails to find
|
# If this is defined directly in the environment, configure fails to find
|
||||||
|
@ -638,12 +501,11 @@ windows_task:
|
||||||
prepare_script: ci/windows/prepare.cmd
|
prepare_script: ci/windows/prepare.cmd
|
||||||
build_script: ci/windows/build.cmd
|
build_script: ci/windows/build.cmd
|
||||||
test_script: ci/windows/test.cmd
|
test_script: ci/windows/test.cmd
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_OR_WINDOWS
|
|
||||||
env:
|
env:
|
||||||
ZEEK_CI_CPUS: 8
|
ZEEK_CI_CPUS: 8
|
||||||
# Give verbose error output on a test failure.
|
# Give verbose error output on a test failure.
|
||||||
CTEST_OUTPUT_ON_FAILURE: 1
|
CTEST_OUTPUT_ON_FAILURE: 1
|
||||||
|
<< : *BUILDS_ONLY_IF_TEMPLATE
|
||||||
|
|
||||||
|
|
||||||
# Container images
|
# Container images
|
||||||
|
@ -724,18 +586,22 @@ arm64_container_image_docker_builder:
|
||||||
env:
|
env:
|
||||||
CIRRUS_ARCH: arm64
|
CIRRUS_ARCH: arm64
|
||||||
<< : *DOCKER_BUILD_TEMPLATE
|
<< : *DOCKER_BUILD_TEMPLATE
|
||||||
<< : *ONLY_IF_RELEASE_TAG_NIGHTLY
|
<< : *SKIP_TASK_ON_PR
|
||||||
|
|
||||||
amd64_container_image_docker_builder:
|
amd64_container_image_docker_builder:
|
||||||
env:
|
env:
|
||||||
CIRRUS_ARCH: amd64
|
CIRRUS_ARCH: amd64
|
||||||
<< : *DOCKER_BUILD_TEMPLATE
|
<< : *DOCKER_BUILD_TEMPLATE
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE_NIGHTLY
|
<< : *SKIP_TASK_ON_PR
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_OR_CLUSTER_TEST
|
|
||||||
|
|
||||||
container_image_manifest_docker_builder:
|
container_image_manifest_docker_builder:
|
||||||
cpu: 1
|
cpu: 1
|
||||||
<< : *ONLY_IF_RELEASE_TAG_NIGHTLY
|
# Push master builds to zeek/zeek-dev, or tagged release branches to zeek/zeek
|
||||||
|
only_if: >
|
||||||
|
( $CIRRUS_CRON == '' ) &&
|
||||||
|
( $CIRRUS_REPO_FULL_NAME == 'zeek/zeek' &&
|
||||||
|
( $CIRRUS_BRANCH == 'master' ||
|
||||||
|
$CIRRUS_TAG =~ 'v[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$' ) )
|
||||||
env:
|
env:
|
||||||
DOCKER_USERNAME: ENCRYPTED[!505b3dee552a395730a7e79e6aab280ffbe1b84ec62ae7616774dfefe104e34f896d2e20ce3ad701f338987c13c33533!]
|
DOCKER_USERNAME: ENCRYPTED[!505b3dee552a395730a7e79e6aab280ffbe1b84ec62ae7616774dfefe104e34f896d2e20ce3ad701f338987c13c33533!]
|
||||||
DOCKER_PASSWORD: ENCRYPTED[!6c4b2f6f0e5379ef1091719cc5d2d74c90cfd2665ac786942033d6d924597ffb95dbbc1df45a30cc9ddeec76c07ac620!]
|
DOCKER_PASSWORD: ENCRYPTED[!6c4b2f6f0e5379ef1091719cc5d2d74c90cfd2665ac786942033d6d924597ffb95dbbc1df45a30cc9ddeec76c07ac620!]
|
||||||
|
@ -814,7 +680,8 @@ container_image_manifest_docker_builder:
|
||||||
# images from the public ECR repository to stay within free-tier bounds.
|
# images from the public ECR repository to stay within free-tier bounds.
|
||||||
public_ecr_cleanup_docker_builder:
|
public_ecr_cleanup_docker_builder:
|
||||||
cpu: 1
|
cpu: 1
|
||||||
<< : *ONLY_IF_NIGHTLY
|
only_if: >
|
||||||
|
$CIRRUS_CRON == '' && $CIRRUS_REPO_FULL_NAME == 'zeek/zeek' && $CIRRUS_BRANCH == 'master'
|
||||||
env:
|
env:
|
||||||
AWS_ACCESS_KEY_ID: ENCRYPTED[!eff52f6442e1bc78bce5b15a23546344df41bf519f6201924cb70c7af12db23f442c0e5f2b3687c2d856ceb11fcb8c49!]
|
AWS_ACCESS_KEY_ID: ENCRYPTED[!eff52f6442e1bc78bce5b15a23546344df41bf519f6201924cb70c7af12db23f442c0e5f2b3687c2d856ceb11fcb8c49!]
|
||||||
AWS_SECRET_ACCESS_KEY: ENCRYPTED[!748bc302dd196140a5fa8e89c9efd148882dc846d4e723787d2de152eb136fa98e8dea7e6d2d6779d94f72dd3c088228!]
|
AWS_SECRET_ACCESS_KEY: ENCRYPTED[!748bc302dd196140a5fa8e89c9efd148882dc846d4e723787d2de152eb136fa98e8dea7e6d2d6779d94f72dd3c088228!]
|
||||||
|
@ -854,23 +721,27 @@ cluster_testing_docker_builder:
|
||||||
path: "testing/external/zeek-testing-cluster/.tmp/**"
|
path: "testing/external/zeek-testing-cluster/.tmp/**"
|
||||||
depends_on:
|
depends_on:
|
||||||
- amd64_container_image
|
- amd64_container_image
|
||||||
<< : *ONLY_IF_PR_RELEASE_AND_NIGHTLY
|
<< : *SKIP_TASK_ON_PR
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_OR_CLUSTER_TEST
|
|
||||||
|
|
||||||
|
|
||||||
# Test zeekctl upon master and release pushes and also when
|
# Test zeekctl upon master and release pushes and also when
|
||||||
# a PR has a "CI: Zeekctl" or "CI: Full" label.
|
# a PR has a zeekctlci or fullci label.
|
||||||
#
|
#
|
||||||
# Also triggers on CIRRUS_CRON == 'zeekctl-nightly' if that is configured
|
# Also triggers on CIRRUS_CRON == 'zeekctl-nightly' if that is configured
|
||||||
# through the Cirrus Web UI.
|
# through the Cirrus Web UI.
|
||||||
zeekctl_debian12_task:
|
zeekctl_debian12_task:
|
||||||
cpu: *CPUS
|
cpu: *CPUS
|
||||||
memory: *MEMORY
|
memory: *MEMORY
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
only_if: >
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_OR_ZEEKCTL
|
( $CIRRUS_CRON == 'zeekctl-nightly' ) ||
|
||||||
|
( $CIRRUS_PR != '' && $CIRRUS_PR_LABELS =~ '.*(zeekctlci|fullci).*' ) ||
|
||||||
|
( $CIRRUS_REPO_NAME == 'zeek' && (
|
||||||
|
$CIRRUS_BRANCH == 'master' ||
|
||||||
|
$CIRRUS_BRANCH =~ 'release/.*' )
|
||||||
|
)
|
||||||
container:
|
container:
|
||||||
# Debian 13 (trixie) EOL: TBD
|
# Debian 12 (bookworm) EOL: TBD
|
||||||
dockerfile: ci/debian-13/Dockerfile
|
dockerfile: ci/debian-12/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
sync_submodules_script: git submodule update --recursive --init
|
sync_submodules_script: git submodule update --recursive --init
|
||||||
always:
|
always:
|
||||||
|
@ -893,19 +764,15 @@ include_plugins_debian12_task:
|
||||||
cpu: *CPUS
|
cpu: *CPUS
|
||||||
memory: *MEMORY
|
memory: *MEMORY
|
||||||
container:
|
container:
|
||||||
# Debian 13 (trixie) EOL: TBD
|
# Debian 12 (bookworm) EOL: TBD
|
||||||
dockerfile: ci/debian-13/Dockerfile
|
dockerfile: ci/debian-12/Dockerfile
|
||||||
<< : *RESOURCES_TEMPLATE
|
<< : *RESOURCES_TEMPLATE
|
||||||
sync_submodules_script: git submodule update --recursive --init
|
sync_submodules_script: git submodule update --recursive --init
|
||||||
fetch_external_plugins_script:
|
fetch_external_plugins_script:
|
||||||
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/zeek/zeek-perf-support.git
|
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/zeek/zeek-perf-support.git
|
||||||
- cd zeek-perf-support && echo "Cloned $(git rev-parse HEAD) for $(basename $(pwd))"
|
|
||||||
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/zeek/zeek-more-hashes.git
|
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/zeek/zeek-more-hashes.git
|
||||||
- cd zeek-more-hashes && echo "Cloned $(git rev-parse HEAD) for $(basename $(pwd))"
|
|
||||||
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/zeek/zeek-cluster-backend-nats.git
|
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/zeek/zeek-cluster-backend-nats.git
|
||||||
- cd zeek-cluster-backend-nats && echo "Cloned $(git rev-parse HEAD) for $(basename $(pwd))"
|
|
||||||
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/SeisoLLC/zeek-kafka.git
|
- cd /zeek/testing/builtin-plugins/external && git clone https://github.com/SeisoLLC/zeek-kafka.git
|
||||||
- cd zeek-kafka && echo "Cloned $(git rev-parse HEAD) for $(basename $(pwd))"
|
|
||||||
always:
|
always:
|
||||||
ccache_cache:
|
ccache_cache:
|
||||||
folder: /tmp/ccache
|
folder: /tmp/ccache
|
||||||
|
@ -925,5 +792,5 @@ include_plugins_debian12_task:
|
||||||
on_failure:
|
on_failure:
|
||||||
upload_include_plugins_testing_artifacts:
|
upload_include_plugins_testing_artifacts:
|
||||||
path: "testing/builtin-plugins/.tmp/**"
|
path: "testing/builtin-plugins/.tmp/**"
|
||||||
<< : *ONLY_IF_PR_MASTER_RELEASE
|
<< : *BUILDS_ONLY_IF_TEMPLATE
|
||||||
<< : *SKIP_IF_PR_NOT_FULL_CI
|
<< : *SKIP_TASK_ON_PR
|
||||||
|
|
77
.clang-tidy
77
.clang-tidy
|
@ -1,76 +1,5 @@
|
||||||
Checks: [-*,
|
Checks: '-*,
|
||||||
bugprone-*,
|
bugprone-*,
|
||||||
performance-*,
|
|
||||||
modernize-*,
|
|
||||||
readability-isolate-declaration,
|
|
||||||
readability-container-contains,
|
|
||||||
|
|
||||||
# Enable a very limited number of the cppcoreguidelines checkers.
|
|
||||||
# See the notes for some of the rest of them below.
|
|
||||||
cppcoreguidelines-macro-usage,
|
|
||||||
cppcoreguidelines-misleading-capture-default-by-value,
|
|
||||||
cppcoreguidelines-virtual-class-destructor,
|
|
||||||
|
|
||||||
# Skipping these temporarily because they are very noisy
|
|
||||||
-bugprone-forward-declaration-namespace,
|
|
||||||
-bugprone-narrowing-conversions,
|
|
||||||
-bugprone-unchecked-optional-access,
|
|
||||||
-performance-unnecessary-value-param,
|
|
||||||
-modernize-use-equals-default,
|
|
||||||
-modernize-use-integer-sign-comparison,
|
|
||||||
|
|
||||||
# The following cause either lots of pointless or advisory warnings
|
|
||||||
-bugprone-easily-swappable-parameters,
|
-bugprone-easily-swappable-parameters,
|
||||||
-bugprone-nondeterministic-pointer-iteration-order,
|
clang-analyzer-*,
|
||||||
|
performance-*'
|
||||||
# bifcl generates a lot of code with double underscores in their name.
|
|
||||||
# ZAM uses a few identifiers that start with underscores or have
|
|
||||||
# double-underscores in the name.
|
|
||||||
-bugprone-reserved-identifier,
|
|
||||||
|
|
||||||
# bifcl generates almost every switch statement without a default case
|
|
||||||
# and so this one generates a lot of warnings.
|
|
||||||
-bugprone-switch-missing-default-case,
|
|
||||||
|
|
||||||
# These report warnings that are rather difficult to fix or are things
|
|
||||||
# we simply don't want to fix.
|
|
||||||
-bugprone-undefined-memory-manipulation,
|
|
||||||
-bugprone-pointer-arithmetic-on-polymorphic-object,
|
|
||||||
-bugprone-empty-catch,
|
|
||||||
-bugprone-exception-escape,
|
|
||||||
-bugprone-suspicious-include,
|
|
||||||
-modernize-avoid-c-arrays,
|
|
||||||
-modernize-concat-nested-namespaces,
|
|
||||||
-modernize-raw-string-literal,
|
|
||||||
-modernize-use-auto,
|
|
||||||
-modernize-use-nodiscard,
|
|
||||||
-modernize-use-trailing-return-type,
|
|
||||||
-modernize-use-designated-initializers,
|
|
||||||
|
|
||||||
# This one returns a bunch of findings in DFA and the sqlite library.
|
|
||||||
# We're unlikely to fix either of them.
|
|
||||||
-performance-no-int-to-ptr,
|
|
||||||
|
|
||||||
# These cppcoreguidelines checkers are things we should investigate
|
|
||||||
# and possibly fix, but there are so many findings that we're holding
|
|
||||||
# off doing it for now.
|
|
||||||
#cppcoreguidelines-init-variables,
|
|
||||||
#cppcoreguidelines-prefer-member-initializer,
|
|
||||||
#cppcoreguidelines-pro-type-member-init,
|
|
||||||
#cppcoreguidelines-pro-type-cstyle-cast,
|
|
||||||
#cppcoreguidelines-pro-type-static-cast-downcast,
|
|
||||||
#cppcoreguidelines-special-member-functions,
|
|
||||||
|
|
||||||
# These are features in newer version of C++ that we don't have
|
|
||||||
# access to yet.
|
|
||||||
-modernize-use-std-format,
|
|
||||||
-modernize-use-std-print,
|
|
||||||
]
|
|
||||||
|
|
||||||
HeaderFilterRegex: '.h'
|
|
||||||
ExcludeHeaderFilterRegex: '.*(auxil|3rdparty)/.*'
|
|
||||||
SystemHeaders: false
|
|
||||||
CheckOptions:
|
|
||||||
- key: modernize-use-default-member-init.UseAssignment
|
|
||||||
value: 'true'
|
|
||||||
WarningsAsErrors: '*'
|
|
||||||
|
|
|
@ -33,6 +33,3 @@ f5a76c1aedc7f8886bc6abef0dfaa8065684b1f6
|
||||||
|
|
||||||
# clang-format: Format JSON with clang-format
|
# clang-format: Format JSON with clang-format
|
||||||
e6256446ddef5c5d5240eefff974556f2e12ac46
|
e6256446ddef5c5d5240eefff974556f2e12ac46
|
||||||
|
|
||||||
# analyzer/protocol: Reformat with spicy-format
|
|
||||||
d70bcd07b9b26036b16092fe950eca40e2f5a032
|
|
||||||
|
|
15
.github/workflows/generate-docs.yml
vendored
15
.github/workflows/generate-docs.yml
vendored
|
@ -16,7 +16,7 @@ jobs:
|
||||||
generate:
|
generate:
|
||||||
permissions:
|
permissions:
|
||||||
contents: write # for Git to git push
|
contents: write # for Git to git push
|
||||||
if: "github.repository == 'zeek/zeek' && contains(github.event.pull_request.labels.*.name, 'CI: Skip All') == false"
|
if: github.repository == 'zeek/zeek'
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
|
@ -66,14 +66,16 @@ jobs:
|
||||||
make \
|
make \
|
||||||
python3 \
|
python3 \
|
||||||
python3-dev \
|
python3-dev \
|
||||||
python3-pip \
|
python3-pip\
|
||||||
sqlite3 \
|
sqlite3 \
|
||||||
swig \
|
swig \
|
||||||
zlib1g-dev
|
zlib1g-dev
|
||||||
python3 -m venv ci-docs-venv
|
# Many distros adhere to PEP 394's recommendation for `python` =
|
||||||
source ci-docs-venv/bin/activate
|
# `python2` so this is a simple workaround until we drop Python 2
|
||||||
pip3 install -r doc/requirements.txt
|
# support and explicitly use `python3` for all invocations.
|
||||||
pip3 install pre-commit
|
sudo ln -sf /usr/bin/python3 /usr/local/bin/python
|
||||||
|
sudo pip3 install --break-system-packages -r doc/requirements.txt
|
||||||
|
sudo pip3 install --break-system-packages pre-commit
|
||||||
|
|
||||||
- name: ccache
|
- name: ccache
|
||||||
uses: hendrikmuhs/ccache-action@v1.2
|
uses: hendrikmuhs/ccache-action@v1.2
|
||||||
|
@ -110,7 +112,6 @@ jobs:
|
||||||
|
|
||||||
- name: Generate Docs
|
- name: Generate Docs
|
||||||
run: |
|
run: |
|
||||||
source ci-docs-venv/bin/activate
|
|
||||||
git config --global user.name zeek-bot
|
git config --global user.name zeek-bot
|
||||||
git config --global user.email info@zeek.org
|
git config --global user.email info@zeek.org
|
||||||
|
|
||||||
|
|
20
.gitmodules
vendored
20
.gitmodules
vendored
|
@ -1,6 +1,9 @@
|
||||||
[submodule "auxil/zeek-aux"]
|
[submodule "auxil/zeek-aux"]
|
||||||
path = auxil/zeek-aux
|
path = auxil/zeek-aux
|
||||||
url = https://github.com/zeek/zeek-aux
|
url = https://github.com/zeek/zeek-aux
|
||||||
|
[submodule "auxil/binpac"]
|
||||||
|
path = auxil/binpac
|
||||||
|
url = https://github.com/zeek/binpac
|
||||||
[submodule "auxil/zeekctl"]
|
[submodule "auxil/zeekctl"]
|
||||||
path = auxil/zeekctl
|
path = auxil/zeekctl
|
||||||
url = https://github.com/zeek/zeekctl
|
url = https://github.com/zeek/zeekctl
|
||||||
|
@ -10,12 +13,18 @@
|
||||||
[submodule "cmake"]
|
[submodule "cmake"]
|
||||||
path = cmake
|
path = cmake
|
||||||
url = https://github.com/zeek/cmake
|
url = https://github.com/zeek/cmake
|
||||||
|
[submodule "src/3rdparty"]
|
||||||
|
path = src/3rdparty
|
||||||
|
url = https://github.com/zeek/zeek-3rdparty
|
||||||
[submodule "auxil/broker"]
|
[submodule "auxil/broker"]
|
||||||
path = auxil/broker
|
path = auxil/broker
|
||||||
url = https://github.com/zeek/broker
|
url = https://github.com/zeek/broker
|
||||||
[submodule "auxil/netcontrol-connectors"]
|
[submodule "auxil/netcontrol-connectors"]
|
||||||
path = auxil/netcontrol-connectors
|
path = auxil/netcontrol-connectors
|
||||||
url = https://github.com/zeek/zeek-netcontrol
|
url = https://github.com/zeek/zeek-netcontrol
|
||||||
|
[submodule "auxil/bifcl"]
|
||||||
|
path = auxil/bifcl
|
||||||
|
url = https://github.com/zeek/bifcl
|
||||||
[submodule "doc"]
|
[submodule "doc"]
|
||||||
path = doc
|
path = doc
|
||||||
url = https://github.com/zeek/zeek-docs
|
url = https://github.com/zeek/zeek-docs
|
||||||
|
@ -37,6 +46,9 @@
|
||||||
[submodule "auxil/zeek-client"]
|
[submodule "auxil/zeek-client"]
|
||||||
path = auxil/zeek-client
|
path = auxil/zeek-client
|
||||||
url = https://github.com/zeek/zeek-client
|
url = https://github.com/zeek/zeek-client
|
||||||
|
[submodule "auxil/gen-zam"]
|
||||||
|
path = auxil/gen-zam
|
||||||
|
url = https://github.com/zeek/gen-zam
|
||||||
[submodule "auxil/c-ares"]
|
[submodule "auxil/c-ares"]
|
||||||
path = auxil/c-ares
|
path = auxil/c-ares
|
||||||
url = https://github.com/c-ares/c-ares
|
url = https://github.com/c-ares/c-ares
|
||||||
|
@ -46,6 +58,12 @@
|
||||||
[submodule "auxil/spicy"]
|
[submodule "auxil/spicy"]
|
||||||
path = auxil/spicy
|
path = auxil/spicy
|
||||||
url = https://github.com/zeek/spicy
|
url = https://github.com/zeek/spicy
|
||||||
|
[submodule "auxil/filesystem"]
|
||||||
|
path = auxil/filesystem
|
||||||
|
url = https://github.com/gulrak/filesystem.git
|
||||||
|
[submodule "auxil/zeek-af_packet-plugin"]
|
||||||
|
path = auxil/zeek-af_packet-plugin
|
||||||
|
url = https://github.com/zeek/zeek-af_packet-plugin.git
|
||||||
[submodule "auxil/libunistd"]
|
[submodule "auxil/libunistd"]
|
||||||
path = auxil/libunistd
|
path = auxil/libunistd
|
||||||
url = https://github.com/zeek/libunistd
|
url = https://github.com/zeek/libunistd
|
||||||
|
@ -63,7 +81,7 @@
|
||||||
url = https://github.com/zeromq/cppzmq
|
url = https://github.com/zeromq/cppzmq
|
||||||
[submodule "src/cluster/websocket/auxil/IXWebSocket"]
|
[submodule "src/cluster/websocket/auxil/IXWebSocket"]
|
||||||
path = src/cluster/websocket/auxil/IXWebSocket
|
path = src/cluster/websocket/auxil/IXWebSocket
|
||||||
url = https://github.com/machinezone/IXWebSocket
|
url = https://github.com/zeek/IXWebSocket.git
|
||||||
[submodule "auxil/expected-lite"]
|
[submodule "auxil/expected-lite"]
|
||||||
path = auxil/expected-lite
|
path = auxil/expected-lite
|
||||||
url = https://github.com/martinmoene/expected-lite.git
|
url = https://github.com/martinmoene/expected-lite.git
|
||||||
|
|
|
@ -10,7 +10,7 @@ repos:
|
||||||
language: python
|
language: python
|
||||||
files: '\.(h|c|cpp|cc|spicy|evt)$'
|
files: '\.(h|c|cpp|cc|spicy|evt)$'
|
||||||
types: [file]
|
types: [file]
|
||||||
exclude: '^(testing/btest/(Baseline|plugins|spicy|scripts)/.*|testing/builtin-plugins/.*|src/3rdparty/.*)$'
|
exclude: '^(testing/btest/(Baseline|plugins|spicy|scripts)/.*|testing/builtin-plugins/.*)$'
|
||||||
|
|
||||||
- id: btest-command-commented
|
- id: btest-command-commented
|
||||||
name: Check that all BTest command lines are commented out
|
name: Check that all BTest command lines are commented out
|
||||||
|
@ -19,26 +19,25 @@ repos:
|
||||||
files: '^testing/btest/.*$'
|
files: '^testing/btest/.*$'
|
||||||
|
|
||||||
- repo: https://github.com/pre-commit/mirrors-clang-format
|
- repo: https://github.com/pre-commit/mirrors-clang-format
|
||||||
rev: v20.1.8
|
rev: v20.1.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: clang-format
|
- id: clang-format
|
||||||
types_or:
|
types_or:
|
||||||
- "c"
|
- "c"
|
||||||
- "c++"
|
- "c++"
|
||||||
- "json"
|
- "json"
|
||||||
exclude: '^src/3rdparty/.*'
|
|
||||||
|
|
||||||
- repo: https://github.com/maxwinterstein/shfmt-py
|
- repo: https://github.com/maxwinterstein/shfmt-py
|
||||||
rev: v3.12.0.1
|
rev: v3.11.0.2
|
||||||
hooks:
|
hooks:
|
||||||
- id: shfmt
|
- id: shfmt
|
||||||
args: ["-w", "-i", "4", "-ci"]
|
args: ["-w", "-i", "4", "-ci"]
|
||||||
|
|
||||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||||
rev: v0.12.8
|
rev: v0.11.4
|
||||||
hooks:
|
hooks:
|
||||||
- id: ruff-check
|
- id: ruff
|
||||||
args: ["--fix"]
|
args: [--fix]
|
||||||
- id: ruff-format
|
- id: ruff-format
|
||||||
|
|
||||||
- repo: https://github.com/cheshirekow/cmake-format-precommit
|
- repo: https://github.com/cheshirekow/cmake-format-precommit
|
||||||
|
@ -47,13 +46,14 @@ repos:
|
||||||
- id: cmake-format
|
- id: cmake-format
|
||||||
|
|
||||||
- repo: https://github.com/crate-ci/typos
|
- repo: https://github.com/crate-ci/typos
|
||||||
rev: v1.35.3
|
rev: v1.30.1
|
||||||
hooks:
|
hooks:
|
||||||
- id: typos
|
- id: typos
|
||||||
exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES|scripts/base/protocols/ssl/mozilla-ca-list.zeek|src/3rdparty/.*)$'
|
exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES|scripts/base/protocols/ssl/mozilla-ca-list.zeek)$'
|
||||||
|
|
||||||
- repo: https://github.com/bbannier/spicy-format
|
- repo: https://github.com/bbannier/spicy-format
|
||||||
rev: v0.26.0
|
rev: v0.24.2
|
||||||
hooks:
|
hooks:
|
||||||
- id: spicy-format
|
- id: spicy-format
|
||||||
exclude: '^testing/.*'
|
# TODO: Reformat existing large analyzers just before 8.0.
|
||||||
|
exclude: '(^testing/.*)|(protocol/ldap/.*)|(protocol/quic/.*)|(protocol/websocket/.*)'
|
||||||
|
|
|
@ -30,15 +30,12 @@ extend-ignore-re = [
|
||||||
"ot->Yield\\(\\)->InternalType\\(\\)",
|
"ot->Yield\\(\\)->InternalType\\(\\)",
|
||||||
"switch \\( ot \\)",
|
"switch \\( ot \\)",
|
||||||
"\\(ZAMOpType ot\\)",
|
"\\(ZAMOpType ot\\)",
|
||||||
"exat", # Redis expire at
|
|
||||||
"EXAT",
|
|
||||||
|
|
||||||
# News stuff
|
# News stuff
|
||||||
"SupressWeirds.*deprecated",
|
"SupressWeirds.*deprecated",
|
||||||
"\"BaR\"",
|
"\"BaR\"",
|
||||||
"\"xFoObar\"",
|
"\"xFoObar\"",
|
||||||
"\"FoO\"",
|
"\"FoO\"",
|
||||||
"Smoot",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
extend-ignore-identifiers-re = [
|
extend-ignore-identifiers-re = [
|
||||||
|
@ -56,7 +53,6 @@ extend-ignore-identifiers-re = [
|
||||||
"complte_flag", # Existing use in exported record in base.
|
"complte_flag", # Existing use in exported record in base.
|
||||||
"VidP(n|N)", # In SMB.
|
"VidP(n|N)", # In SMB.
|
||||||
"iin", # In DNP3.
|
"iin", # In DNP3.
|
||||||
"SCN[dioux]", # sccanf fixed-width identifiers
|
|
||||||
"(ScValidatePnPService|ScSendPnPMessage)", # In DCE-RPC.
|
"(ScValidatePnPService|ScSendPnPMessage)", # In DCE-RPC.
|
||||||
"snet", # Used as shorthand for subnet in base scripts.
|
"snet", # Used as shorthand for subnet in base scripts.
|
||||||
"typ",
|
"typ",
|
||||||
|
@ -84,9 +80,6 @@ have_2nd = "have_2nd"
|
||||||
ot1 = "ot1"
|
ot1 = "ot1"
|
||||||
ot2 = "ot2"
|
ot2 = "ot2"
|
||||||
uses_seh = "uses_seh"
|
uses_seh = "uses_seh"
|
||||||
ect0 = "ect0"
|
|
||||||
ect1 = "ect1"
|
|
||||||
tpe = "tpe"
|
|
||||||
|
|
||||||
[default.extend-words]
|
[default.extend-words]
|
||||||
caf = "caf"
|
caf = "caf"
|
||||||
|
|
312
CMakeLists.txt
312
CMakeLists.txt
|
@ -68,8 +68,7 @@ option(INSTALL_ZEEKCTL "Install zeekctl." ${ZEEK_INSTALL_TOOLS_DEFAULT})
|
||||||
option(INSTALL_ZEEK_CLIENT "Install the zeek-client." ${ZEEK_INSTALL_TOOLS_DEFAULT})
|
option(INSTALL_ZEEK_CLIENT "Install the zeek-client." ${ZEEK_INSTALL_TOOLS_DEFAULT})
|
||||||
option(INSTALL_ZKG "Install zkg." ${ZEEK_INSTALL_TOOLS_DEFAULT})
|
option(INSTALL_ZKG "Install zkg." ${ZEEK_INSTALL_TOOLS_DEFAULT})
|
||||||
option(PREALLOCATE_PORT_ARRAY "Pre-allocate all ports for zeek::Val." ON)
|
option(PREALLOCATE_PORT_ARRAY "Pre-allocate all ports for zeek::Val." ON)
|
||||||
option(ZEEK_STANDALONE "Build Zeek as stand-alone binary." ON)
|
option(ZEEK_STANDALONE "Build Zeek as stand-alone binary?" ON)
|
||||||
option(ZEEK_ENABLE_FUZZERS "Build Zeek fuzzing targets." OFF)
|
|
||||||
|
|
||||||
# Non-boolean options.
|
# Non-boolean options.
|
||||||
if (NOT WIN32)
|
if (NOT WIN32)
|
||||||
|
@ -90,6 +89,8 @@ set(ZEEK_ETC_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/etc"
|
||||||
set(CMAKE_EXPORT_COMPILE_COMMANDS ON CACHE INTERNAL
|
set(CMAKE_EXPORT_COMPILE_COMMANDS ON CACHE INTERNAL
|
||||||
"Whether to write a JSON compile commands database")
|
"Whether to write a JSON compile commands database")
|
||||||
|
|
||||||
|
set(ZEEK_CXX_STD cxx_std_17 CACHE STRING "The C++ standard to use.")
|
||||||
|
|
||||||
set(ZEEK_SANITIZERS "" CACHE STRING "Sanitizers to use when building.")
|
set(ZEEK_SANITIZERS "" CACHE STRING "Sanitizers to use when building.")
|
||||||
|
|
||||||
set(CPACK_SOURCE_IGNORE_FILES "" CACHE STRING "Files to be ignored by CPack")
|
set(CPACK_SOURCE_IGNORE_FILES "" CACHE STRING "Files to be ignored by CPack")
|
||||||
|
@ -192,36 +193,18 @@ if (MSVC)
|
||||||
# TODO: This is disabled for now because there a bunch of known
|
# TODO: This is disabled for now because there a bunch of known
|
||||||
# compiler warnings on Windows that we don't have good fixes for.
|
# compiler warnings on Windows that we don't have good fixes for.
|
||||||
#set(WERROR_FLAG "/WX")
|
#set(WERROR_FLAG "/WX")
|
||||||
#set(WNOERROR_FLAG "/WX:NO")
|
#set(WERROR_FLAG "/WX")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
# Always build binpac in static mode if building on Windows
|
|
||||||
set(BUILD_STATIC_BINPAC true)
|
|
||||||
|
|
||||||
else ()
|
else ()
|
||||||
include(GNUInstallDirs)
|
include(GNUInstallDirs)
|
||||||
if (BUILD_WITH_WERROR)
|
if (BUILD_WITH_WERROR)
|
||||||
set(WERROR_FLAG "-Werror")
|
set(WERROR_FLAG "-Werror")
|
||||||
set(WNOERROR_FLAG "-Wno-error")
|
|
||||||
|
|
||||||
# With versions >=13.0 GCC gained `-Warray-bounds` which reports false
|
|
||||||
# positives, see e.g., https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111273.
|
|
||||||
if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13.0)
|
|
||||||
list(APPEND WERROR_FLAG "-Wno-error=array-bounds")
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
# With versions >=11.0 GCC is returning false positives for -Wrestrict. See
|
|
||||||
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100366. It's more prevalent
|
|
||||||
# building with -std=c++20.
|
|
||||||
if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 11.0)
|
|
||||||
list(APPEND WERROR_FLAG "-Wno-error=restrict")
|
|
||||||
endif ()
|
|
||||||
endif ()
|
endif ()
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
include(cmake/CommonCMakeConfig.cmake)
|
include(cmake/CommonCMakeConfig.cmake)
|
||||||
|
include(cmake/FindClangTidy.cmake)
|
||||||
include(cmake/CheckCompilerArch.cmake)
|
include(cmake/CheckCompilerArch.cmake)
|
||||||
include(cmake/RequireCXXStd.cmake)
|
|
||||||
|
|
||||||
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
|
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
|
||||||
|
|
||||||
|
@ -250,7 +233,7 @@ set(ZEEK_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
|
||||||
# zeek-plugin-create-package.sh. Needed by ZeekPluginConfig.cmake.in.
|
# zeek-plugin-create-package.sh. Needed by ZeekPluginConfig.cmake.in.
|
||||||
set(ZEEK_PLUGIN_SCRIPTS_PATH "${PROJECT_SOURCE_DIR}/cmake")
|
set(ZEEK_PLUGIN_SCRIPTS_PATH "${PROJECT_SOURCE_DIR}/cmake")
|
||||||
|
|
||||||
# Our C++ base target for propagating compiler and linker flags. Note: for
|
# Our C++17 base target for propagating compiler and linker flags. Note: for
|
||||||
# now, we only use it for passing library dependencies around.
|
# now, we only use it for passing library dependencies around.
|
||||||
add_library(zeek_internal INTERFACE)
|
add_library(zeek_internal INTERFACE)
|
||||||
add_library(Zeek::Internal ALIAS zeek_internal)
|
add_library(Zeek::Internal ALIAS zeek_internal)
|
||||||
|
@ -369,7 +352,7 @@ endfunction ()
|
||||||
find_package(Threads REQUIRED)
|
find_package(Threads REQUIRED)
|
||||||
|
|
||||||
# Interface library for propagating extra flags and include paths to dynamically
|
# Interface library for propagating extra flags and include paths to dynamically
|
||||||
# loaded plugins. Also propagates include paths and c++ standard mode on the install
|
# loaded plugins. Also propagates include paths and C++17 mode on the install
|
||||||
# interface.
|
# interface.
|
||||||
add_library(zeek_dynamic_plugin_base INTERFACE)
|
add_library(zeek_dynamic_plugin_base INTERFACE)
|
||||||
target_include_directories(
|
target_include_directories(
|
||||||
|
@ -396,14 +379,14 @@ endfunction ()
|
||||||
|
|
||||||
add_zeek_dynamic_plugin_build_interface_include_directories(
|
add_zeek_dynamic_plugin_build_interface_include_directories(
|
||||||
${PROJECT_SOURCE_DIR}/src/include
|
${PROJECT_SOURCE_DIR}/src/include
|
||||||
${PROJECT_SOURCE_DIR}/tools/binpac/lib
|
${PROJECT_SOURCE_DIR}/auxil/binpac/lib
|
||||||
${PROJECT_SOURCE_DIR}/auxil/broker/libbroker
|
${PROJECT_SOURCE_DIR}/auxil/broker/libbroker
|
||||||
${PROJECT_SOURCE_DIR}/auxil/paraglob/include
|
${PROJECT_SOURCE_DIR}/auxil/paraglob/include
|
||||||
${PROJECT_SOURCE_DIR}/auxil/prometheus-cpp/core/include
|
${PROJECT_SOURCE_DIR}/auxil/prometheus-cpp/core/include
|
||||||
${PROJECT_SOURCE_DIR}/auxil/expected-lite/include
|
${PROJECT_SOURCE_DIR}/auxil/expected-lite/include
|
||||||
${CMAKE_BINARY_DIR}/src
|
${CMAKE_BINARY_DIR}/src
|
||||||
${CMAKE_BINARY_DIR}/src/include
|
${CMAKE_BINARY_DIR}/src/include
|
||||||
${CMAKE_BINARY_DIR}/tools/binpac/lib
|
${CMAKE_BINARY_DIR}/auxil/binpac/lib
|
||||||
${CMAKE_BINARY_DIR}/auxil/broker/libbroker
|
${CMAKE_BINARY_DIR}/auxil/broker/libbroker
|
||||||
${CMAKE_BINARY_DIR}/auxil/prometheus-cpp/core/include)
|
${CMAKE_BINARY_DIR}/auxil/prometheus-cpp/core/include)
|
||||||
|
|
||||||
|
@ -666,7 +649,6 @@ if (ENABLE_DEBUG)
|
||||||
set(VERSION_C_IDENT "${VERSION_C_IDENT}_debug")
|
set(VERSION_C_IDENT "${VERSION_C_IDENT}_debug")
|
||||||
target_compile_definitions(zeek_internal INTERFACE DEBUG)
|
target_compile_definitions(zeek_internal INTERFACE DEBUG)
|
||||||
target_compile_definitions(zeek_dynamic_plugin_base INTERFACE DEBUG)
|
target_compile_definitions(zeek_dynamic_plugin_base INTERFACE DEBUG)
|
||||||
set(SPICYZ_FLAGS "-d" CACHE STRING "Additional flags to pass to spicyz for builtin analyzers")
|
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (NOT BINARY_PACKAGING_MODE)
|
if (NOT BINARY_PACKAGING_MODE)
|
||||||
|
@ -883,35 +865,46 @@ endif ()
|
||||||
set(PY_MOD_INSTALL_DIR ${py_mod_install_dir} CACHE STRING "Installation path for Python modules"
|
set(PY_MOD_INSTALL_DIR ${py_mod_install_dir} CACHE STRING "Installation path for Python modules"
|
||||||
FORCE)
|
FORCE)
|
||||||
|
|
||||||
# BinPAC uses the same 'ENABLE_STATIC_ONLY' variable to define whether
|
if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/auxil/binpac/CMakeLists.txt)
|
||||||
# to build statically. Save a local copy so it can be set based on the
|
|
||||||
# configure flag before we add the subdirectory.
|
|
||||||
set(ENABLE_STATIC_ONLY_SAVED ${ENABLE_STATIC_ONLY})
|
|
||||||
|
|
||||||
if (BUILD_STATIC_BINPAC)
|
set(ENABLE_STATIC_ONLY_SAVED ${ENABLE_STATIC_ONLY})
|
||||||
|
if (MSVC)
|
||||||
|
set(BUILD_STATIC_BINPAC true)
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
if (BUILD_STATIC_BINPAC)
|
||||||
set(ENABLE_STATIC_ONLY true)
|
set(ENABLE_STATIC_ONLY true)
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
add_subdirectory(auxil/binpac)
|
||||||
|
set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED})
|
||||||
|
|
||||||
|
# FIXME: avoid hard-coding a path for multi-config generator support. See the
|
||||||
|
# TODO in ZeekPluginConfig.cmake.in.
|
||||||
|
set(BINPAC_EXE_PATH "${CMAKE_BINARY_DIR}/auxil/binpac/src/binpac${CMAKE_EXECUTABLE_SUFFIX}")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
add_subdirectory(tools/binpac)
|
|
||||||
set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED})
|
|
||||||
|
|
||||||
# FIXME: avoid hard-coding a path for multi-config generator support. See the
|
|
||||||
# TODO in ZeekPluginConfig.cmake.in.
|
|
||||||
set(BINPAC_EXE_PATH "${CMAKE_BINARY_DIR}/tools/binpac/src/binpac${CMAKE_EXECUTABLE_SUFFIX}")
|
|
||||||
set(_binpac_exe_path "included")
|
|
||||||
|
|
||||||
# Need to call find_package so it sets up the include paths used by plugin builds.
|
|
||||||
find_package(BinPAC REQUIRED)
|
find_package(BinPAC REQUIRED)
|
||||||
|
|
||||||
|
# Add an alias (used by our plugin setup).
|
||||||
add_executable(Zeek::BinPAC ALIAS binpac)
|
add_executable(Zeek::BinPAC ALIAS binpac)
|
||||||
|
|
||||||
add_subdirectory(tools/bifcl)
|
if (NOT BIFCL_EXE_PATH)
|
||||||
add_executable(Zeek::BifCl ALIAS bifcl)
|
add_subdirectory(auxil/bifcl)
|
||||||
# FIXME: avoid hard-coding a path for multi-config generator support. See the
|
add_executable(Zeek::BifCl ALIAS bifcl)
|
||||||
# TODO in ZeekPluginConfig.cmake.in.
|
# FIXME: avoid hard-coding a path for multi-config generator support. See the
|
||||||
set(BIFCL_EXE_PATH "${CMAKE_BINARY_DIR}/tools/bifcl/bifcl${CMAKE_EXECUTABLE_SUFFIX}")
|
# TODO in ZeekPluginConfig.cmake.in.
|
||||||
set(_bifcl_exe_path "included")
|
set(BIFCL_EXE_PATH "${CMAKE_BINARY_DIR}/auxil/bifcl/bifcl${CMAKE_EXECUTABLE_SUFFIX}")
|
||||||
|
set(_bifcl_exe_path "included")
|
||||||
|
else ()
|
||||||
|
add_executable(Zeek::BifCl IMPORTED)
|
||||||
|
set_property(TARGET Zeek::BifCl PROPERTY IMPORTED_LOCATION "${BIFCL_EXE_PATH}")
|
||||||
|
set(_bifcl_exe_path "BIFCL_EXE_PATH")
|
||||||
|
endif ()
|
||||||
|
|
||||||
add_subdirectory(tools/gen-zam)
|
if (NOT GEN_ZAM_EXE_PATH)
|
||||||
|
add_subdirectory(auxil/gen-zam)
|
||||||
|
endif ()
|
||||||
|
|
||||||
if (ENABLE_JEMALLOC)
|
if (ENABLE_JEMALLOC)
|
||||||
if (${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
|
if (${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
|
||||||
|
@ -1016,7 +1009,6 @@ if (NOT DISABLE_SPICY)
|
||||||
set(Python3_EXECUTABLE ${Python_EXECUTABLE} CACHE STRING "Python3_EXECUTABLE hint")
|
set(Python3_EXECUTABLE ${Python_EXECUTABLE} CACHE STRING "Python3_EXECUTABLE hint")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
set(SPICY_ENABLE_TESTS OFF)
|
|
||||||
add_subdirectory(auxil/spicy)
|
add_subdirectory(auxil/spicy)
|
||||||
include(ConfigureSpicyBuild) # set some options different for building Spicy
|
include(ConfigureSpicyBuild) # set some options different for building Spicy
|
||||||
|
|
||||||
|
@ -1055,6 +1047,9 @@ include(BuiltInSpicyAnalyzer)
|
||||||
include_directories(BEFORE ${PCAP_INCLUDE_DIR} ${BIND_INCLUDE_DIR} ${BinPAC_INCLUDE_DIR}
|
include_directories(BEFORE ${PCAP_INCLUDE_DIR} ${BIND_INCLUDE_DIR} ${BinPAC_INCLUDE_DIR}
|
||||||
${ZLIB_INCLUDE_DIR} ${JEMALLOC_INCLUDE_DIR})
|
${ZLIB_INCLUDE_DIR} ${JEMALLOC_INCLUDE_DIR})
|
||||||
|
|
||||||
|
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/filesystem/include/ghc
|
||||||
|
DESTINATION include/zeek/3rdparty/)
|
||||||
|
|
||||||
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/prometheus-cpp/core/include/prometheus
|
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/prometheus-cpp/core/include/prometheus
|
||||||
DESTINATION include/zeek/3rdparty/prometheus-cpp/include)
|
DESTINATION include/zeek/3rdparty/prometheus-cpp/include)
|
||||||
|
|
||||||
|
@ -1064,8 +1059,15 @@ install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/auxil/prometheus-cpp/core/include/
|
||||||
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/expected-lite/include/nonstd
|
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/expected-lite/include/nonstd
|
||||||
DESTINATION include/zeek/3rdparty/)
|
DESTINATION include/zeek/3rdparty/)
|
||||||
|
|
||||||
|
# Create 3rdparty/ghc within the build directory so that the include for
|
||||||
|
# "zeek/3rdparty/ghc/filesystem.hpp" works within the build tree.
|
||||||
execute_process(COMMAND "${CMAKE_COMMAND}" -E make_directory
|
execute_process(COMMAND "${CMAKE_COMMAND}" -E make_directory
|
||||||
"${CMAKE_CURRENT_BINARY_DIR}/3rdparty/")
|
"${CMAKE_CURRENT_BINARY_DIR}/3rdparty/")
|
||||||
|
execute_process(
|
||||||
|
COMMAND
|
||||||
|
"${CMAKE_COMMAND}" -E create_symlink
|
||||||
|
"${CMAKE_CURRENT_SOURCE_DIR}/auxil/filesystem/include/ghc"
|
||||||
|
"${CMAKE_CURRENT_BINARY_DIR}/3rdparty/ghc")
|
||||||
|
|
||||||
# Do the same for nonstd.
|
# Do the same for nonstd.
|
||||||
execute_process(
|
execute_process(
|
||||||
|
@ -1080,16 +1082,18 @@ set(USE_GEOIP false)
|
||||||
find_package(LibMMDB)
|
find_package(LibMMDB)
|
||||||
if (LIBMMDB_FOUND)
|
if (LIBMMDB_FOUND)
|
||||||
set(USE_GEOIP true)
|
set(USE_GEOIP true)
|
||||||
include_directories(BEFORE SYSTEM ${LibMMDB_INCLUDE_DIR})
|
include_directories(BEFORE ${LibMMDB_INCLUDE_DIR})
|
||||||
list(APPEND OPTLIBS ${LibMMDB_LIBRARY})
|
list(APPEND OPTLIBS ${LibMMDB_LIBRARY})
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
set(USE_KRB5 false)
|
set(USE_KRB5 false)
|
||||||
find_package(LibKrb5)
|
if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
|
||||||
if (LIBKRB5_FOUND)
|
find_package(LibKrb5)
|
||||||
|
if (LIBKRB5_FOUND)
|
||||||
set(USE_KRB5 true)
|
set(USE_KRB5 true)
|
||||||
include_directories(BEFORE SYSTEM ${LibKrb5_INCLUDE_DIR})
|
include_directories(BEFORE ${LibKrb5_INCLUDE_DIR})
|
||||||
list(APPEND OPTLIBS ${LibKrb5_LIBRARY})
|
list(APPEND OPTLIBS ${LibKrb5_LIBRARY})
|
||||||
|
endif ()
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
set(HAVE_PERFTOOLS false)
|
set(HAVE_PERFTOOLS false)
|
||||||
|
@ -1121,7 +1125,7 @@ endif ()
|
||||||
# dependencies which tend to be in standard system locations and thus cause the
|
# dependencies which tend to be in standard system locations and thus cause the
|
||||||
# system OpenSSL headers to still be picked up even if one specifies
|
# system OpenSSL headers to still be picked up even if one specifies
|
||||||
# --with-openssl (which may be common).
|
# --with-openssl (which may be common).
|
||||||
include_directories(BEFORE SYSTEM ${OPENSSL_INCLUDE_DIR})
|
include_directories(BEFORE ${OPENSSL_INCLUDE_DIR})
|
||||||
|
|
||||||
# Determine if libfts is external to libc, i.e. musl
|
# Determine if libfts is external to libc, i.e. musl
|
||||||
find_package(FTS)
|
find_package(FTS)
|
||||||
|
@ -1187,6 +1191,18 @@ endif ()
|
||||||
# Tell the plugin code that we're building as part of the main tree.
|
# Tell the plugin code that we're building as part of the main tree.
|
||||||
set(ZEEK_PLUGIN_INTERNAL_BUILD true CACHE INTERNAL "" FORCE)
|
set(ZEEK_PLUGIN_INTERNAL_BUILD true CACHE INTERNAL "" FORCE)
|
||||||
|
|
||||||
|
set(ZEEK_HAVE_AF_PACKET no)
|
||||||
|
if (${CMAKE_SYSTEM_NAME} MATCHES Linux)
|
||||||
|
if (NOT DISABLE_AF_PACKET)
|
||||||
|
if (NOT AF_PACKET_PLUGIN_PATH)
|
||||||
|
set(AF_PACKET_PLUGIN_PATH ${CMAKE_SOURCE_DIR}/auxil/zeek-af_packet-plugin)
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
list(APPEND ZEEK_INCLUDE_PLUGINS ${AF_PACKET_PLUGIN_PATH})
|
||||||
|
set(ZEEK_HAVE_AF_PACKET yes)
|
||||||
|
endif ()
|
||||||
|
endif ()
|
||||||
|
|
||||||
set(ZEEK_HAVE_JAVASCRIPT no)
|
set(ZEEK_HAVE_JAVASCRIPT no)
|
||||||
if (NOT DISABLE_JAVASCRIPT)
|
if (NOT DISABLE_JAVASCRIPT)
|
||||||
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/auxil/zeekjs/cmake)
|
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/auxil/zeekjs/cmake)
|
||||||
|
@ -1206,7 +1222,6 @@ if (NOT DISABLE_JAVASCRIPT)
|
||||||
endif ()
|
endif ()
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
set(ZEEK_HAVE_AF_PACKET no CACHE INTERNAL "Zeek has AF_PACKET support")
|
|
||||||
set(ZEEK_HAVE_JAVASCRIPT ${ZEEK_HAVE_JAVASCRIPT} CACHE INTERNAL "Zeek has JavaScript support")
|
set(ZEEK_HAVE_JAVASCRIPT ${ZEEK_HAVE_JAVASCRIPT} CACHE INTERNAL "Zeek has JavaScript support")
|
||||||
|
|
||||||
set(DEFAULT_ZEEKPATH_PATHS
|
set(DEFAULT_ZEEKPATH_PATHS
|
||||||
|
@ -1225,7 +1240,11 @@ endif ()
|
||||||
include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR})
|
include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR})
|
||||||
execute_process(COMMAND "${CMAKE_COMMAND}" -E create_symlink "." "${CMAKE_CURRENT_BINARY_DIR}/zeek")
|
execute_process(COMMAND "${CMAKE_COMMAND}" -E create_symlink "." "${CMAKE_CURRENT_BINARY_DIR}/zeek")
|
||||||
|
|
||||||
set(ZEEK_CONFIG_BINPAC_ROOT_DIR ${BinPAC_ROOT_DIR})
|
if (BinPAC_ROOT_DIR)
|
||||||
|
set(ZEEK_CONFIG_BINPAC_ROOT_DIR ${BinPAC_ROOT_DIR})
|
||||||
|
else ()
|
||||||
|
set(ZEEK_CONFIG_BINPAC_ROOT_DIR ${ZEEK_ROOT_DIR})
|
||||||
|
endif ()
|
||||||
|
|
||||||
if (BROKER_ROOT_DIR)
|
if (BROKER_ROOT_DIR)
|
||||||
set(ZEEK_CONFIG_BROKER_ROOT_DIR ${BROKER_ROOT_DIR})
|
set(ZEEK_CONFIG_BROKER_ROOT_DIR ${BROKER_ROOT_DIR})
|
||||||
|
@ -1443,6 +1462,11 @@ else ()
|
||||||
set(_install_btest_tools_msg "no pcaps")
|
set(_install_btest_tools_msg "no pcaps")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
|
set(_binpac_exe_path "included")
|
||||||
|
if (BINPAC_EXE_PATH)
|
||||||
|
set(_binpac_exe_path ${BINPAC_EXE_PATH})
|
||||||
|
endif ()
|
||||||
|
|
||||||
set(_gen_zam_exe_path "included")
|
set(_gen_zam_exe_path "included")
|
||||||
if (GEN_ZAM_EXE_PATH)
|
if (GEN_ZAM_EXE_PATH)
|
||||||
set(_gen_zam_exe_path ${GEN_ZAM_EXE_PATH})
|
set(_gen_zam_exe_path ${GEN_ZAM_EXE_PATH})
|
||||||
|
@ -1472,118 +1496,68 @@ if (ZEEK_LEGACY_ANALYZERS OR ZEEK_SKIPPED_ANALYZERS)
|
||||||
)
|
)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
set(_zeek_builtin_plugins "${ZEEK_BUILTIN_PLUGINS}")
|
message(
|
||||||
if (NOT ZEEK_BUILTIN_PLUGINS)
|
"\n====================| Zeek Build Summary |===================="
|
||||||
set(_zeek_builtin_plugins "none")
|
"\n"
|
||||||
endif ()
|
"\nBuild type: ${CMAKE_BUILD_TYPE}"
|
||||||
|
"\nBuild dir: ${PROJECT_BINARY_DIR}"
|
||||||
set(_zeek_fuzzing_engine "${ZEEK_FUZZING_ENGINE}")
|
"\n"
|
||||||
if (NOT ZEEK_FUZZING_ENGINE)
|
"\nInstall prefix: ${CMAKE_INSTALL_PREFIX}"
|
||||||
if (ZEEK_ENABLE_FUZZERS)
|
"\nConfig file dir: ${ZEEK_ETC_INSTALL_DIR}"
|
||||||
# The default fuzzer used by gcc and clang is libFuzzer. This is if you
|
"\nLog dir: ${ZEEK_LOG_DIR}"
|
||||||
# simply pass '-fsanitize=fuzzer' to the compiler.
|
"\nPlugin dir: ${ZEEK_PLUGIN_DIR}"
|
||||||
set(_zeek_fuzzing_engine "libFuzzer")
|
"\nPython module dir: ${PY_MOD_INSTALL_DIR}"
|
||||||
endif ()
|
"\nScript dir: ${ZEEK_SCRIPT_INSTALL_PATH}"
|
||||||
endif ()
|
"\nSpool dir: ${ZEEK_SPOOL_DIR}"
|
||||||
|
"\nState dir: ${ZEEK_STATE_DIR}"
|
||||||
## Utility method for outputting status information for features that just have a
|
"\nSpicy modules dir: ${ZEEK_SPICY_MODULE_PATH}"
|
||||||
## string representation. This can also take an optional second argument that is a
|
"\n"
|
||||||
## value string to print.
|
"\nDebug mode: ${ENABLE_DEBUG}"
|
||||||
function (output_summary_line what)
|
"\nUnit tests: ${ENABLE_ZEEK_UNIT_TESTS}"
|
||||||
if ("${ARGV1}" MATCHES "^$")
|
"\nBuiltin Plugins: ${ZEEK_BUILTIN_PLUGINS}"
|
||||||
message("${what}:")
|
"\n"
|
||||||
return()
|
"\nCC: ${CMAKE_C_COMPILER}"
|
||||||
endif ()
|
"\nCFLAGS: ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${BuildType}}"
|
||||||
|
"\nCXX: ${CMAKE_CXX_COMPILER}"
|
||||||
set(_spaces " ")
|
"\nCXXFLAGS: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${BuildType}}"
|
||||||
string(LENGTH ${what} _what_length)
|
"\nCPP: ${CMAKE_CXX_COMPILER}"
|
||||||
math(EXPR _num_spaces "25 - ${_what_length}")
|
"\n"
|
||||||
string(SUBSTRING ${_spaces} 0 ${_num_spaces} _spacing)
|
"\nAF_PACKET: ${ZEEK_HAVE_AF_PACKET}"
|
||||||
message("${what}:${_spacing}${ARGV1}")
|
"\nAux. Tools: ${INSTALL_AUX_TOOLS}"
|
||||||
endfunction ()
|
"\nBifCL: ${_bifcl_exe_path}"
|
||||||
|
"\nBinPAC: ${_binpac_exe_path}"
|
||||||
## Utility method for outputting status information for features that have an ON/OFF
|
"\nBTest: ${INSTALL_BTEST}"
|
||||||
## state.
|
"\nBTest tooling: ${_install_btest_tools_msg}"
|
||||||
function (output_summary_bool what state)
|
"\nGen-ZAM: ${_gen_zam_exe_path}"
|
||||||
if (${state})
|
"\nJavaScript: ${ZEEK_HAVE_JAVASCRIPT}"
|
||||||
output_summary_line("${what}" "ON")
|
"\nSpicy: ${_spicy}"
|
||||||
else ()
|
"\nSpicy analyzers: ${USE_SPICY_ANALYZERS}"
|
||||||
output_summary_line("${what}" "OFF")
|
"\nzeek-client: ${INSTALL_ZEEK_CLIENT}"
|
||||||
endif ()
|
"\nZeekControl: ${INSTALL_ZEEKCTL}"
|
||||||
endfunction ()
|
"\nzkg: ${INSTALL_ZKG}"
|
||||||
|
"\n"
|
||||||
message("\n====================| Zeek Build Summary |====================\n")
|
"\nlibmaxminddb: ${USE_GEOIP}"
|
||||||
|
"\nKerberos: ${USE_KRB5}"
|
||||||
output_summary_line("Build type" "${CMAKE_BUILD_TYPE}")
|
"\ngperftools found: ${HAVE_PERFTOOLS}"
|
||||||
output_summary_line("Build dir" "${PROJECT_BINARY_DIR}")
|
"\n - tcmalloc: ${USE_PERFTOOLS_TCMALLOC}"
|
||||||
message("")
|
"\n - debugging: ${USE_PERFTOOLS_DEBUG}"
|
||||||
|
"\njemalloc: ${ENABLE_JEMALLOC}"
|
||||||
output_summary_line("Install prefix" "${CMAKE_INSTALL_PREFIX}")
|
"\n"
|
||||||
output_summary_line("Config file dir" "${ZEEK_ETC_INSTALL_DIR}")
|
"\nCluster backends:"
|
||||||
output_summary_line("Log dir" "${ZEEK_LOG_DIR}")
|
"\n - Broker: ON"
|
||||||
output_summary_line("Plugin dir" "${ZEEK_PLUGIN_DIR}")
|
"\n - ZeroMQ: ${ENABLE_CLUSTER_BACKEND_ZEROMQ}"
|
||||||
output_summary_line("Python module dir" "${PY_MOD_INSTALL_DIR}")
|
"\n"
|
||||||
output_summary_line("Script dir" "${ZEEK_SCRIPT_INSTALL_PATH}")
|
"\nStorage backends:"
|
||||||
output_summary_line("Spool dir" "${ZEEK_SPOOL_DIR}")
|
"\n - SQLite: ON"
|
||||||
output_summary_line("State dir" "${ZEEK_STATE_DIR}")
|
"\n - Redis: ${ENABLE_STORAGE_BACKEND_REDIS}"
|
||||||
output_summary_line("Spicy modules dir" "${ZEEK_SPICY_MODULE_PATH}")
|
"\n"
|
||||||
message("")
|
"\nFuzz Targets: ${ZEEK_ENABLE_FUZZERS}"
|
||||||
|
"\nFuzz Engine: ${ZEEK_FUZZING_ENGINE}"
|
||||||
output_summary_bool("Debug mode" ${ENABLE_DEBUG})
|
"\n"
|
||||||
output_summary_bool("Unit tests" ${ENABLE_ZEEK_UNIT_TESTS})
|
"\nInclude What You Use: ${ENABLE_IWYU}"
|
||||||
message("")
|
"\nClang-Tidy: ${ENABLE_CLANG_TIDY}"
|
||||||
|
"${_analyzer_warning}"
|
||||||
output_summary_line("Builtin Plugins" "${_zeek_builtin_plugins}")
|
"\n"
|
||||||
message("")
|
"\n================================================================\n")
|
||||||
|
|
||||||
output_summary_line("CC" "${CMAKE_C_COMPILER}")
|
|
||||||
output_summary_line("CFLAGS" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${BuildType}}")
|
|
||||||
output_summary_line("CXX" "${CMAKE_CXX_COMPILER}")
|
|
||||||
output_summary_line("CXXFLAGS" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${BuildType}}")
|
|
||||||
output_summary_line("CPP" "${CMAKE_CXX_COMPILER}")
|
|
||||||
message("")
|
|
||||||
|
|
||||||
output_summary_bool("AF_PACKET" ${ZEEK_HAVE_AF_PACKET})
|
|
||||||
output_summary_bool("Aux. Tools" ${INSTALL_AUX_TOOLS})
|
|
||||||
output_summary_bool("BTest" ${INSTALL_BTEST})
|
|
||||||
output_summary_line("BTest tooling" ${_install_btest_tools_msg})
|
|
||||||
output_summary_bool("JavaScript" ${ZEEK_HAVE_JAVASCRIPT})
|
|
||||||
output_summary_line("Spicy" ${_spicy})
|
|
||||||
output_summary_bool("Spicy analyzers" ${USE_SPICY_ANALYZERS})
|
|
||||||
output_summary_bool("zeek-client" ${INSTALL_ZEEK_CLIENT})
|
|
||||||
output_summary_bool("ZeekControl" ${INSTALL_ZEEKCTL})
|
|
||||||
output_summary_bool("zkg" ${INSTALL_ZKG})
|
|
||||||
message("")
|
|
||||||
|
|
||||||
output_summary_bool("libmaxminddb" ${USE_GEOIP})
|
|
||||||
output_summary_bool("Kerberos" ${USE_KRB5})
|
|
||||||
output_summary_bool("gperftools" ${HAVE_PERFTOOLS})
|
|
||||||
output_summary_bool(" - tcmalloc" ${USE_PERFTOOLS_TCMALLOC})
|
|
||||||
output_summary_bool(" - debugging" ${USE_PERFTOOLS_DEBUG})
|
|
||||||
output_summary_bool("jemalloc" ${ENABLE_JEMALLOC})
|
|
||||||
message("")
|
|
||||||
|
|
||||||
output_summary_line("Cluster backends")
|
|
||||||
output_summary_bool(" - Broker" ON)
|
|
||||||
output_summary_bool(" - ZeroMQ" ${ENABLE_CLUSTER_BACKEND_ZEROMQ})
|
|
||||||
message("")
|
|
||||||
|
|
||||||
output_summary_line("Storage backends")
|
|
||||||
output_summary_bool(" - SQLite" ON)
|
|
||||||
output_summary_bool(" - Redis" ${ENABLE_STORAGE_BACKEND_REDIS})
|
|
||||||
message("")
|
|
||||||
|
|
||||||
output_summary_bool("Fuzz Targets" ${ZEEK_ENABLE_FUZZERS})
|
|
||||||
output_summary_line("Fuzz Engine" "${_zeek_fuzzing_engine}")
|
|
||||||
message("")
|
|
||||||
|
|
||||||
output_summary_line("External Tools/Linters")
|
|
||||||
output_summary_bool(" - Include What You Use" ${ENABLE_IWYU})
|
|
||||||
output_summary_bool(" - Clang-Tidy" ${ENABLE_CLANG_TIDY})
|
|
||||||
|
|
||||||
if (${_analyzer_warning})
|
|
||||||
message("${_analyzer_warning}\n")
|
|
||||||
endif ()
|
|
||||||
message("\n================================================================")
|
|
||||||
|
|
||||||
include(UserChangedWarning)
|
include(UserChangedWarning)
|
||||||
|
|
|
@ -533,6 +533,32 @@ POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
==============================================================================
|
==============================================================================
|
||||||
|
|
||||||
|
%%% auxil/filesystem
|
||||||
|
|
||||||
|
==============================================================================
|
||||||
|
|
||||||
|
Copyright (c) 2018, Steffen Schümann <s.schuemann@pobox.com>
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
||||||
|
==============================================================================
|
||||||
|
|
||||||
%%% auxil/highwayhash
|
%%% auxil/highwayhash
|
||||||
|
|
||||||
==============================================================================
|
==============================================================================
|
||||||
|
|
492
NEWS
492
NEWS
|
@ -3,508 +3,26 @@ This document summarizes the most important changes in the current Zeek
|
||||||
release. For an exhaustive list of changes, see the ``CHANGES`` file
|
release. For an exhaustive list of changes, see the ``CHANGES`` file
|
||||||
(note that submodules, such as Broker, come with their own ``CHANGES``.)
|
(note that submodules, such as Broker, come with their own ``CHANGES``.)
|
||||||
|
|
||||||
Zeek 8.1.0
|
|
||||||
==========
|
|
||||||
|
|
||||||
We would like to thank @chrisjlly, Klemens Nanni (@klemensn), and Klemens Nanni
|
|
||||||
(@klemens-ya) for their contributions to this release.
|
|
||||||
|
|
||||||
Breaking Changes
|
|
||||||
----------------
|
|
||||||
|
|
||||||
- Python 3.10 is now required for Zeek and all of its associated subprojects.
|
|
||||||
|
|
||||||
- The ``&optional`` script attribute will now error when applied to anything that's
|
|
||||||
not a record field. Previously, this would have surprising behavior.
|
|
||||||
|
|
||||||
- The BinPAC, Bifcl, and Gen-ZAM tools have all moved directly into the Zeek repo, which
|
|
||||||
should ease maintenance on them a bit. They were moved from the ``auxil`` directory to the
|
|
||||||
tools directory. Along with this, the ``--gen-zam`` argument for ``configure`` was
|
|
||||||
removed and the internal version will always be used.
|
|
||||||
|
|
||||||
- The zeek-af_packet-plugin git submodule was moved directly into the Zeek repo. This used
|
|
||||||
to live in the ``auxil`` directory, after having moved there from an external plugin.
|
|
||||||
It is now built as part of main Zeek build whenever building on Linux.
|
|
||||||
|
|
||||||
New Functionality
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
- A new TapAnalyzer class was added allowing to tap into all packets delivered
|
|
||||||
to child analyzers attached to session adapters.
|
|
||||||
|
|
||||||
- Two new hooks, ``Cluster::on_subscribe()`` and ``Cluster::on_unsubscribe()`` have
|
|
||||||
been added to allow observing ``Subscribe()`` and ``Unsubscribe()`` calls on
|
|
||||||
backends by Zeek scripts.
|
|
||||||
|
|
||||||
- The ability to control the length of strings and containers in log output was added. The
|
|
||||||
maximum length of individual log fields can be set, as well as the total length of all
|
|
||||||
string or container fields in a single log record. This feature is controlled via four
|
|
||||||
new script-level variables:
|
|
||||||
|
|
||||||
Log::default_max_field_string_bytes
|
|
||||||
Log::default_max_total_string_bytes
|
|
||||||
Log::default_max_field_container_elements
|
|
||||||
Log::default_max_total_container_elements
|
|
||||||
|
|
||||||
When one of the ``field`` limits is reached, the individual field is truncated. When one
|
|
||||||
of the ``total`` limits is reached, all further strings will returned as empty and all
|
|
||||||
further container elements will not be output. See the documentation for those variables
|
|
||||||
for more detail.
|
|
||||||
|
|
||||||
The above variables control the truncation globally, but they can also be set for log
|
|
||||||
streams individually. This is controlled by variables with the same names that can be
|
|
||||||
set when the log stream is created.
|
|
||||||
|
|
||||||
Two new weirds were added to report the truncation: ``log_string_field_truncated`` and
|
|
||||||
``log_container_field_truncated``. New metrics were added to track how many truncations
|
|
||||||
have occurred: ``zeek_log_writer_truncated_string_fields_total`` and
|
|
||||||
``zeek_log_writer_truncated_containers_total``. The metrics are reported for each log
|
|
||||||
stream.
|
|
||||||
|
|
||||||
- The DNS analyzer now returns the set of parameters for SVCB data. It previously handled
|
|
||||||
SVCB packets, but omitted the parameters while parsing.
|
|
||||||
|
|
||||||
Changed Functionality
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
- The var-extraction-uri.zeek policy does not include the path in the ``uri_vars``
|
|
||||||
field anymore.
|
|
||||||
|
|
||||||
- The ``get_current_packet_header()`` now populates the returned record also for
|
|
||||||
fragmented IP datagrams.
|
|
||||||
|
|
||||||
Removed Functionality
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
Deprecated Functionality
|
|
||||||
------------------------
|
|
||||||
|
|
||||||
|
|
||||||
Zeek 8.0.0
|
Zeek 8.0.0
|
||||||
==========
|
==========
|
||||||
|
|
||||||
We would like to thank @aidans111, Anthony Verez (@netantho), Baa (@Baa14453),
|
We would like to thank ... for their contributions to this release.
|
||||||
Bhaskar Bhar (@bhaskarbhar), @dwhitemv25, EdKo (@ephikos), @edoardomich, Fupeng
|
|
||||||
Zhao (@AmazingPP), hendrik.schwartke@os-s.de (@hendrikschwartke), @i2z1, Jan
|
|
||||||
Grashöfer (@J-Gras) Jean-Samuel Marier, Justin Azoff (@JustinAzoff), Mario D
|
|
||||||
(@mari0d), Markus Elfring (@elfring), Peter Cullen (@pbcullen), Sean Donaghy,
|
|
||||||
Simeon Miteff (@simeonmiteff), Steve Smoot (@stevesmoot), @timo-mue,
|
|
||||||
@wojciech-graj, and Xiaochuan Ye (@XueSongTap) for their contributions to this
|
|
||||||
release.
|
|
||||||
|
|
||||||
Breaking Changes
|
Breaking Changes
|
||||||
----------------
|
----------------
|
||||||
|
|
||||||
- Zeek by default now depends on the availability of the ZeroMQ library for building
|
|
||||||
and running. This is in preparation of switching to the ZeroMQ-based cluster backend
|
|
||||||
by default in future Zeek versions. On an Ubuntu based system, the required system
|
|
||||||
packages are ``libzmq5``, ``libzmq3-dev`` and ``cppzmq-dev``. See the Dockerfiles
|
|
||||||
in the ``ci/`` directory for other supported platforms.
|
|
||||||
|
|
||||||
- Zeek and all of its associated submodules now require C++20-capable compilers to
|
|
||||||
build. This will let us move forward in using more modern C++ features and replace some
|
|
||||||
workarounds that we have been carrying. Minimum recommended versions of compilers are
|
|
||||||
GCC 10, Clang 8, and Visual Studio 2022.
|
|
||||||
|
|
||||||
- The ``zeek::Span`` class has been deprecated and the APIs in the telemetry subsystem
|
|
||||||
switched to use ``std::span`` instead of ``zeek::Span``. If your plugin instantiates
|
|
||||||
counter or gauge instances using the telemetry subsystem and you've previously used
|
|
||||||
``zeek::Span`` explicitly, updates may be needed.
|
|
||||||
|
|
||||||
- The code base underwent a big cleanup of #include usage, across almost all of the
|
|
||||||
files. We tested builds of all of the existing third-party packages and only noticed one
|
|
||||||
or two failures, but there is a possibility for breakage related to this cleanup.
|
|
||||||
|
|
||||||
- The ``lookup_connection()`` and ``connection_exists()`` builtin functions
|
|
||||||
now require ``conn_id`` instances as argument, rather than internally supporting
|
|
||||||
duck type matching ``conn_id``-like records.
|
|
||||||
|
|
||||||
- Network timestamps are not added to events by default anymore. Use the following
|
|
||||||
redef line to enable them:
|
|
||||||
|
|
||||||
redef EventMetadata::add_network_timestamp = T;
|
|
||||||
|
|
||||||
The background is that event metadata has become more generic and may incur
|
|
||||||
a small overhead when enabled. There's not enough users of network timestamp
|
|
||||||
metadata to justify the complexity of treating it separate.
|
|
||||||
|
|
||||||
- The ASCII writer's ``JSON::TS_MILLIS`` timestamp format was changed to produce
|
|
||||||
signed integers. This matters for the representation for timestamps that are
|
|
||||||
before the UNIX epoch. These are now written as negative values, while previously
|
|
||||||
the negative value was interpreted as an unsigned integer, resulting in very large
|
|
||||||
timestamps, potentially causing issues for downstream consumers.
|
|
||||||
|
|
||||||
If you prefer to always have unsigned values, it's possible to revert to the previous
|
|
||||||
behavior by setting:
|
|
||||||
|
|
||||||
redef LogAscii::json_timestamps = JSON::TS_MILLIS_UNSIGNED;
|
|
||||||
|
|
||||||
- The "endpoint" label of metrics exposed via Prometheus or the ``telemetry.log``
|
|
||||||
was renamed to "node". This is done for consistency with cluster terminology:
|
|
||||||
The label values have always been the value of ``Cluster::node`, so it's more intuitive
|
|
||||||
to call it. The "endpoint" name originated from a time when the telemetry framework
|
|
||||||
was implemented in Broker.
|
|
||||||
|
|
||||||
To revert to the "endpoint" label, you can do the following, but we strongly
|
|
||||||
suggest to migrate to the new default "node" instead:
|
|
||||||
|
|
||||||
redef Telemetry::metrics_endpoint_label = "endpoint";
|
|
||||||
|
|
||||||
- The ``current_event_time()`` builtin function as well as ``Event::Time()``
|
|
||||||
and ``EventMgr::CurrentEventTime()`` now return ``-1.0`` if no timestamp
|
|
||||||
metadata is available for the current event, or if no event is being
|
|
||||||
dispatched. Previously this would've been 0.0, or the timestamp of the previously
|
|
||||||
dispatched event.
|
|
||||||
|
|
||||||
- Missing network timestamp metadata on remote events is not set to the local
|
|
||||||
network time anymore by default. This potentially hid useful debugging information
|
|
||||||
about another node not sending timestamp metadata. The old behavior can be
|
|
||||||
re-enabled as follows:
|
|
||||||
|
|
||||||
redef EventMetadata::add_missing_remote_network_timestamp = T;
|
|
||||||
|
|
||||||
- The ``IsPacketSource()`` method on ``IOSource`` was removed. It was unused
|
|
||||||
and incorrectly returned ``false`` on all packet sources.
|
|
||||||
|
|
||||||
- The ``--with-binpac`` and ``--with-bifcl`` arguments for ``configure`` are now
|
|
||||||
deprecated. Both arguments have for a long time just used the internal version of the
|
|
||||||
tooling even if something was passed, so they were mostly useless. This may cause
|
|
||||||
breakage of cross-compiling, where the ``binpac`` and ``bifcl`` tooling needs to be run
|
|
||||||
on the host machine. We haven't heard from anyone that this is the case with the
|
|
||||||
arguments in their currently-broken state.
|
|
||||||
|
|
||||||
- The parsing of data for the ``ssl_session_ticket_handshake`` event was fixed.
|
|
||||||
In the past, the data contained two extra bytes before the session ticket
|
|
||||||
data. The event now contains only the session ticket data. You might have to
|
|
||||||
adjust your scripts if you manually worked around this bug in the past.
|
|
||||||
|
|
||||||
New Functionality
|
New Functionality
|
||||||
-----------------
|
-----------------
|
||||||
|
|
||||||
- Zeek now supports pluggable and customizable connection tracking. The default
|
|
||||||
behavior remains unchanged and uses a connection's five tuple based on the
|
|
||||||
IP/port pairs and proto field. Zeek 8 ships with one additional implementation,
|
|
||||||
to factor VLAN tags into the connection tracking. To switch to VLAN-aware
|
|
||||||
connection tracking:
|
|
||||||
|
|
||||||
@load frameworks/conn_key/vlan_fivetuple
|
|
||||||
|
|
||||||
By convention, additional fields used by alternative ConnKey implementations are
|
|
||||||
added into the new ``ctx`` field of ``conn_id``. The type of ``ctx`` is ``conn_id_ctx``.
|
|
||||||
|
|
||||||
The ``vlan_fivetuple`` script adds two additional fields to the ``conn_id_ctx``
|
|
||||||
record type, representing any VLAN tags involved. Accordingly, every log
|
|
||||||
using ``conn_id`` reflects the change as well as ``ctx`` and the VLAN fields have
|
|
||||||
the ``&log`` attribute. The columns used for logging will be named ``id.ctx.vlan``
|
|
||||||
and ``id.ctx.inner_vlan``.
|
|
||||||
|
|
||||||
This feature does not automatically provide a notion of endpoint that
|
|
||||||
corresponds with the effective connection tuple. For example, applications tracking
|
|
||||||
endpoints by IP address do not somehow become VLAN-aware when enabling
|
|
||||||
VLAN-aware tracking.
|
|
||||||
|
|
||||||
Users may experiment with their own notion of endpoint by combining the ``orig_h``
|
|
||||||
or ``resp_h`` field of ``conn_id`` with the new ``ctx`` field. For example, tracking
|
|
||||||
the number of connections from a given host in a VLAN-aware fashion can be done
|
|
||||||
as follows:
|
|
||||||
|
|
||||||
global connection_counts: table[conn_id_ctx, addr] of count &default=0;
|
|
||||||
|
|
||||||
event new_connection(c: connection) {
|
|
||||||
++connection_counts[c$id$ctx, c$id$orig_h];
|
|
||||||
}
|
|
||||||
|
|
||||||
Note that this script snippet isn't VLAN-specific, yet it is VLAN-aware if the
|
|
||||||
``vlan_fivetuple`` script is loaded. In future Zeek versions, this pattern is
|
|
||||||
likely to be used to adapt base and policy scripts for more "context awareness".
|
|
||||||
|
|
||||||
Users may add their own plugins (for example via a zkg package) to provide
|
|
||||||
alternative implementations. This involves implementing a factory for
|
|
||||||
connection "keys" that factor in additional flow information. See the VLAN
|
|
||||||
implementation in the ``src/packet_analysis/protocol/ip/conn_key/vlan_fivetuple``
|
|
||||||
directory for an example.
|
|
||||||
|
|
||||||
- Added support to ZeekControl for seamlessly switching to ZeroMQ as cluster
|
|
||||||
backend by adding the following settings to zeekctl.cfg:
|
|
||||||
|
|
||||||
ClusterBackend = ZeroMQ
|
|
||||||
UseWebSocket = 1
|
|
||||||
|
|
||||||
With the ZeroMQ cluster backend, Zeekctl requires to use Zeek's WebSocket API
|
|
||||||
to communicate with individual nodes for the ``print`` and ``netstats`` commands.
|
|
||||||
Setting the ``UseWebSocket`` option enables a WebSocket server on the manager
|
|
||||||
node, listening on 127.0.0.1:27759 by default (this is configurable with using
|
|
||||||
the newly introduced ``WebSocketHost`` and ``WebSocketPort`` options).
|
|
||||||
The ``UseWebSocket`` option can also be used when ``ClusterBackend`` is set
|
|
||||||
to ``Broker``, but isn't strictly required.
|
|
||||||
|
|
||||||
For ZeroMQ (or other future cluster backends), setting ``UseWebSocket`` is a
|
|
||||||
requirement as Zeekctl does not speak the native ZeroMQ protocol to communicate
|
|
||||||
with cluster nodes for executing commands. This functionality requires the
|
|
||||||
``websockets`` Python package with version 11.0 or higher.
|
|
||||||
|
|
||||||
- Cluster telemetry improvements. Zeek now exposes a configurable number of
|
|
||||||
metrics regarding outgoing and incoming cluster events. By default, the number
|
|
||||||
of events sent and received by a Zeek cluster node and any attached WebSocket
|
|
||||||
clients is tracked as four individual counters. It's possible to gather more
|
|
||||||
detailed information by adding ``Cluster::Telemetry::VERBOSE`` and
|
|
||||||
``Cluster::Telemetry::DEBUG`` to the variables ``Cluster::core_metrics`` and
|
|
||||||
``Cluster::webscoket_metrics``:
|
|
||||||
|
|
||||||
redef Cluster::core_metrics += { Cluster::Telemetry::VERBOSE };
|
|
||||||
redef Cluster::websocket_metrics += { Cluster::Telemetry::DEBUG };
|
|
||||||
|
|
||||||
Configuring verbose, adds metrics that are labeled with the event handler
|
|
||||||
and topic name. Configuring debug, uses histogram metrics to additionally track
|
|
||||||
the distribution of the serialized event size. Additionally, when debug is selected,
|
|
||||||
outgoing events are labeled with the script location from where they were published.
|
|
||||||
|
|
||||||
- Support for the X-Application-Name HTTP header was added to the WebSocket API at
|
|
||||||
``v1/messages/json``. A WebSocket application connecting to Zeek may set the
|
|
||||||
X-Application-Name header to a descriptive identifier. The value of this header
|
|
||||||
will be added to the cluster metrics as ``app`` label. This allows to gather
|
|
||||||
incoming and outgoing event metrics of a specific WebSocket application, simply
|
|
||||||
by setting the X-Application-Name header.
|
|
||||||
|
|
||||||
- The SMTP analyzer can now optionally forward the top-level RFC 822 message individual
|
|
||||||
SMTP transactions to the file analysis framework. This can be leveraged to extract
|
|
||||||
emails in form of ``.eml`` files from SMTP traffic to disk.
|
|
||||||
|
|
||||||
To enable this feature, set the ``SMTP::enable_rfc822_msg_file_analysis`` option
|
|
||||||
and implement an appropriate ``file_new()`` or ``file_over_new_connection()`` handler:
|
|
||||||
|
|
||||||
redef SMTP::enable_rfc822_msg_file_analysis = T;
|
|
||||||
|
|
||||||
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) {
|
|
||||||
if ( f$id == c$smtp$rfc822_msg_fuid )
|
|
||||||
Files::add_analyzer(f, Files::ANALYZER_EXTRACT, [$extract_filename="email"]);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
- Generic event metadata support. A new ``EventMetadata`` module was added allowing
|
|
||||||
to register generic event metadata types and accessing the current event's metadata
|
|
||||||
using the functions ``current()`` and ``current_all()`` of this module.
|
|
||||||
|
|
||||||
- A new plugin hook, ``HookPublishEvent()``, has been added for intercepting
|
|
||||||
publishing of Zeek events. This hook may be used for monitoring purposes,
|
|
||||||
modifying or rerouting remote events.
|
|
||||||
|
|
||||||
Plugins can implement and enable this hook by calling the following method
|
|
||||||
within their Configure() implementation.
|
|
||||||
|
|
||||||
EnableHook(HOOK_PUBLISH_EVENT)
|
|
||||||
|
|
||||||
The signature of ``HookPublishEvent()`` is as follows.
|
|
||||||
|
|
||||||
bool HookPublishEvent(zeek::cluster::Backend& backend,
|
|
||||||
const std::string& topic,
|
|
||||||
zeek::cluster::detail::Event& event);
|
|
||||||
|
|
||||||
- Zeek now includes the Redis protocol analyzer from the evantypanski/spicy-redis
|
|
||||||
project (https://github.com/evantypanski/spicy-redis). This analyzer is enabled
|
|
||||||
by default. This analyzer logs Redis commands and their associated replies in
|
|
||||||
``redis.log``.
|
|
||||||
|
|
||||||
To disable the analyzer in case of issues, use the following snippet:
|
|
||||||
|
|
||||||
redef Analyzer::disabled_analyzers += {
|
|
||||||
Analyzer::ANALYZER_REDIS,
|
|
||||||
};
|
|
||||||
|
|
||||||
- The FTP analyzer now supports explicit TLS via AUTH TLS.
|
|
||||||
|
|
||||||
- Two new script-level hooks in the Intel framework have been added.
|
|
||||||
|
|
||||||
hook indicator_inserted(indicator_value: string, indicator_type: Intel::Type)
|
|
||||||
|
|
||||||
hook indicator_removed(indicator_value: string, indicator_type: Intel::Type)
|
|
||||||
|
|
||||||
These are reliably invoked on worker and manager nodes the first time an
|
|
||||||
indicator value is inserted into the store and once it has been completely
|
|
||||||
removed from the store.
|
|
||||||
|
|
||||||
- The ``frameworks/intel/seen`` scripts have been annotated with event groups
|
|
||||||
and a new ``frameworks/intel/seen/manage-event-groups`` policy script added.
|
|
||||||
|
|
||||||
The motivation is to allow Zeek distributors to load the ``intel/seen`` scripts
|
|
||||||
by default without incurring their event overhead when no Intel indicators are
|
|
||||||
loaded. Corresponding event handlers are enabled once the first Intel indicator
|
|
||||||
of a given ``Intel::Type`` is added. Event handlers are disabled when the last
|
|
||||||
indicator is removed again.
|
|
||||||
|
|
||||||
Note that the ``manage-event-groups`` script interacts with the ``Intel::seen_policy``
|
|
||||||
hook: If no indicators for a given ``Intel::Type`` are loaded, the ``Intel::seen_policy``
|
|
||||||
will not be invoked as the event handlers extracting indicators aren't executed.
|
|
||||||
|
|
||||||
If you rely on the ``Intel::seen_policy`` hook to be invoked regardless of the
|
|
||||||
contents of the Intel store, do not load the ``manage-event-groups`` or set:
|
|
||||||
|
|
||||||
redef Intel::manage_seen_event_groups = F;
|
|
||||||
|
|
||||||
- The DNS analyzer was extended to support NAPTR RRs (RFC 2915, RFC 3403).
|
|
||||||
A corresponding ``dns_NAPTR_reply`` event was added.
|
|
||||||
|
|
||||||
- A new ``get_tags_by_category`` BIF method was added that returns a list of tags for a
|
|
||||||
specified plugin category. This can be used in lieu of calling ``zeek -NN`` and
|
|
||||||
parsing the output. For example, this will return the list of all analyzer plugins
|
|
||||||
currently loaded:
|
|
||||||
|
|
||||||
get_tags_by_category("ANALYZER");
|
|
||||||
|
|
||||||
- A new ``conn_generic_packet_threshold_crossed`` event was introduced. The event triggers
|
|
||||||
for any IP-based session that reaches a given threshold. Multiple packet thresholds can
|
|
||||||
be defined in ``ConnThreshold::generic_packet_thresholds``. The generic thresholds refer
|
|
||||||
to the total number of packets on a connection without taking direction into account
|
|
||||||
(i.e. the event also triggers on one-sided connections).
|
|
||||||
|
|
||||||
The event is intended as an alternative to the ``new_connection`` event that allows for
|
|
||||||
ignoring short-lived connections like DNS or scans. For example, it can be used to set
|
|
||||||
up traditional connection monitoring without introducing overhead for connections that
|
|
||||||
would never reach a larger threshold anyway.
|
|
||||||
|
|
||||||
- Zeek now supports extracting the PPPoE session ID. The ``PacketAnalyzer::PPPoE::session_id``
|
|
||||||
BiF can be used to get the session ID of the current packet.
|
|
||||||
|
|
||||||
The ``conn/pppoe-session-id-logging.zeek`` policy script adds pppoe session IDs to the
|
|
||||||
connection log.
|
|
||||||
|
|
||||||
The ``get_conn_stats()`` function's return value now includes the number of packets
|
|
||||||
that have not been processed by any analyzer. Using data from ``get_conn_stats()`` and
|
|
||||||
``get_net_stats()``, it's possible to determine the number of packets that have
|
|
||||||
been received and accepted by Zeek, but eventually discarded without processing.
|
|
||||||
|
|
||||||
Changed Functionality
|
Changed Functionality
|
||||||
---------------------
|
---------------------
|
||||||
|
|
||||||
- The `Conn::set_conn` function is now always run in `new_connection`, instead of only
|
|
||||||
being run in `connection_state_remove`.
|
|
||||||
|
|
||||||
- Logging of failed analyzers has been overhauled. `dpd.log` was replaced
|
|
||||||
by a new `analyzer.log` that presents a more unified and consistent view
|
|
||||||
of failed analyzers. The previous `analyzer.log` was renamed to `analyzer-debug.log`;
|
|
||||||
see below for more details.
|
|
||||||
|
|
||||||
For protocol analyzers, `analyzer.log` now reports initially confirmed analyzers that
|
|
||||||
Zeek subsequently removed from the connection due to a protocol violation.
|
|
||||||
|
|
||||||
For file and packet analyzers, all errors will be logged to `analyzer.log`.
|
|
||||||
|
|
||||||
As part of this work, a new `analyzer_failed` event has been introduced. This event
|
|
||||||
is raised when an analyzer is removed because of raising a violation.
|
|
||||||
|
|
||||||
- `analyzer.log` was renamed to `analyzer_debug.log`, and is no longer created
|
|
||||||
by default. The log file will be created if the `frameworks/analyzer/debug-logging.zeek`
|
|
||||||
policy script is loaded.
|
|
||||||
|
|
||||||
Note that the namespace for options in the script changed to
|
|
||||||
`Analyzer::DebugLogging`. Furthermore the default options changed to enable
|
|
||||||
more detailed output by default.
|
|
||||||
|
|
||||||
- Record fields with a ``&default`` attribute are now consistently re-initialized
|
|
||||||
after deleting such fields. Previously, this would only work for constant
|
|
||||||
expressions, but has been extended to apply to arbitrary expressions.
|
|
||||||
|
|
||||||
- Publishing remote events with vector arguments that contain holes is now
|
|
||||||
rejected. The receiver side never had a chance to figure out where these
|
|
||||||
holes would have been. There's a chance this breaks scripts that accidentally
|
|
||||||
published vectors with holes. A reporter error is produced at runtime when
|
|
||||||
serialization of vectors with holes is attempted.
|
|
||||||
|
|
||||||
- Kerberos support on macOS has been enabled. Due to incompatibilities, the system
|
|
||||||
provided libkrb5 is ignored, however. Only versions from homebrew are supported and
|
|
||||||
found/picked-up by default. Use --with-krb5 for pointing at a custom libkrb5
|
|
||||||
installation.
|
|
||||||
|
|
||||||
- The ``$listen_host`` configuration for ``Cluster::listen_websocket()``'s
|
|
||||||
``WebSocketServerOptions`` was deprecated. Use the new ``$listen_addr`` field
|
|
||||||
instead.
|
|
||||||
|
|
||||||
- The `service_violation` field of the connection record was marked as deprecated.
|
|
||||||
Consider using the new `failed_analyzers` field of the connection record instead.
|
|
||||||
|
|
||||||
- `detect-protocol.zeek` was the last non-deprecated policy script left in
|
|
||||||
`frameworks/dpd`. It was moved to `frameworks/analyzer/detect-protocol.zeek`.
|
|
||||||
|
|
||||||
- Running Zeek with Zeekygen for documentation extraction (-X|--zeekygen
|
|
||||||
<cfgfile>) now implies -a, i.e., parse-only mode.
|
|
||||||
|
|
||||||
- The `not_valid_before` and `not_valid_after` times of X509 certificates are
|
|
||||||
now logged as GMT timestamps. Before, they were logged as local times; thus
|
|
||||||
the output was dependent on the timezone that your system is set to.
|
|
||||||
Similarly, the related events and the Zeek data structures all interpreted
|
|
||||||
times in X509 certificates as local times.
|
|
||||||
|
|
||||||
- The PPPoE parser now respects the size value given in the PPPoE header. Data
|
|
||||||
beyond the size given in the header will be truncated.
|
|
||||||
|
|
||||||
- Record fields with ``&default`` attributes initializing empty ``vector``, ``table``
|
|
||||||
or ``set`` instances are now deferred until they are accessed, potentially
|
|
||||||
improving memory usage when such fields are never accessed.
|
|
||||||
|
|
||||||
Removed Functionality
|
Removed Functionality
|
||||||
---------------------
|
---------------------
|
||||||
|
|
||||||
- The ``--with-bind`` argument for ``configure`` was removed. We removed the need for the
|
|
||||||
BIND library from our CMake setup in the v7.2 release, but this non-functional argument
|
|
||||||
was left behind.
|
|
||||||
|
|
||||||
- The ``--disable-archiver`` argument for ``configure`` was removed. This was deprecated
|
|
||||||
and scheduled to be removed in v7.1, but we apparently missed it during the cleanup for
|
|
||||||
that release.
|
|
||||||
|
|
||||||
Deprecated Functionality
|
Deprecated Functionality
|
||||||
------------------------
|
------------------------
|
||||||
|
|
||||||
- The `dpd.log` is now deprecated and replaced by `analyzer.log` (see above).
|
|
||||||
`dpd.log` is no longer created by default, but can be loaded using the
|
|
||||||
`frameworks/analyzer/deprecated-dpd-log.zeek` policy script.
|
|
||||||
|
|
||||||
Relatedly, the `service_violation` field of the connection record is
|
|
||||||
deprecated and will only be present if the
|
|
||||||
`frameworks/analyzer/deprecated-dpd-log.zeek` policy script is loaded.
|
|
||||||
|
|
||||||
- The ``protocols/http/detect-sqli.zeek`` script has been deprecated in favor of a
|
|
||||||
new ``protocols/http/detect-sql-injection.zeek`` script to switch from the victim
|
|
||||||
host being placed into the ``src`` field of a notice to instead use ``dst``.
|
|
||||||
The attacker host is now placed into ``src``. Further, notices hold the first
|
|
||||||
sampled connection uid.
|
|
||||||
|
|
||||||
Note that the ``Notice::Type`` enumeration names remain the same. You can determine
|
|
||||||
which script was used by the presence of populated ``uid`` and ``dst`` fields in the
|
|
||||||
``notice.log`` entries.
|
|
||||||
|
|
||||||
The replacement script doesn't populate the ``email_body_sections`` anymore either.
|
|
||||||
|
|
||||||
- Using ``&default`` and ``&optional`` together on a record field has been deprecated
|
|
||||||
as it would only result in ``&default`` behavior. This will become an error starting
|
|
||||||
with Zeek 8.1.
|
|
||||||
|
|
||||||
- The ``zeek::Event()`` constructor was deprecated. Use ``event_mgr::Enqueue()``
|
|
||||||
or ``event_mgr::Dispatch()`` instead.
|
|
||||||
|
|
||||||
- Passing ``ts`` as the last argument to ``EventMgr::Enqueue()`` has been deprecated
|
|
||||||
and will lead to compile time warnings. Use ``EventMgr::Enqueue(detail::MetadataVectorPtr meta, ...)``
|
|
||||||
for populating ``meta`` accordingly.
|
|
||||||
|
|
||||||
- For plugin authors: in the core, the constructor for Connection instances has
|
|
||||||
been deprecated in favor of a new one to support pluggable connection
|
|
||||||
tuples. The ConnTuple struct, used by this deprecated Connection constructor,
|
|
||||||
is now deprecated as well.
|
|
||||||
|
|
||||||
|
|
||||||
- The ``zeek::filesystem`` namespace alias is deprecated in favor of using
|
|
||||||
``std::filesystem`` directly. Similarly, the ``ghc::filesystem`` submodule stored in
|
|
||||||
``auxil/filesystem`` has been removed and the files included from it in the Zeek
|
|
||||||
installation will no longer be installed. Builds won't warn about the deprecation of
|
|
||||||
``zeek::filesystem`` due to limitations of how we can mark deprecations in C++.
|
|
||||||
|
|
||||||
- The ``zeek::util::starts_with`` and ``zeek::util::ends_with`` functions are deprecated.
|
|
||||||
``std::string`` and ``std::string_view`` added ``starts_with`` and ``ends_with`` methods
|
|
||||||
in C++ 20, and those should be used instead.
|
|
||||||
|
|
||||||
- The ``record_type_to_vector`` BIF is deprecated in favor of using the newly ordered
|
|
||||||
``record_fields`` BIF.
|
|
||||||
|
|
||||||
Zeek 7.2.0
|
Zeek 7.2.0
|
||||||
==========
|
==========
|
||||||
|
|
||||||
|
@ -608,9 +126,9 @@ New Functionality
|
||||||
metrics are available to understand the health of each peering's buffer,
|
metrics are available to understand the health of each peering's buffer,
|
||||||
regardless of the overflow policy active. These are:
|
regardless of the overflow policy active. These are:
|
||||||
|
|
||||||
- zeek_broker_peer_buffer_messages: a gauge of the current buffer fill level,
|
- zeek_broker_peer_buffer_levels: a gauge of the current buffer fill level,
|
||||||
|
|
||||||
- zeek_broker_peer_buffer_recent_max_messages: a gauge that tracks the maximum
|
- zeek_broker_peer_buffer_recent_max_levels: a gauge that tracks the maximum
|
||||||
buffer fill level seen over the last ``Broker::buffer_stats_reset_interval`.
|
buffer fill level seen over the last ``Broker::buffer_stats_reset_interval`.
|
||||||
|
|
||||||
- zeek_broker_peer_buffer_overflows_total: a counter that tracks the number
|
- zeek_broker_peer_buffer_overflows_total: a counter that tracks the number
|
||||||
|
@ -827,7 +345,7 @@ New Functionality
|
||||||
some updates to Zeek's internal DNS resolver due to changes in the c-ares
|
some updates to Zeek's internal DNS resolver due to changes in the c-ares
|
||||||
API. At least version v1.28.0 is now required to build Zeek.
|
API. At least version v1.28.0 is now required to build Zeek.
|
||||||
|
|
||||||
- Python 3.9 is now required for Zeek and all of its associated subprojects.
|
- Python 3.9 is now required for Zeek and all of it's associated subprojects.
|
||||||
|
|
||||||
- IP-based connections that were previously not logged due to using an unknown
|
- IP-based connections that were previously not logged due to using an unknown
|
||||||
IP protocol (e.g. not TCP, UDP, or ICMP) now appear in conn.log. All conn.log
|
IP protocol (e.g. not TCP, UDP, or ICMP) now appear in conn.log. All conn.log
|
||||||
|
@ -918,7 +436,7 @@ New Functionality
|
||||||
analyzer used for processing the packet when the event is raised. The
|
analyzer used for processing the packet when the event is raised. The
|
||||||
``unknown_protocol.log`` file was extended to include this information.
|
``unknown_protocol.log`` file was extended to include this information.
|
||||||
|
|
||||||
- The MySQL analyzer now generates a ``mysql_change_user()`` event when the user
|
- The MySQL analyzer now generates a ``mysql_user_change()`` event when the user
|
||||||
changes mid-session via the ``COM_USER_CHANGE`` command.
|
changes mid-session via the ``COM_USER_CHANGE`` command.
|
||||||
|
|
||||||
- The DNS analyzer was extended to support TKEY RRs (RFC 2390). A corresponding
|
- The DNS analyzer was extended to support TKEY RRs (RFC 2390). A corresponding
|
||||||
|
|
2
README
2
README
|
@ -3,7 +3,7 @@ The Zeek Network Security Monitor
|
||||||
=================================
|
=================================
|
||||||
|
|
||||||
Zeek is a powerful framework for network traffic analysis and security
|
Zeek is a powerful framework for network traffic analysis and security
|
||||||
monitoring.
|
monitoring. Follow us on Twitter at @zeekurity.
|
||||||
|
|
||||||
Key Features
|
Key Features
|
||||||
============
|
============
|
||||||
|
|
|
@ -15,15 +15,14 @@ traffic analysis and security monitoring.
|
||||||
[_Development_](#development) —
|
[_Development_](#development) —
|
||||||
[_License_](#license)
|
[_License_](#license)
|
||||||
|
|
||||||
|
Follow us on Twitter at [@zeekurity](https://twitter.com/zeekurity).
|
||||||
|
|
||||||
[](https://coveralls.io/github/zeek/zeek?branch=master)
|
[](https://coveralls.io/github/zeek/zeek?branch=master)
|
||||||
[](https://cirrus-ci.com/github/zeek/zeek)
|
[](https://cirrus-ci.com/github/zeek/zeek)
|
||||||
|
|
||||||
[](https://zeek.org/slack)
|
[](https://zeek.org/slack)
|
||||||
[](https://community.zeek.org)
|
[](https://community.zeek.org)
|
||||||
|
|
||||||
[](https://infosec.exchange/@zeek)
|
|
||||||
[](https://bsky.app/profile/zeek.org)
|
|
||||||
|
|
||||||
</h4>
|
</h4>
|
||||||
|
|
||||||
|
|
||||||
|
|
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
||||||
8.1.0-dev.626
|
8.0.0-dev.0
|
||||||
|
|
1
auxil/bifcl
Submodule
1
auxil/bifcl
Submodule
|
@ -0,0 +1 @@
|
||||||
|
Subproject commit 49e956cd278ad0ca72040536ff606f4bb8d4224f
|
1
auxil/binpac
Submodule
1
auxil/binpac
Submodule
|
@ -0,0 +1 @@
|
||||||
|
Subproject commit 894afb64d954f6858ba9101061e56be93b621aca
|
|
@ -1 +1 @@
|
||||||
Subproject commit 06d491943f4bee6c2d1e17a5c7c31836d725273d
|
Subproject commit 5b6cbb8c2d9124aa1fb0bea5799433138dc64cf9
|
|
@ -1 +1 @@
|
||||||
Subproject commit 8c0fbfd74325b6c9be022a98bcd414b6f103d09e
|
Subproject commit 1092e9c03ca62c16fd3d9065117f708630ec2573
|
1
auxil/filesystem
Submodule
1
auxil/filesystem
Submodule
|
@ -0,0 +1 @@
|
||||||
|
Subproject commit 72a76d774e4c7c605141fd6d11c33cc211209ed9
|
1
auxil/gen-zam
Submodule
1
auxil/gen-zam
Submodule
|
@ -0,0 +1 @@
|
||||||
|
Subproject commit f113c5f3220263eca87c3ffaafae43fda3295ae3
|
|
@ -1 +1 @@
|
||||||
Subproject commit ea30540c77679ced3ce7886199384e8743628921
|
Subproject commit 10d93cff9fd6c8d8c3e0bae58312aed470843ff8
|
|
@ -1 +1 @@
|
||||||
Subproject commit 7e3670aa1f6ab7623a87ff1e770f7f6b5a1c59f1
|
Subproject commit d2bfec929540c1fec5d1d45f0bcee3cff1eb7fa5
|
|
@ -1 +1 @@
|
||||||
Subproject commit ad301651ad0a7426757f8bc94cfc8e8cd98451a8
|
Subproject commit ab6aff89296d11363427beab34f88258c0abd467
|
|
@ -1 +1 @@
|
||||||
Subproject commit 4505c4323283b56ea59935210e105da26ab7bb0b
|
Subproject commit 45ce017874aac9ffabac0ddc4d016f1747804234
|
|
@ -1 +1 @@
|
||||||
Subproject commit 7635e113080be6fc20cb308636c8c38565c95c8a
|
Subproject commit e15e0bd959a03d06822ae76b53eef6181daf01a2
|
1
auxil/zeek-af_packet-plugin
Submodule
1
auxil/zeek-af_packet-plugin
Submodule
|
@ -0,0 +1 @@
|
||||||
|
Subproject commit a3fe59b3f1ded5c3461995134b66c6db182fa56f
|
|
@ -1 +1 @@
|
||||||
Subproject commit 9a51ce1940a808aaad253077905c2b34f15f1e08
|
Subproject commit ee706c54e665dab92a54253f934d2acf1f79137d
|
|
@ -1 +1 @@
|
||||||
Subproject commit 16849ca3ec2f8637e3f8ef8ee27e2c279724387f
|
Subproject commit 4440c7a05ba4be229ac88d70e8f4eef2465afc50
|
|
@ -1 +1 @@
|
||||||
Subproject commit 485abcad45daeea6d09680e5fc7d29e97d2e3fbe
|
Subproject commit a824eedf2fdd28298f09d96ed10c7c74802dc8e4
|
|
@ -1 +1 @@
|
||||||
Subproject commit e5985abfffc1ef5ead3a0bab196fa5d86bc5276f
|
Subproject commit 614380100480b6b4ddcf8d868119865d1f97abad
|
|
@ -2,7 +2,7 @@ FROM alpine:latest
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20250306
|
||||||
|
|
||||||
RUN apk add --no-cache \
|
RUN apk add --no-cache \
|
||||||
bash \
|
bash \
|
||||||
|
|
|
@ -1,49 +0,0 @@
|
||||||
FROM quay.io/centos/centos:stream10
|
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
|
||||||
|
|
||||||
# dnf config-manager isn't available at first, and
|
|
||||||
# we need it to install the CRB repo below.
|
|
||||||
RUN dnf -y install 'dnf-command(config-manager)'
|
|
||||||
|
|
||||||
# What used to be powertools is now called "CRB".
|
|
||||||
# We need it for some of the packages installed below.
|
|
||||||
# https://docs.fedoraproject.org/en-US/epel/
|
|
||||||
RUN dnf config-manager --set-enabled crb
|
|
||||||
RUN dnf -y install \
|
|
||||||
https://dl.fedoraproject.org/pub/epel/epel-release-latest-10.noarch.rpm
|
|
||||||
|
|
||||||
# The --nobest flag is hopefully temporary. Without it we currently hit
|
|
||||||
# package versioning conflicts around OpenSSL.
|
|
||||||
RUN dnf -y --nobest install \
|
|
||||||
bison \
|
|
||||||
ccache \
|
|
||||||
cmake \
|
|
||||||
cppzmq-devel \
|
|
||||||
diffutils \
|
|
||||||
flex \
|
|
||||||
gcc \
|
|
||||||
gcc-c++ \
|
|
||||||
git \
|
|
||||||
jq \
|
|
||||||
libpcap-devel \
|
|
||||||
make \
|
|
||||||
openssl \
|
|
||||||
openssl-devel \
|
|
||||||
procps-ng \
|
|
||||||
python3 \
|
|
||||||
python3-devel \
|
|
||||||
python3-pip\
|
|
||||||
sqlite \
|
|
||||||
swig \
|
|
||||||
tar \
|
|
||||||
which \
|
|
||||||
zlib-devel \
|
|
||||||
&& dnf clean all && rm -rf /var/cache/dnf
|
|
||||||
|
|
||||||
# Set the crypto policy to allow SHA-1 certificates - which we have in our tests
|
|
||||||
RUN dnf -y --nobest install crypto-policies-scripts && update-crypto-policies --set LEGACY
|
|
||||||
|
|
||||||
RUN pip3 install websockets junit2html
|
|
|
@ -2,7 +2,7 @@ FROM quay.io/centos/centos:stream9
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20241024
|
||||||
|
|
||||||
# dnf config-manager isn't available at first, and
|
# dnf config-manager isn't available at first, and
|
||||||
# we need it to install the CRB repo below.
|
# we need it to install the CRB repo below.
|
||||||
|
@ -34,9 +34,9 @@ RUN dnf -y --nobest install \
|
||||||
openssl \
|
openssl \
|
||||||
openssl-devel \
|
openssl-devel \
|
||||||
procps-ng \
|
procps-ng \
|
||||||
python3.13 \
|
python3 \
|
||||||
python3.13-devel \
|
python3-devel \
|
||||||
python3.13-pip\
|
python3-pip\
|
||||||
sqlite \
|
sqlite \
|
||||||
swig \
|
swig \
|
||||||
tar \
|
tar \
|
||||||
|
@ -47,8 +47,4 @@ RUN dnf -y --nobest install \
|
||||||
# Set the crypto policy to allow SHA-1 certificates - which we have in our tests
|
# Set the crypto policy to allow SHA-1 certificates - which we have in our tests
|
||||||
RUN dnf -y --nobest install crypto-policies-scripts && update-crypto-policies --set LEGACY
|
RUN dnf -y --nobest install crypto-policies-scripts && update-crypto-policies --set LEGACY
|
||||||
|
|
||||||
# Override the default python3.9 installation paths with 3.13
|
|
||||||
RUN alternatives --install /usr/bin/python3 python3 /usr/bin/python3.13 10
|
|
||||||
RUN alternatives --install /usr/bin/pip3 pip3 /usr/bin/pip3.13 10
|
|
||||||
|
|
||||||
RUN pip3 install websockets junit2html
|
RUN pip3 install websockets junit2html
|
||||||
|
|
|
@ -1,36 +1,32 @@
|
||||||
FROM debian:13
|
FROM debian:11
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
|
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20241024
|
||||||
|
|
||||||
RUN apt-get update && apt-get -y install \
|
RUN apt-get update && apt-get -y install \
|
||||||
bison \
|
bison \
|
||||||
bsdmainutils \
|
bsdmainutils \
|
||||||
ccache \
|
ccache \
|
||||||
cmake \
|
cmake \
|
||||||
cppzmq-dev \
|
|
||||||
curl \
|
curl \
|
||||||
dnsmasq \
|
|
||||||
flex \
|
flex \
|
||||||
g++ \
|
g++ \
|
||||||
gcc \
|
gcc \
|
||||||
git \
|
git \
|
||||||
jq \
|
jq \
|
||||||
libkrb5-dev \
|
libkrb5-dev \
|
||||||
libnats-dev \
|
|
||||||
libnode-dev \
|
libnode-dev \
|
||||||
libpcap-dev \
|
libpcap-dev \
|
||||||
librdkafka-dev \
|
|
||||||
libssl-dev \
|
libssl-dev \
|
||||||
libuv1-dev \
|
libuv1-dev \
|
||||||
|
libzmq3-dev \
|
||||||
make \
|
make \
|
||||||
python3 \
|
python3 \
|
||||||
python3-dev \
|
python3-dev \
|
||||||
python3-pip\
|
python3-pip\
|
||||||
python3-websockets \
|
|
||||||
sqlite3 \
|
sqlite3 \
|
||||||
swig \
|
swig \
|
||||||
wget \
|
wget \
|
||||||
|
@ -39,6 +35,4 @@ RUN apt-get update && apt-get -y install \
|
||||||
&& apt autoclean \
|
&& apt autoclean \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Debian trixie really doesn't like using pip to install system wide stuff, but
|
RUN pip3 install websockets junit2html
|
||||||
# doesn't seem there's a python3-junit2html package, so not sure what we'd break.
|
|
||||||
RUN pip3 install --break-system-packages junit2html
|
|
|
@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20241024
|
||||||
|
|
||||||
RUN apt-get update && apt-get -y install \
|
RUN apt-get update && apt-get -y install \
|
||||||
bison \
|
bison \
|
||||||
|
|
|
@ -1,8 +1,8 @@
|
||||||
FROM fedora:42
|
FROM fedora:40
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20241024
|
||||||
|
|
||||||
RUN dnf -y install \
|
RUN dnf -y install \
|
||||||
bison \
|
bison \
|
||||||
|
@ -10,9 +10,8 @@ RUN dnf -y install \
|
||||||
cmake \
|
cmake \
|
||||||
cppzmq-devel \
|
cppzmq-devel \
|
||||||
diffutils \
|
diffutils \
|
||||||
findutils \
|
dnsmasq \
|
||||||
flex \
|
flex \
|
||||||
gawk \
|
|
||||||
gcc \
|
gcc \
|
||||||
gcc-c++ \
|
gcc-c++ \
|
||||||
git \
|
git \
|
||||||
|
@ -23,14 +22,12 @@ RUN dnf -y install \
|
||||||
openssl \
|
openssl \
|
||||||
openssl-devel \
|
openssl-devel \
|
||||||
procps-ng \
|
procps-ng \
|
||||||
python3 \
|
|
||||||
python3-devel \
|
python3-devel \
|
||||||
python3-pip\
|
python3-pip\
|
||||||
sqlite \
|
sqlite \
|
||||||
swig \
|
swig \
|
||||||
which \
|
which \
|
||||||
zlib-devel \
|
zlib-devel \
|
||||||
crypto-policies-scripts \
|
|
||||||
&& dnf clean all && rm -rf /var/cache/dnf
|
&& dnf clean all && rm -rf /var/cache/dnf
|
||||||
|
|
||||||
RUN pip3 install websockets junit2html
|
RUN pip3 install websockets junit2html
|
|
@ -2,7 +2,7 @@ FROM fedora:41
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20250203
|
||||||
|
|
||||||
RUN dnf -y install \
|
RUN dnf -y install \
|
||||||
bison \
|
bison \
|
||||||
|
|
|
@ -7,7 +7,7 @@ set -x
|
||||||
|
|
||||||
brew update
|
brew update
|
||||||
brew upgrade cmake
|
brew upgrade cmake
|
||||||
brew install cppzmq openssl@3 python@3 swig bison flex ccache libmaxminddb dnsmasq krb5
|
brew install cppzmq openssl@3 python@3 swig bison flex ccache libmaxminddb dnsmasq
|
||||||
|
|
||||||
which python3
|
which python3
|
||||||
python3 --version
|
python3 --version
|
||||||
|
|
|
@ -2,7 +2,7 @@ FROM opensuse/leap:15.6
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20241024
|
||||||
|
|
||||||
RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.6:Update/standard/openSUSE:Leap:15.6:Update.repo \
|
RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.6:Update/standard/openSUSE:Leap:15.6:Update.repo \
|
||||||
&& zypper refresh \
|
&& zypper refresh \
|
||||||
|
|
|
@ -2,7 +2,7 @@ FROM opensuse/tumbleweed
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20250311
|
||||||
|
|
||||||
# Remove the repo-openh264 repository, it caused intermittent issues
|
# Remove the repo-openh264 repository, it caused intermittent issues
|
||||||
# and we should not be needing any packages from it.
|
# and we should not be needing any packages from it.
|
||||||
|
|
|
@ -1,27 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
zypper refresh
|
|
||||||
zypper patch -y --with-update --with-optional
|
|
||||||
|
|
||||||
LATEST_VERSION=$(zypper search -n ${ZEEK_CI_COMPILER} |
|
|
||||||
awk -F "|" "match(\$2, / ${ZEEK_CI_COMPILER}([0-9]{2})[^-]/, a) {print a[1]}" |
|
|
||||||
sort | tail -1)
|
|
||||||
|
|
||||||
echo "Installing ${ZEEK_CI_COMPILER} ${LATEST_VERSION}"
|
|
||||||
|
|
||||||
zypper install -y "${ZEEK_CI_COMPILER}${LATEST_VERSION}"
|
|
||||||
|
|
||||||
if [ "${ZEEK_CI_COMPILER}" == "gcc" ]; then
|
|
||||||
zypper install -y "${ZEEK_CI_COMPILER}${LATEST_VERSION}-c++"
|
|
||||||
fi
|
|
||||||
|
|
||||||
update-alternatives --install /usr/bin/cc cc "/usr/bin/${ZEEK_CI_COMPILER}-${LATEST_VERSION}" 100
|
|
||||||
update-alternatives --set cc "/usr/bin/${ZEEK_CI_COMPILER}-${LATEST_VERSION}"
|
|
||||||
|
|
||||||
if [ "${ZEEK_CI_COMPILER}" == "gcc" ]; then
|
|
||||||
update-alternatives --install /usr/bin/c++ c++ "/usr/bin/g++-${LATEST_VERSION}" 100
|
|
||||||
update-alternatives --set c++ "/usr/bin/g++-${LATEST_VERSION}"
|
|
||||||
else
|
|
||||||
update-alternatives --install /usr/bin/c++ c++ "/usr/bin/clang++-${LATEST_VERSION}" 100
|
|
||||||
update-alternatives --set c++ "/usr/bin/clang++-${LATEST_VERSION}"
|
|
||||||
fi
|
|
|
@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20241024
|
||||||
|
|
||||||
RUN apt-get update && apt-get -y install \
|
RUN apt-get update && apt-get -y install \
|
||||||
bc \
|
bc \
|
||||||
|
|
|
@ -4,16 +4,15 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20241024
|
||||||
|
|
||||||
RUN apt-get update && apt-get -y install \
|
RUN apt-get update && apt-get -y install \
|
||||||
bc \
|
bc \
|
||||||
bison \
|
bison \
|
||||||
bsdmainutils \
|
bsdmainutils \
|
||||||
ccache \
|
ccache \
|
||||||
clang-19 \
|
clang-18 \
|
||||||
clang++-19 \
|
clang++-18 \
|
||||||
clang-tidy-19 \
|
|
||||||
cmake \
|
cmake \
|
||||||
cppzmq-dev \
|
cppzmq-dev \
|
||||||
curl \
|
curl \
|
||||||
|
@ -32,9 +31,7 @@ RUN apt-get update && apt-get -y install \
|
||||||
make \
|
make \
|
||||||
python3 \
|
python3 \
|
||||||
python3-dev \
|
python3-dev \
|
||||||
python3-git \
|
|
||||||
python3-pip \
|
python3-pip \
|
||||||
python3-semantic-version \
|
|
||||||
redis-server \
|
redis-server \
|
||||||
ruby \
|
ruby \
|
||||||
sqlite3 \
|
sqlite3 \
|
||||||
|
@ -50,10 +47,6 @@ RUN apt-get update && apt-get -y install \
|
||||||
RUN pip3 install --break-system-packages websockets junit2html
|
RUN pip3 install --break-system-packages websockets junit2html
|
||||||
RUN gem install coveralls-lcov
|
RUN gem install coveralls-lcov
|
||||||
|
|
||||||
# Ubuntu installs clang versions with the binaries having the version number
|
|
||||||
# appended. Create a symlink for clang-tidy so cmake finds it correctly.
|
|
||||||
RUN update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-19 1000
|
|
||||||
|
|
||||||
# Download a newer pre-built ccache version that recognizes -fprofile-update=atomic
|
# Download a newer pre-built ccache version that recognizes -fprofile-update=atomic
|
||||||
# which is used when building with --coverage.
|
# which is used when building with --coverage.
|
||||||
#
|
#
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
FROM ubuntu:25.04
|
FROM ubuntu:24.10
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
|
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20241115
|
||||||
|
|
||||||
RUN apt-get update && apt-get -y install \
|
RUN apt-get update && apt-get -y install \
|
||||||
bc \
|
bc \
|
|
@ -28,7 +28,7 @@ cd $build_dir
|
||||||
export ZEEK_SEED_FILE=$source_dir/testing/btest/random.seed
|
export ZEEK_SEED_FILE=$source_dir/testing/btest/random.seed
|
||||||
|
|
||||||
function run_zeek {
|
function run_zeek {
|
||||||
ZEEK_ALLOW_INIT_ERRORS=1 zeek -X $conf_file zeekygen
|
ZEEK_ALLOW_INIT_ERRORS=1 zeek -X $conf_file zeekygen >/dev/null
|
||||||
|
|
||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
echo "Failed running zeek with zeekygen config file $conf_file" >&2
|
echo "Failed running zeek with zeekygen config file $conf_file" >&2
|
||||||
|
|
|
@ -5,7 +5,7 @@ SHELL [ "powershell" ]
|
||||||
|
|
||||||
# A version field to invalidatea Cirrus's build cache when needed, as suggested in
|
# A version field to invalidatea Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION=20250905
|
ENV DOCKERFILE_VERSION 20230801
|
||||||
|
|
||||||
RUN Set-ExecutionPolicy Unrestricted -Force
|
RUN Set-ExecutionPolicy Unrestricted -Force
|
||||||
|
|
||||||
|
@ -14,8 +14,8 @@ RUN [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePoin
|
||||||
iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
|
iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
|
||||||
|
|
||||||
# Install prerequisites
|
# Install prerequisites
|
||||||
RUN choco install -y --no-progress visualstudio2022buildtools --version=117.14.1
|
RUN choco install -y --no-progress visualstudio2019buildtools --version=16.11.11.0
|
||||||
RUN choco install -y --no-progress visualstudio2022-workload-vctools --version=1.0.0 --package-parameters '--add Microsoft.VisualStudio.Component.VC.ATLMFC'
|
RUN choco install -y --no-progress visualstudio2019-workload-vctools --version=1.0.0 --package-parameters '--add Microsoft.VisualStudio.Component.VC.ATLMFC'
|
||||||
RUN choco install -y --no-progress sed
|
RUN choco install -y --no-progress sed
|
||||||
RUN choco install -y --no-progress winflexbison3
|
RUN choco install -y --no-progress winflexbison3
|
||||||
RUN choco install -y --no-progress msysgit
|
RUN choco install -y --no-progress msysgit
|
||||||
|
@ -30,4 +30,4 @@ RUN mkdir C:\build
|
||||||
WORKDIR C:\build
|
WORKDIR C:\build
|
||||||
|
|
||||||
# This entry point starts the developer command prompt and launches the PowerShell shell.
|
# This entry point starts the developer command prompt and launches the PowerShell shell.
|
||||||
ENTRYPOINT ["C:\\Program Files (x86)\\Microsoft Visual Studio\\2022\\BuildTools\\Common7\\Tools\\VsDevCmd.bat", "-arch=x64", "&&", "powershell.exe", "-NoLogo", "-ExecutionPolicy", "Unrestricted"]
|
ENTRYPOINT ["C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\BuildTools\\Common7\\Tools\\VsDevCmd.bat", "-arch=x64", "&&", "powershell.exe", "-NoLogo", "-ExecutionPolicy", "Unrestricted"]
|
|
@ -2,7 +2,7 @@
|
||||||
:: cmd current shell. This path is hard coded to the one on the CI image, but
|
:: cmd current shell. This path is hard coded to the one on the CI image, but
|
||||||
:: can be adjusted if running builds locally. Unfortunately, the initial path
|
:: can be adjusted if running builds locally. Unfortunately, the initial path
|
||||||
:: isn't in the environment so we have to hardcode the whole path.
|
:: isn't in the environment so we have to hardcode the whole path.
|
||||||
call "c:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x86_amd64
|
call "c:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x86_amd64
|
||||||
|
|
||||||
mkdir build
|
mkdir build
|
||||||
cd build
|
cd build
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
:: See build.cmd for documentation on this call.
|
:: See build.cmd for documentation on this call.
|
||||||
call "c:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x86_amd64
|
call "c:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x86_amd64
|
||||||
|
|
||||||
cd build
|
cd build
|
||||||
|
|
||||||
|
|
2
cmake
2
cmake
|
@ -1 +1 @@
|
||||||
Subproject commit d51c6990446cf70cb9c01bca17dad171a1db05d3
|
Subproject commit fd0696f9077933660f7da5f81978e86b3e967647
|
|
@ -2,9 +2,10 @@
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
constexpr char ZEEK_SCRIPT_INSTALL_PATH[] = "@ZEEK_SCRIPT_INSTALL_PATH@";
|
#define ZEEK_SCRIPT_INSTALL_PATH "@ZEEK_SCRIPT_INSTALL_PATH@"
|
||||||
constexpr char ZEEK_PLUGIN_INSTALL_PATH[] = "@ZEEK_PLUGIN_DIR@";
|
#define BRO_PLUGIN_INSTALL_PATH "@ZEEK_PLUGIN_DIR@"
|
||||||
constexpr char DEFAULT_ZEEKPATH[] = "@DEFAULT_ZEEKPATH@";
|
#define ZEEK_PLUGIN_INSTALL_PATH "@ZEEK_PLUGIN_DIR@"
|
||||||
constexpr char ZEEK_SPICY_MODULE_PATH[] = "@ZEEK_SPICY_MODULE_PATH@";
|
#define DEFAULT_ZEEKPATH "@DEFAULT_ZEEKPATH@"
|
||||||
constexpr char ZEEK_SPICY_LIBRARY_PATH[] = "@ZEEK_SPICY_LIBRARY_PATH@";
|
#define ZEEK_SPICY_MODULE_PATH "@ZEEK_SPICY_MODULE_PATH@"
|
||||||
constexpr char ZEEK_SPICY_DATA_PATH[] = "@ZEEK_SPICY_DATA_PATH@";
|
#define ZEEK_SPICY_LIBRARY_PATH "@ZEEK_SPICY_LIBRARY_PATH@"
|
||||||
|
#define ZEEK_SPICY_DATA_PATH "@ZEEK_SPICY_DATA_PATH@"
|
||||||
|
|
|
@ -1,6 +1,4 @@
|
||||||
// See the file "COPYING" in the main distribution directory for copyright.
|
// See the file "COPYING" in the main distribution directory for copyright.
|
||||||
// NOLINTBEGIN(modernize-macro-to-enum)
|
|
||||||
// NOLINTBEGIN(cppcoreguidelines-macro-usage)
|
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
@ -308,6 +306,3 @@
|
||||||
|
|
||||||
/* compiled with Spicy support */
|
/* compiled with Spicy support */
|
||||||
#cmakedefine HAVE_SPICY
|
#cmakedefine HAVE_SPICY
|
||||||
|
|
||||||
// NOLINTEND(cppcoreguidelines-macro-usage)
|
|
||||||
// NOLINTEND(modernize-macro-to-enum)
|
|
||||||
|
|
24
configure
vendored
24
configure
vendored
|
@ -90,9 +90,16 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
|
||||||
--disable-zkg don't install zkg
|
--disable-zkg don't install zkg
|
||||||
|
|
||||||
Required Packages in Non-Standard Locations:
|
Required Packages in Non-Standard Locations:
|
||||||
|
--with-bifcl=PATH path to Zeek BIF compiler executable
|
||||||
|
(useful for cross-compiling)
|
||||||
|
--with-bind=PATH path to BIND install root
|
||||||
|
--with-binpac=PATH path to BinPAC executable
|
||||||
|
(useful for cross-compiling)
|
||||||
--with-bison=PATH path to bison executable
|
--with-bison=PATH path to bison executable
|
||||||
--with-broker=PATH path to Broker install root
|
--with-broker=PATH path to Broker install root
|
||||||
(Zeek uses an embedded version by default)
|
(Zeek uses an embedded version by default)
|
||||||
|
--with-gen-zam=PATH path to Gen-ZAM code generator
|
||||||
|
(Zeek uses an embedded version by default)
|
||||||
--with-flex=PATH path to flex executable
|
--with-flex=PATH path to flex executable
|
||||||
--with-libkqueue=PATH path to libkqueue install root
|
--with-libkqueue=PATH path to libkqueue install root
|
||||||
(Zeek uses an embedded version by default)
|
(Zeek uses an embedded version by default)
|
||||||
|
@ -321,6 +328,9 @@ while [ $# -ne 0 ]; do
|
||||||
--disable-af-packet)
|
--disable-af-packet)
|
||||||
append_cache_entry DISABLE_AF_PACKET BOOL true
|
append_cache_entry DISABLE_AF_PACKET BOOL true
|
||||||
;;
|
;;
|
||||||
|
--disable-archiver)
|
||||||
|
has_disable_archiver=1
|
||||||
|
;;
|
||||||
--disable-auxtools)
|
--disable-auxtools)
|
||||||
append_cache_entry INSTALL_AUX_TOOLS BOOL false
|
append_cache_entry INSTALL_AUX_TOOLS BOOL false
|
||||||
;;
|
;;
|
||||||
|
@ -361,9 +371,15 @@ while [ $# -ne 0 ]; do
|
||||||
--disable-zkg)
|
--disable-zkg)
|
||||||
append_cache_entry INSTALL_ZKG BOOL false
|
append_cache_entry INSTALL_ZKG BOOL false
|
||||||
;;
|
;;
|
||||||
|
--with-bifcl=*)
|
||||||
|
append_cache_entry BIFCL_EXE_PATH PATH $optarg
|
||||||
|
;;
|
||||||
--with-bind=*)
|
--with-bind=*)
|
||||||
append_cache_entry BIND_ROOT_DIR PATH $optarg
|
append_cache_entry BIND_ROOT_DIR PATH $optarg
|
||||||
;;
|
;;
|
||||||
|
--with-binpac=*)
|
||||||
|
append_cache_entry BINPAC_EXE_PATH PATH $optarg
|
||||||
|
;;
|
||||||
--with-bison=*)
|
--with-bison=*)
|
||||||
append_cache_entry BISON_EXECUTABLE PATH $optarg
|
append_cache_entry BISON_EXECUTABLE PATH $optarg
|
||||||
;;
|
;;
|
||||||
|
@ -376,6 +392,9 @@ while [ $# -ne 0 ]; do
|
||||||
--with-flex=*)
|
--with-flex=*)
|
||||||
append_cache_entry FLEX_EXECUTABLE PATH $optarg
|
append_cache_entry FLEX_EXECUTABLE PATH $optarg
|
||||||
;;
|
;;
|
||||||
|
--with-gen-zam=*)
|
||||||
|
append_cache_entry GEN_ZAM_EXE_PATH PATH $optarg
|
||||||
|
;;
|
||||||
--with-geoip=*)
|
--with-geoip=*)
|
||||||
append_cache_entry LibMMDB_ROOT_DIR PATH $optarg
|
append_cache_entry LibMMDB_ROOT_DIR PATH $optarg
|
||||||
;;
|
;;
|
||||||
|
@ -491,3 +510,8 @@ eval ${cmake} 2>&1
|
||||||
echo "# This is the command used to configure this build" >config.status
|
echo "# This is the command used to configure this build" >config.status
|
||||||
echo $command >>config.status
|
echo $command >>config.status
|
||||||
chmod u+x config.status
|
chmod u+x config.status
|
||||||
|
|
||||||
|
if [ $has_disable_archiver -eq 1 ]; then
|
||||||
|
echo
|
||||||
|
echo "NOTE: The --disable-archiver argument no longer has any effect and will be removed in v7.1. zeek-archiver is now part of zeek-aux, so consider --disable-auxtools instead."
|
||||||
|
fi
|
||||||
|
|
2
doc
2
doc
|
@ -1 +1 @@
|
||||||
Subproject commit f28baefb4dbd8a9606f952471d625de8c1c3c658
|
Subproject commit 858dd108b10a7d88852e01dc0134d6c0032f3c60
|
|
@ -1,7 +1,7 @@
|
||||||
# See the file "COPYING" in the main distribution directory for copyright.
|
# See the file "COPYING" in the main distribution directory for copyright.
|
||||||
|
|
||||||
# Layer to build Zeek.
|
# Layer to build Zeek.
|
||||||
FROM debian:13-slim
|
FROM debian:bookworm-slim
|
||||||
|
|
||||||
# Make the shell split commands in the log so we can determine reasons for
|
# Make the shell split commands in the log so we can determine reasons for
|
||||||
# failures more easily.
|
# failures more easily.
|
||||||
|
@ -16,7 +16,6 @@ RUN echo 'Acquire::https::timeout "180";' >> /etc/apt/apt.conf.d/99-timeouts
|
||||||
|
|
||||||
# Configure system for build.
|
# Configure system for build.
|
||||||
RUN apt-get -q update \
|
RUN apt-get -q update \
|
||||||
&& apt-get upgrade -q -y \
|
|
||||||
&& apt-get install -q -y --no-install-recommends \
|
&& apt-get install -q -y --no-install-recommends \
|
||||||
bind9 \
|
bind9 \
|
||||||
bison \
|
bison \
|
||||||
|
@ -37,7 +36,7 @@ RUN apt-get -q update \
|
||||||
libz-dev \
|
libz-dev \
|
||||||
make \
|
make \
|
||||||
python3-minimal \
|
python3-minimal \
|
||||||
python3-dev \
|
python3.11-dev \
|
||||||
swig \
|
swig \
|
||||||
ninja-build \
|
ninja-build \
|
||||||
python3-pip \
|
python3-pip \
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
# See the file "COPYING" in the main distribution directory for copyright.
|
# See the file "COPYING" in the main distribution directory for copyright.
|
||||||
|
|
||||||
# Final layer containing all artifacts.
|
# Final layer containing all artifacts.
|
||||||
FROM debian:13-slim
|
FROM debian:bookworm-slim
|
||||||
|
|
||||||
# Make the shell split commands in the log so we can determine reasons for
|
# Make the shell split commands in the log so we can determine reasons for
|
||||||
# failures more easily.
|
# failures more easily.
|
||||||
|
@ -15,21 +15,18 @@ RUN echo 'Acquire::http::timeout "180";' > /etc/apt/apt.conf.d/99-timeouts
|
||||||
RUN echo 'Acquire::https::timeout "180";' >> /etc/apt/apt.conf.d/99-timeouts
|
RUN echo 'Acquire::https::timeout "180";' >> /etc/apt/apt.conf.d/99-timeouts
|
||||||
|
|
||||||
RUN apt-get -q update \
|
RUN apt-get -q update \
|
||||||
&& apt-get upgrade -q -y \
|
|
||||||
&& apt-get install -q -y --no-install-recommends \
|
&& apt-get install -q -y --no-install-recommends \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
git \
|
git \
|
||||||
jq \
|
jq \
|
||||||
libmaxminddb0 \
|
libmaxminddb0 \
|
||||||
libnode115 \
|
libnode108 \
|
||||||
libpcap0.8 \
|
libpcap0.8 \
|
||||||
libpython3.13 \
|
libpython3.11 \
|
||||||
libssl3 \
|
libssl3 \
|
||||||
libuv1 \
|
libuv1 \
|
||||||
libz1 \
|
libz1 \
|
||||||
libzmq5 \
|
libzmq5 \
|
||||||
net-tools \
|
|
||||||
procps \
|
|
||||||
python3-git \
|
python3-git \
|
||||||
python3-minimal \
|
python3-minimal \
|
||||||
python3-semantic-version \
|
python3-semantic-version \
|
||||||
|
|
|
@ -60,13 +60,13 @@ const pe_mime_types = { "application/x-dosexec" };
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Files::register_for_mime_types(Files::ANALYZER_PE, pe_mime_types);
|
Files::register_for_mime_types(Files::ANALYZER_PE, pe_mime_types);
|
||||||
Log::create_stream(LOG, Log::Stream($columns=Info, $ev=log_pe, $path="pe", $policy=log_policy));
|
Log::create_stream(LOG, [$columns=Info, $ev=log_pe, $path="pe", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
hook set_file(f: fa_file) &priority=5
|
hook set_file(f: fa_file) &priority=5
|
||||||
{
|
{
|
||||||
if ( ! f?$pe )
|
if ( ! f?$pe )
|
||||||
f$pe = PE::Info($ts=f$info$ts, $id=f$id);
|
f$pe = [$ts=f$info$ts, $id=f$id];
|
||||||
}
|
}
|
||||||
|
|
||||||
event pe_dos_header(f: fa_file, h: PE::DOSHeader) &priority=5
|
event pe_dos_header(f: fa_file, h: PE::DOSHeader) &priority=5
|
||||||
|
|
|
@ -40,7 +40,7 @@ export {
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(LOG, Log::Stream($columns=Info, $ev=log_ocsp, $path="ocsp", $policy=log_policy));
|
Log::create_stream(LOG, [$columns=Info, $ev=log_ocsp, $path="ocsp", $policy=log_policy]);
|
||||||
Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response");
|
Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -105,29 +105,6 @@ export {
|
||||||
|
|
||||||
## Event for accessing logged records.
|
## Event for accessing logged records.
|
||||||
global log_x509: event(rec: Info);
|
global log_x509: event(rec: Info);
|
||||||
|
|
||||||
## The maximum number of bytes that a single string field can contain when
|
|
||||||
## logging. If a string reaches this limit, the log output for the field will be
|
|
||||||
## truncated. Setting this to zero disables the limiting.
|
|
||||||
##
|
|
||||||
## .. zeek:see:: Log::default_max_field_string_bytes
|
|
||||||
const default_max_field_string_bytes = Log::default_max_field_string_bytes &redef;
|
|
||||||
|
|
||||||
## The maximum number of elements a single container field can contain when
|
|
||||||
## logging. If a container reaches this limit, the log output for the field will
|
|
||||||
## be truncated. Setting this to zero disables the limiting.
|
|
||||||
##
|
|
||||||
## .. zeek:see:: Log::default_max_field_container_elements
|
|
||||||
const default_max_field_container_elements = 500 &redef;
|
|
||||||
|
|
||||||
## The maximum total number of container elements a record may log. This is the
|
|
||||||
## sum of all container elements logged for the record. If this limit is reached,
|
|
||||||
## all further containers will be logged as empty containers. If the limit is
|
|
||||||
## reached while processing a container, the container will be truncated in the
|
|
||||||
## output. Setting this to zero disables the limiting.
|
|
||||||
##
|
|
||||||
## .. zeek:see:: Log::default_max_total_container_elements
|
|
||||||
const default_max_total_container_elements = 1500 &redef;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
global known_log_certs_with_broker: set[LogCertHash] &create_expire=relog_known_certificates_after &backend=Broker::MEMORY;
|
global known_log_certs_with_broker: set[LogCertHash] &create_expire=relog_known_certificates_after &backend=Broker::MEMORY;
|
||||||
|
@ -140,12 +117,7 @@ redef record Files::Info += {
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
# x509 can have some very large certificates and very large sets of URIs. Expand the log size filters
|
Log::create_stream(X509::LOG, [$columns=Info, $ev=log_x509, $path="x509", $policy=log_policy]);
|
||||||
# so that we're not truncating those.
|
|
||||||
Log::create_stream(X509::LOG, Log::Stream($columns=Info, $ev=log_x509, $path="x509", $policy=log_policy,
|
|
||||||
$max_field_string_bytes=X509::default_max_field_string_bytes,
|
|
||||||
$max_field_container_elements=X509::default_max_field_container_elements,
|
|
||||||
$max_total_container_elements=X509::default_max_total_container_elements));
|
|
||||||
|
|
||||||
# We use MIME types internally to distinguish between user and CA certificates.
|
# We use MIME types internally to distinguish between user and CA certificates.
|
||||||
# The first certificate in a connection always gets tagged as user-cert, all
|
# The first certificate in a connection always gets tagged as user-cert, all
|
||||||
|
@ -195,7 +167,7 @@ event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certifi
|
||||||
{
|
{
|
||||||
local der_cert = x509_get_certificate_string(cert_ref);
|
local der_cert = x509_get_certificate_string(cert_ref);
|
||||||
local fp = hash_function(der_cert);
|
local fp = hash_function(der_cert);
|
||||||
f$info$x509 = X509::Info($ts=f$info$ts, $fingerprint=fp, $certificate=cert, $handle=cert_ref);
|
f$info$x509 = [$ts=f$info$ts, $fingerprint=fp, $certificate=cert, $handle=cert_ref];
|
||||||
if ( f$info$mime_type == "application/x-x509-user-cert" )
|
if ( f$info$mime_type == "application/x-x509-user-cert" )
|
||||||
f$info$x509$host_cert = T;
|
f$info$x509$host_cert = T;
|
||||||
if ( f$is_orig )
|
if ( f$is_orig )
|
||||||
|
@ -253,3 +225,4 @@ event file_state_remove(f: fa_file) &priority=5
|
||||||
|
|
||||||
Log::write(LOG, f$info$x509);
|
Log::write(LOG, f$info$x509);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,12 +1,35 @@
|
||||||
##! Disables analyzers if protocol violations occur, and adds service information
|
##! Activates port-independent protocol detection and selectively disables
|
||||||
##! to connection log.
|
##! analyzers if protocol violations occur.
|
||||||
|
|
||||||
@load ./main
|
|
||||||
|
|
||||||
module DPD;
|
module DPD;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## Analyzers which you don't want to remove on violations.
|
## Add the DPD logging stream identifier.
|
||||||
|
redef enum Log::ID += { LOG };
|
||||||
|
|
||||||
|
## A default logging policy hook for the stream.
|
||||||
|
global log_policy: Log::PolicyHook;
|
||||||
|
|
||||||
|
## The record type defining the columns to log in the DPD logging stream.
|
||||||
|
type Info: record {
|
||||||
|
## Timestamp for when protocol analysis failed.
|
||||||
|
ts: time &log;
|
||||||
|
## Connection unique ID.
|
||||||
|
uid: string &log;
|
||||||
|
## Connection ID containing the 4-tuple which identifies endpoints.
|
||||||
|
id: conn_id &log;
|
||||||
|
## Transport protocol for the violation.
|
||||||
|
proto: transport_proto &log;
|
||||||
|
## The analyzer that generated the violation.
|
||||||
|
analyzer: string &log;
|
||||||
|
## The textual reason for the analysis failure.
|
||||||
|
failure_reason: string &log;
|
||||||
|
};
|
||||||
|
|
||||||
|
## Deprecated, please see https://github.com/zeek/zeek/pull/4200 for details
|
||||||
|
option max_violations: table[Analyzer::Tag] of count = table() &deprecated="Remove in v8.1: This has become non-functional in Zeek 7.2, see PR #4200" &default = 5;
|
||||||
|
|
||||||
|
## Analyzers which you don't want to throw
|
||||||
option ignore_violations: set[Analyzer::Tag] = set();
|
option ignore_violations: set[Analyzer::Tag] = set();
|
||||||
|
|
||||||
## Ignore violations which go this many bytes into the connection.
|
## Ignore violations which go this many bytes into the connection.
|
||||||
|
@ -22,12 +45,17 @@ export {
|
||||||
}
|
}
|
||||||
|
|
||||||
redef record connection += {
|
redef record connection += {
|
||||||
## The set of prototol analyzers that were removed due to a protocol
|
dpd: Info &optional;
|
||||||
## violation after the same analyzer had previously been confirmed.
|
## The set of services (analyzers) for which Zeek has observed a
|
||||||
failed_analyzers: set[string] &default=set() &ordered;
|
## violation after the same service had previously been confirmed.
|
||||||
|
service_violation: set[string] &default=set() &ordered;
|
||||||
};
|
};
|
||||||
|
|
||||||
# Add confirmed protocol analyzers to conn.log service field
|
event zeek_init() &priority=5
|
||||||
|
{
|
||||||
|
Log::create_stream(DPD::LOG, [$columns=Info, $path="dpd", $policy=log_policy]);
|
||||||
|
}
|
||||||
|
|
||||||
event analyzer_confirmation_info(atype: AllAnalyzers::Tag, info: AnalyzerConfirmationInfo) &priority=10
|
event analyzer_confirmation_info(atype: AllAnalyzers::Tag, info: AnalyzerConfirmationInfo) &priority=10
|
||||||
{
|
{
|
||||||
if ( ! is_protocol_analyzer(atype) && ! is_packet_analyzer(atype) )
|
if ( ! is_protocol_analyzer(atype) && ! is_packet_analyzer(atype) )
|
||||||
|
@ -41,11 +69,9 @@ event analyzer_confirmation_info(atype: AllAnalyzers::Tag, info: AnalyzerConfirm
|
||||||
add c$service[analyzer];
|
add c$service[analyzer];
|
||||||
}
|
}
|
||||||
|
|
||||||
# Remove failed analyzers from service field and add them to c$failed_analyzers
|
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo) &priority=10
|
||||||
# Low priority to allow other handlers to check if the analyzer was confirmed
|
|
||||||
event analyzer_failed(ts: time, atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo) &priority=-5
|
|
||||||
{
|
{
|
||||||
if ( ! is_protocol_analyzer(atype) )
|
if ( ! is_protocol_analyzer(atype) && ! is_packet_analyzer(atype) )
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if ( ! info?$c )
|
if ( ! info?$c )
|
||||||
|
@ -64,21 +90,35 @@ event analyzer_failed(ts: time, atype: AllAnalyzers::Tag, info: AnalyzerViolatio
|
||||||
|
|
||||||
# if statement is separate, to allow repeated removal of service, in case there are several
|
# if statement is separate, to allow repeated removal of service, in case there are several
|
||||||
# confirmation and violation events
|
# confirmation and violation events
|
||||||
if ( analyzer !in c$failed_analyzers )
|
if ( analyzer in c$service_violation )
|
||||||
add c$failed_analyzers[analyzer];
|
return;
|
||||||
|
|
||||||
# add "-service" to the list of services on removal due to violation, if analyzer was confirmed before
|
add c$service_violation[analyzer];
|
||||||
if ( track_removed_services_in_connection && Analyzer::name(atype) in c$service )
|
|
||||||
|
local dpd: Info;
|
||||||
|
dpd$ts = network_time();
|
||||||
|
dpd$uid = c$uid;
|
||||||
|
dpd$id = c$id;
|
||||||
|
dpd$proto = get_port_transport_proto(c$id$orig_p);
|
||||||
|
dpd$analyzer = analyzer;
|
||||||
|
|
||||||
|
# Encode data into the reason if there's any as done for the old
|
||||||
|
# analyzer_violation event, previously.
|
||||||
|
local reason = info$reason;
|
||||||
|
if ( info?$data )
|
||||||
{
|
{
|
||||||
local rname = cat("-", Analyzer::name(atype));
|
local ellipsis = |info$data| > 40 ? "..." : "";
|
||||||
if ( rname !in c$service )
|
local data = info$data[0:40];
|
||||||
add c$service[rname];
|
reason = fmt("%s [%s%s]", reason, data, ellipsis);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dpd$failure_reason = reason;
|
||||||
|
c$dpd = dpd;
|
||||||
}
|
}
|
||||||
|
|
||||||
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo ) &priority=5
|
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo ) &priority=5
|
||||||
{
|
{
|
||||||
if ( ! is_protocol_analyzer(atype) )
|
if ( ! is_protocol_analyzer(atype) && ! is_packet_analyzer(atype) )
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if ( ! info?$c || ! info?$aid )
|
if ( ! info?$c || ! info?$aid )
|
||||||
|
@ -93,17 +133,29 @@ event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationI
|
||||||
if ( ignore_violations_after > 0 && size > ignore_violations_after )
|
if ( ignore_violations_after > 0 && size > ignore_violations_after )
|
||||||
return;
|
return;
|
||||||
|
|
||||||
# analyzer already was removed or connection finished
|
|
||||||
# let's still log this.
|
|
||||||
if ( lookup_connection_analyzer_id(c$id, atype) == 0 )
|
|
||||||
{
|
|
||||||
event analyzer_failed(network_time(), atype, info);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
local disabled = disable_analyzer(c$id, aid, F);
|
local disabled = disable_analyzer(c$id, aid, F);
|
||||||
|
|
||||||
# If analyzer was disabled, send failed event
|
# add "-service" to the list of services on removal due to violation, if analyzer was confirmed before
|
||||||
if ( disabled )
|
if ( track_removed_services_in_connection && disabled && Analyzer::name(atype) in c$service )
|
||||||
event analyzer_failed(network_time(), atype, info);
|
{
|
||||||
|
local rname = cat("-", Analyzer::name(atype));
|
||||||
|
if ( rname !in c$service )
|
||||||
|
add c$service[rname];
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo ) &priority=-5
|
||||||
|
{
|
||||||
|
if ( ! is_protocol_analyzer(atype) && ! is_packet_analyzer(atype) )
|
||||||
|
return;
|
||||||
|
|
||||||
|
if ( ! info?$c )
|
||||||
|
return;
|
||||||
|
|
||||||
|
if ( info$c?$dpd )
|
||||||
|
{
|
||||||
|
Log::write(DPD::LOG, info$c$dpd);
|
||||||
|
delete info$c$dpd;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,8 @@
|
||||||
##! Logging analyzer violations into analyzer.log
|
##! Logging analyzer confirmations and violations into analyzer.log
|
||||||
|
|
||||||
|
@load base/frameworks/config
|
||||||
@load base/frameworks/logging
|
@load base/frameworks/logging
|
||||||
|
|
||||||
@load ./main
|
@load ./main
|
||||||
|
|
||||||
module Analyzer::Logging;
|
module Analyzer::Logging;
|
||||||
|
@ -9,10 +11,16 @@ export {
|
||||||
## Add the analyzer logging stream identifier.
|
## Add the analyzer logging stream identifier.
|
||||||
redef enum Log::ID += { LOG };
|
redef enum Log::ID += { LOG };
|
||||||
|
|
||||||
|
## A default logging policy hook for the stream.
|
||||||
|
global log_policy: Log::PolicyHook;
|
||||||
|
|
||||||
## The record type defining the columns to log in the analyzer logging stream.
|
## The record type defining the columns to log in the analyzer logging stream.
|
||||||
type Info: record {
|
type Info: record {
|
||||||
## Timestamp of the violation.
|
## Timestamp of confirmation or violation.
|
||||||
ts: time &log;
|
ts: time &log;
|
||||||
|
## What caused this log entry to be produced. This can
|
||||||
|
## currently be "violation" or "confirmation".
|
||||||
|
cause: string &log;
|
||||||
## The kind of analyzer involved. Currently "packet", "file"
|
## The kind of analyzer involved. Currently "packet", "file"
|
||||||
## or "protocol".
|
## or "protocol".
|
||||||
analyzer_kind: string &log;
|
analyzer_kind: string &log;
|
||||||
|
@ -23,58 +31,117 @@ export {
|
||||||
uid: string &log &optional;
|
uid: string &log &optional;
|
||||||
## File UID if available.
|
## File UID if available.
|
||||||
fuid: string &log &optional;
|
fuid: string &log &optional;
|
||||||
## Connection identifier if available.
|
## Connection identifier if available
|
||||||
id: conn_id &log &optional;
|
id: conn_id &log &optional;
|
||||||
## Transport protocol for the violation, if available.
|
|
||||||
proto: transport_proto &log &optional;
|
|
||||||
## Failure or violation reason, if available.
|
## Failure or violation reason, if available.
|
||||||
failure_reason: string &log;
|
failure_reason: string &log &optional;
|
||||||
|
|
||||||
## Data causing failure or violation if available. Truncated
|
## Data causing failure or violation if available. Truncated
|
||||||
## to :zeek:see:`Analyzer::Logging::failure_data_max_size`.
|
## to :zeek:see:`Analyzer::Logging::failure_data_max_size`.
|
||||||
failure_data: string &log &optional;
|
failure_data: string &log &optional;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
## Enable logging of analyzer violations and optionally confirmations
|
||||||
|
## when :zeek:see:`Analyzer::Logging::include_confirmations` is set.
|
||||||
|
option enable = T;
|
||||||
|
|
||||||
|
## Enable analyzer_confirmation. They are usually less interesting
|
||||||
|
## outside of development of analyzers or troubleshooting scenarios.
|
||||||
|
## Setting this option may also generated multiple log entries per
|
||||||
|
## connection, minimally one for each conn.log entry with a populated
|
||||||
|
## service field.
|
||||||
|
option include_confirmations = F;
|
||||||
|
|
||||||
|
## Enable tracking of analyzers getting disabled. This is mostly
|
||||||
|
## interesting for troubleshooting of analyzers in DPD scenarios.
|
||||||
|
## Setting this option may also generated multiple log entries per
|
||||||
|
## connection.
|
||||||
|
option include_disabling = F;
|
||||||
|
|
||||||
## If a violation contains information about the data causing it,
|
## If a violation contains information about the data causing it,
|
||||||
## include at most this many bytes of it in the log.
|
## include at most this many bytes of it in the log.
|
||||||
option failure_data_max_size = 40;
|
option failure_data_max_size = 40;
|
||||||
|
|
||||||
## An event that can be handled to access the :zeek:type:`Analyzer::Logging::Info`
|
## Set of analyzers for which to not log confirmations or violations.
|
||||||
## record as it is sent on to the logging framework.
|
option ignore_analyzers: set[AllAnalyzers::Tag] = set();
|
||||||
global log_analyzer: event(rec: Info);
|
|
||||||
|
|
||||||
## A default logging policy hook for the stream.
|
|
||||||
global log_policy: Log::PolicyHook;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(LOG, Log::Stream($columns=Info, $path="analyzer", $ev=log_analyzer, $policy=log_policy));
|
Log::create_stream(LOG, [$columns=Info, $path="analyzer", $policy=log_policy,
|
||||||
|
$event_groups=set("Analyzer::Logging")]);
|
||||||
|
|
||||||
|
local enable_handler = function(id: string, new_value: bool): bool {
|
||||||
|
if ( new_value )
|
||||||
|
Log::enable_stream(LOG);
|
||||||
|
else
|
||||||
|
Log::disable_stream(LOG);
|
||||||
|
|
||||||
|
return new_value;
|
||||||
|
};
|
||||||
|
Option::set_change_handler("Analyzer::Logging::enable", enable_handler);
|
||||||
|
|
||||||
|
local include_confirmations_handler = function(id: string, new_value: bool): bool {
|
||||||
|
if ( new_value )
|
||||||
|
enable_event_group("Analyzer::Logging::include_confirmations");
|
||||||
|
else
|
||||||
|
disable_event_group("Analyzer::Logging::include_confirmations");
|
||||||
|
|
||||||
|
return new_value;
|
||||||
|
};
|
||||||
|
Option::set_change_handler("Analyzer::Logging::include_confirmations",
|
||||||
|
include_confirmations_handler);
|
||||||
|
|
||||||
|
local include_disabling_handler = function(id: string, new_value: bool): bool {
|
||||||
|
if ( new_value )
|
||||||
|
enable_event_group("Analyzer::Logging::include_disabling");
|
||||||
|
else
|
||||||
|
disable_event_group("Analyzer::Logging::include_disabling");
|
||||||
|
|
||||||
|
return new_value;
|
||||||
|
};
|
||||||
|
Option::set_change_handler("Analyzer::Logging::include_disabling",
|
||||||
|
include_disabling_handler);
|
||||||
|
|
||||||
|
# Call the handlers directly with the current values to avoid config
|
||||||
|
# framework interactions like creating entries in config.log.
|
||||||
|
enable_handler("Analyzer::Logging::enable", Analyzer::Logging::enable);
|
||||||
|
include_confirmations_handler("Analyzer::Logging::include_confirmations",
|
||||||
|
Analyzer::Logging::include_confirmations);
|
||||||
|
include_disabling_handler("Analyzer::Logging::include_disabling",
|
||||||
|
Analyzer::Logging::include_disabling);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function log_analyzer_failure(ts: time, atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo)
|
function analyzer_kind(atype: AllAnalyzers::Tag): string
|
||||||
{
|
{
|
||||||
local rec = Info(
|
if ( is_protocol_analyzer(atype) )
|
||||||
$ts=ts,
|
return "protocol";
|
||||||
$analyzer_kind=Analyzer::kind(atype),
|
else if ( is_packet_analyzer(atype) )
|
||||||
$analyzer_name=Analyzer::name(atype),
|
return "packet";
|
||||||
$failure_reason=info$reason
|
else if ( is_file_analyzer(atype) )
|
||||||
);
|
return "file";
|
||||||
|
|
||||||
if ( info?$c )
|
Reporter::warning(fmt("Unknown kind of analyzer %s", atype));
|
||||||
{
|
return "unknown";
|
||||||
rec$id = info$c$id;
|
|
||||||
rec$uid = info$c$uid;
|
|
||||||
rec$proto = get_port_transport_proto(info$c$id$orig_p);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if ( info?$f )
|
function populate_from_conn(rec: Info, c: connection)
|
||||||
{
|
{
|
||||||
rec$fuid = info$f$id;
|
rec$id = c$id;
|
||||||
|
rec$uid = c$uid;
|
||||||
|
}
|
||||||
|
|
||||||
|
function populate_from_file(rec: Info, f: fa_file)
|
||||||
|
{
|
||||||
|
rec$fuid = f$id;
|
||||||
# If the confirmation didn't have a connection, but the
|
# If the confirmation didn't have a connection, but the
|
||||||
# fa_file object has exactly one, use it.
|
# fa_file object has exactly one, use it.
|
||||||
if ( ! rec?$uid && info$f?$conns && |info$f$conns| == 1 )
|
if ( ! rec?$uid && f?$conns && |f$conns| == 1 )
|
||||||
{
|
{
|
||||||
for ( _, c in info$f$conns )
|
for ( _, c in f$conns )
|
||||||
{
|
{
|
||||||
rec$id = c$id;
|
rec$id = c$id;
|
||||||
rec$uid = c$uid;
|
rec$uid = c$uid;
|
||||||
|
@ -82,6 +149,46 @@ function log_analyzer_failure(ts: time, atype: AllAnalyzers::Tag, info: Analyzer
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
event analyzer_confirmation_info(atype: AllAnalyzers::Tag, info: AnalyzerConfirmationInfo) &group="Analyzer::Logging::include_confirmations"
|
||||||
|
{
|
||||||
|
if ( atype in ignore_analyzers )
|
||||||
|
return;
|
||||||
|
|
||||||
|
local rec = Info(
|
||||||
|
$ts=network_time(),
|
||||||
|
$cause="confirmation",
|
||||||
|
$analyzer_kind=analyzer_kind(atype),
|
||||||
|
$analyzer_name=Analyzer::name(atype),
|
||||||
|
);
|
||||||
|
|
||||||
|
if ( info?$c )
|
||||||
|
populate_from_conn(rec, info$c);
|
||||||
|
|
||||||
|
if ( info?$f )
|
||||||
|
populate_from_file(rec, info$f);
|
||||||
|
|
||||||
|
Log::write(LOG, rec);
|
||||||
|
}
|
||||||
|
|
||||||
|
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo) &priority=6
|
||||||
|
{
|
||||||
|
if ( atype in ignore_analyzers )
|
||||||
|
return;
|
||||||
|
|
||||||
|
local rec = Info(
|
||||||
|
$ts=network_time(),
|
||||||
|
$cause="violation",
|
||||||
|
$analyzer_kind=analyzer_kind(atype),
|
||||||
|
$analyzer_name=Analyzer::name(atype),
|
||||||
|
$failure_reason=info$reason,
|
||||||
|
);
|
||||||
|
|
||||||
|
if ( info?$c )
|
||||||
|
populate_from_conn(rec, info$c);
|
||||||
|
|
||||||
|
if ( info?$f )
|
||||||
|
populate_from_file(rec, info$f);
|
||||||
|
|
||||||
if ( info?$data )
|
if ( info?$data )
|
||||||
{
|
{
|
||||||
if ( failure_data_max_size > 0 )
|
if ( failure_data_max_size > 0 )
|
||||||
|
@ -93,31 +200,19 @@ function log_analyzer_failure(ts: time, atype: AllAnalyzers::Tag, info: Analyzer
|
||||||
Log::write(LOG, rec);
|
Log::write(LOG, rec);
|
||||||
}
|
}
|
||||||
|
|
||||||
# event currently is only raised for protocol analyzers; we do not fail packet and file analyzers
|
hook Analyzer::disabling_analyzer(c: connection, atype: AllAnalyzers::Tag, aid: count) &priority=-1000 &group="Analyzer::Logging::include_disabling"
|
||||||
event analyzer_failed(ts: time, atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo)
|
|
||||||
{
|
{
|
||||||
if ( ! is_protocol_analyzer(atype) )
|
if ( atype in ignore_analyzers )
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if ( ! info?$c )
|
local rec = Info(
|
||||||
return;
|
$ts=network_time(),
|
||||||
|
$cause="disabled",
|
||||||
|
$analyzer_kind=analyzer_kind(atype),
|
||||||
|
$analyzer_name=Analyzer::name(atype),
|
||||||
|
);
|
||||||
|
|
||||||
# log only for previously confirmed service that did not already log violation
|
populate_from_conn(rec, c);
|
||||||
# note that analyzers can fail repeatedly in some circumstances - e.g. when they
|
|
||||||
# are re-attached by the dynamic protocol detection due to later data.
|
|
||||||
local analyzer_name = Analyzer::name(atype);
|
|
||||||
if ( analyzer_name !in info$c$service || analyzer_name in info$c$failed_analyzers )
|
|
||||||
return;
|
|
||||||
|
|
||||||
log_analyzer_failure(ts, atype, info);
|
Log::write(LOG, rec);
|
||||||
}
|
}
|
||||||
|
|
||||||
# log packet and file analyzers here separately
|
|
||||||
event analyzer_violation_info(atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo )
|
|
||||||
{
|
|
||||||
if ( is_protocol_analyzer(atype) )
|
|
||||||
return;
|
|
||||||
|
|
||||||
log_analyzer_failure(network_time(), atype, info);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
|
@ -88,15 +88,6 @@ export {
|
||||||
## Returns: The analyzer name corresponding to the tag.
|
## Returns: The analyzer name corresponding to the tag.
|
||||||
global name: function(tag: Analyzer::Tag) : string;
|
global name: function(tag: Analyzer::Tag) : string;
|
||||||
|
|
||||||
## Translates an analyzer type to a string with the analyzer's type.
|
|
||||||
##
|
|
||||||
## Possible values are "protocol", "packet", "file", or "unknown".
|
|
||||||
##
|
|
||||||
## tag: The analyzer tag.
|
|
||||||
##
|
|
||||||
## Returns: The analyzer kind corresponding to the tag.
|
|
||||||
global kind: function(tag: Analyzer::Tag) : string;
|
|
||||||
|
|
||||||
## Check whether the given analyzer name exists.
|
## Check whether the given analyzer name exists.
|
||||||
##
|
##
|
||||||
## This can be used before calling :zeek:see:`Analyzer::get_tag` to
|
## This can be used before calling :zeek:see:`Analyzer::get_tag` to
|
||||||
|
@ -172,23 +163,6 @@ export {
|
||||||
##
|
##
|
||||||
## This set can be added to via :zeek:see:`redef`.
|
## This set can be added to via :zeek:see:`redef`.
|
||||||
global requested_analyzers: set[AllAnalyzers::Tag] = {} &redef;
|
global requested_analyzers: set[AllAnalyzers::Tag] = {} &redef;
|
||||||
|
|
||||||
## Event that is raised when an analyzer raised a service violation and was
|
|
||||||
## removed.
|
|
||||||
##
|
|
||||||
## The event is also raised if the analyzer already was no longer active by
|
|
||||||
## the time that the violation was handled - so if it happens at the very
|
|
||||||
## end of a connection.
|
|
||||||
##
|
|
||||||
## Currently this event is only raised for protocol analyzers, as packet
|
|
||||||
## and file analyzers are never actively removed/disabled.
|
|
||||||
##
|
|
||||||
## ts: time at which the violation occurred
|
|
||||||
##
|
|
||||||
## atype: atype: The analyzer tag, such as ``Analyzer::ANALYZER_HTTP``.
|
|
||||||
##
|
|
||||||
##info: Details about the violation. This record should include a :zeek:type:`connection`
|
|
||||||
global analyzer_failed: event(ts: time, atype: AllAnalyzers::Tag, info: AnalyzerViolationInfo);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@load base/bif/analyzer.bif
|
@load base/bif/analyzer.bif
|
||||||
|
@ -272,19 +246,6 @@ function name(atype: AllAnalyzers::Tag) : string
|
||||||
return __name(atype);
|
return __name(atype);
|
||||||
}
|
}
|
||||||
|
|
||||||
function kind(atype: AllAnalyzers::Tag): string
|
|
||||||
{
|
|
||||||
if ( is_protocol_analyzer(atype) )
|
|
||||||
return "protocol";
|
|
||||||
else if ( is_packet_analyzer(atype) )
|
|
||||||
return "packet";
|
|
||||||
else if ( is_file_analyzer(atype) )
|
|
||||||
return "file";
|
|
||||||
|
|
||||||
Reporter::warning(fmt("Unknown kind of analyzer %s", atype));
|
|
||||||
return "unknown";
|
|
||||||
}
|
|
||||||
|
|
||||||
function has_tag(name: string): bool
|
function has_tag(name: string): bool
|
||||||
{
|
{
|
||||||
return __has_tag(name);
|
return __has_tag(name);
|
||||||
|
|
|
@ -47,17 +47,17 @@ export {
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(Broker::LOG, Log::Stream($columns=Info, $path="broker", $policy=log_policy));
|
Log::create_stream(Broker::LOG, [$columns=Info, $path="broker", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function log_status(ev: string, endpoint: EndpointInfo, msg: string)
|
function log_status(ev: string, endpoint: EndpointInfo, msg: string)
|
||||||
{
|
{
|
||||||
local r: Info;
|
local r: Info;
|
||||||
|
|
||||||
r = Broker::Info($ts = network_time(),
|
r = [$ts = network_time(),
|
||||||
$ev = ev,
|
$ev = ev,
|
||||||
$ty = STATUS,
|
$ty = STATUS,
|
||||||
$message = msg);
|
$message = msg];
|
||||||
|
|
||||||
if ( endpoint?$network )
|
if ( endpoint?$network )
|
||||||
r$peer = endpoint$network;
|
r$peer = endpoint$network;
|
||||||
|
@ -87,10 +87,10 @@ event Broker::error(code: ErrorCode, msg: string)
|
||||||
ev = subst_string(ev, "_", "-");
|
ev = subst_string(ev, "_", "-");
|
||||||
ev = to_lower(ev);
|
ev = to_lower(ev);
|
||||||
|
|
||||||
Log::write(Broker::LOG, Info($ts = network_time(),
|
Log::write(Broker::LOG, [$ts = network_time(),
|
||||||
$ev = ev,
|
$ev = ev,
|
||||||
$ty = ERROR,
|
$ty = ERROR,
|
||||||
$message = msg));
|
$message = msg]);
|
||||||
|
|
||||||
Reporter::error(fmt("Broker error (%s): %s", code, msg));
|
Reporter::error(fmt("Broker error (%s): %s", code, msg));
|
||||||
}
|
}
|
||||||
|
@ -115,8 +115,8 @@ event Broker::internal_log_event(lvl: LogSeverityLevel, id: string, description:
|
||||||
severity = Broker::DEBUG_EVENT;
|
severity = Broker::DEBUG_EVENT;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
Log::write(Broker::LOG, Info($ts = network_time(),
|
Log::write(Broker::LOG, [$ts = network_time(),
|
||||||
$ty = severity,
|
$ty = severity,
|
||||||
$ev = id,
|
$ev = id,
|
||||||
$message = description));
|
$message = description]);
|
||||||
}
|
}
|
||||||
|
|
|
@ -28,7 +28,7 @@ export {
|
||||||
|
|
||||||
## Default address on which to listen for WebSocket connections.
|
## Default address on which to listen for WebSocket connections.
|
||||||
##
|
##
|
||||||
## .. zeek:see:: Cluster::listen_websocket
|
## .. zeek:see:: Broker::listen_websocket
|
||||||
const default_listen_address_websocket = getenv("ZEEK_DEFAULT_LISTEN_ADDRESS") &redef;
|
const default_listen_address_websocket = getenv("ZEEK_DEFAULT_LISTEN_ADDRESS") &redef;
|
||||||
|
|
||||||
## Default interval to retry connecting to a peer if it cannot be made to
|
## Default interval to retry connecting to a peer if it cannot be made to
|
||||||
|
@ -69,6 +69,11 @@ export {
|
||||||
## all peers.
|
## all peers.
|
||||||
const ssl_keyfile = "" &redef;
|
const ssl_keyfile = "" &redef;
|
||||||
|
|
||||||
|
## The number of buffered messages at the Broker/CAF layer after which
|
||||||
|
## a subscriber considers themselves congested (i.e. tune the congestion
|
||||||
|
## control mechanisms).
|
||||||
|
const congestion_queue_size = 200 &redef &deprecated="Remove in v8.1. Non-functional since v5.0";
|
||||||
|
|
||||||
## The max number of log entries per log stream to batch together when
|
## The max number of log entries per log stream to batch together when
|
||||||
## sending log messages to a remote logger.
|
## sending log messages to a remote logger.
|
||||||
const log_batch_size = 400 &redef;
|
const log_batch_size = 400 &redef;
|
||||||
|
@ -314,6 +319,27 @@ export {
|
||||||
p: port &default = default_port,
|
p: port &default = default_port,
|
||||||
retry: interval &default = default_listen_retry): port;
|
retry: interval &default = default_listen_retry): port;
|
||||||
|
|
||||||
|
## Listen for remote connections using WebSocket.
|
||||||
|
##
|
||||||
|
## a: an address string on which to accept connections, e.g.
|
||||||
|
## "127.0.0.1". An empty string refers to INADDR_ANY.
|
||||||
|
##
|
||||||
|
## p: the TCP port to listen on. The value 0 means that the OS should choose
|
||||||
|
## the next available free port.
|
||||||
|
##
|
||||||
|
## retry: If non-zero, retries listening in regular intervals if the port cannot be
|
||||||
|
## acquired immediately. 0 disables retries. If the
|
||||||
|
## ZEEK_DEFAULT_LISTEN_RETRY environment variable is set (as number
|
||||||
|
## of seconds), it overrides any value given here.
|
||||||
|
##
|
||||||
|
## Returns: the bound port or 0/? on failure.
|
||||||
|
##
|
||||||
|
## .. zeek:see:: Broker::status
|
||||||
|
global listen_websocket: function(a: string &default = default_listen_address_websocket,
|
||||||
|
p: port &default = default_port_websocket,
|
||||||
|
retry: interval &default = default_listen_retry): port
|
||||||
|
&deprecated="Remove in v8.1. Switch to Cluster::listen_websocket() instead.";
|
||||||
|
|
||||||
## Initiate a remote connection.
|
## Initiate a remote connection.
|
||||||
##
|
##
|
||||||
## a: an address to connect to, e.g. "localhost" or "127.0.0.1".
|
## a: an address to connect to, e.g. "localhost" or "127.0.0.1".
|
||||||
|
@ -424,6 +450,29 @@ export {
|
||||||
##
|
##
|
||||||
## Returns: true if a new event forwarding/subscription is now registered.
|
## Returns: true if a new event forwarding/subscription is now registered.
|
||||||
global forward: function(topic_prefix: string): bool;
|
global forward: function(topic_prefix: string): bool;
|
||||||
|
|
||||||
|
## Automatically send an event to any interested peers whenever it is
|
||||||
|
## locally dispatched. (For example, using "event my_event(...);" in a
|
||||||
|
## script.)
|
||||||
|
##
|
||||||
|
## topic: a topic string associated with the event message.
|
||||||
|
## Peers advertise interest by registering a subscription to some
|
||||||
|
## prefix of this topic name.
|
||||||
|
##
|
||||||
|
## ev: a Zeek event value.
|
||||||
|
##
|
||||||
|
## Returns: true if automatic event sending is now enabled.
|
||||||
|
global auto_publish: function(topic: string, ev: any): bool &deprecated="Remove in v8.1. Switch to explicit Cluster::publish() calls. Auto-publish won't work with all cluster backends.";
|
||||||
|
|
||||||
|
## Stop automatically sending an event to peers upon local dispatch.
|
||||||
|
##
|
||||||
|
## topic: a topic originally given to :zeek:see:`Broker::auto_publish`.
|
||||||
|
##
|
||||||
|
## ev: an event originally given to :zeek:see:`Broker::auto_publish`.
|
||||||
|
##
|
||||||
|
## Returns: true if automatic events will not occur for the topic/event
|
||||||
|
## pair.
|
||||||
|
global auto_unpublish: function(topic: string, ev: any): bool &deprecated="Remove in v8.1. See Broker::auto_publish()";
|
||||||
}
|
}
|
||||||
|
|
||||||
@load base/bif/comm.bif
|
@load base/bif/comm.bif
|
||||||
|
@ -465,6 +514,31 @@ function listen(a: string, p: port, retry: interval): port
|
||||||
return bound;
|
return bound;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
event retry_listen_websocket(a: string, p: port, retry: interval)
|
||||||
|
{
|
||||||
|
@pragma push ignore-deprecations
|
||||||
|
listen_websocket(a, p, retry);
|
||||||
|
@pragma pop ignore-deprecations
|
||||||
|
}
|
||||||
|
|
||||||
|
function listen_websocket(a: string, p: port, retry: interval): port
|
||||||
|
{
|
||||||
|
local bound = __listen(a, p, Broker::WEBSOCKET);
|
||||||
|
|
||||||
|
if ( bound == 0/tcp )
|
||||||
|
{
|
||||||
|
local e = getenv("ZEEK_DEFAULT_LISTEN_RETRY");
|
||||||
|
|
||||||
|
if ( e != "" )
|
||||||
|
retry = double_to_interval(to_double(e));
|
||||||
|
|
||||||
|
if ( retry != 0secs )
|
||||||
|
schedule retry { retry_listen_websocket(a, p, retry) };
|
||||||
|
}
|
||||||
|
|
||||||
|
return bound;
|
||||||
|
}
|
||||||
|
|
||||||
function peer(a: string, p: port, retry: interval): bool
|
function peer(a: string, p: port, retry: interval): bool
|
||||||
{
|
{
|
||||||
return __peer(a, p, retry);
|
return __peer(a, p, retry);
|
||||||
|
@ -519,3 +593,13 @@ function unsubscribe(topic_prefix: string): bool
|
||||||
{
|
{
|
||||||
return __unsubscribe(topic_prefix);
|
return __unsubscribe(topic_prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function auto_publish(topic: string, ev: any): bool
|
||||||
|
{
|
||||||
|
return __auto_publish(topic, ev);
|
||||||
|
}
|
||||||
|
|
||||||
|
function auto_unpublish(topic: string, ev: any): bool
|
||||||
|
{
|
||||||
|
return __auto_unpublish(topic, ev);
|
||||||
|
}
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
# Load the core cluster support.
|
# Load the core cluster support.
|
||||||
@load ./main
|
@load ./main
|
||||||
@load ./pools
|
@load ./pools
|
||||||
@load ./telemetry
|
|
||||||
|
|
||||||
@if ( Cluster::is_enabled() )
|
@if ( Cluster::is_enabled() )
|
||||||
|
|
||||||
|
|
|
@ -5,13 +5,13 @@
|
||||||
|
|
||||||
module Cluster;
|
module Cluster;
|
||||||
|
|
||||||
global broker_backpressure_disconnects_cf = Telemetry::register_counter_family(Telemetry::MetricOpts(
|
global broker_backpressure_disconnects_cf = Telemetry::register_counter_family([
|
||||||
$prefix="zeek",
|
$prefix="zeek",
|
||||||
$name="broker-backpressure-disconnects",
|
$name="broker-backpressure-disconnects",
|
||||||
$unit="",
|
$unit="",
|
||||||
$label_names=vector("peer"),
|
$label_names=vector("peer"),
|
||||||
$help_text="Number of Broker peerings dropped due to a neighbor falling behind in message I/O",
|
$help_text="Number of Broker peerings dropped due to a neighbor falling behind in message I/O",
|
||||||
));
|
]);
|
||||||
|
|
||||||
event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string)
|
event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string)
|
||||||
{
|
{
|
||||||
|
|
|
@ -7,13 +7,13 @@ module Cluster;
|
||||||
## This gauge tracks the current number of locally queued messages in each
|
## This gauge tracks the current number of locally queued messages in each
|
||||||
## Broker peering's send buffer. The "peer" label identifies the remote side of
|
## Broker peering's send buffer. The "peer" label identifies the remote side of
|
||||||
## the peering, containing a Zeek cluster node name.
|
## the peering, containing a Zeek cluster node name.
|
||||||
global broker_peer_buffer_messages_gf = Telemetry::register_gauge_family(Telemetry::MetricOpts(
|
global broker_peer_buffer_messages_gf = Telemetry::register_gauge_family([
|
||||||
$prefix="zeek",
|
$prefix="zeek",
|
||||||
$name="broker-peer-buffer-messages",
|
$name="broker-peer-buffer-messages",
|
||||||
$unit="",
|
$unit="",
|
||||||
$label_names=vector("peer"),
|
$label_names=vector("peer"),
|
||||||
$help_text="Number of messages queued in Broker's send buffers",
|
$help_text="Number of messages queued in Broker's send buffers",
|
||||||
));
|
]);
|
||||||
|
|
||||||
## This gauge tracks recent maximum queue lengths for each Broker peering's send
|
## This gauge tracks recent maximum queue lengths for each Broker peering's send
|
||||||
## buffer. Most of the time the send buffers are nearly empty, so this gauge
|
## buffer. Most of the time the send buffers are nearly empty, so this gauge
|
||||||
|
@ -23,82 +23,47 @@ global broker_peer_buffer_messages_gf = Telemetry::register_gauge_family(Telemet
|
||||||
## observed message. That is, Zeek keeps a timestamp of when the window started,
|
## observed message. That is, Zeek keeps a timestamp of when the window started,
|
||||||
## and once it notices that the interval has passed, it moves the start of the
|
## and once it notices that the interval has passed, it moves the start of the
|
||||||
## window to current time.
|
## window to current time.
|
||||||
global broker_peer_buffer_recent_max_messages_gf = Telemetry::register_gauge_family(Telemetry::MetricOpts(
|
global broker_peer_buffer_recent_max_messages_gf = Telemetry::register_gauge_family([
|
||||||
$prefix="zeek",
|
$prefix="zeek",
|
||||||
$name="broker-peer-buffer-recent-max-messages",
|
$name="broker-peer-buffer-recent-max-messages",
|
||||||
$unit="",
|
$unit="",
|
||||||
$label_names=vector("peer"),
|
$label_names=vector("peer"),
|
||||||
$help_text="Maximum number of messages recently queued in Broker's send buffers",
|
$help_text="Maximum number of messages recently queued in Broker's send buffers",
|
||||||
));
|
]);
|
||||||
|
|
||||||
## This counter tracks for each Broker peering the number of times its send
|
## This counter tracks for each Broker peering the number of times its send
|
||||||
## buffer has overflowed. For the "disconnect" policy this can at most be 1,
|
## buffer has overflowed. For the "disconnect" policy this can at most be 1,
|
||||||
## since Broker stops the peering at this time. For the "drop_oldest" and
|
## since Broker stops the peering at this time. For the "drop_oldest" and
|
||||||
## "drop_newest" policies (see :zeek:see:`Broker:peer_overflow_policy`) the count
|
## "drop_newest" policies (see :zeek:see:`Broker:peer_overflow_policy`) the count
|
||||||
## instead reflects the number of messages lost.
|
## instead reflects the number of messages lost.
|
||||||
global broker_peer_buffer_overflows_cf = Telemetry::register_counter_family(Telemetry::MetricOpts(
|
global broker_peer_buffer_overflows_cf = Telemetry::register_counter_family([
|
||||||
$prefix="zeek",
|
$prefix="zeek",
|
||||||
$name="broker-peer-buffer-overflows",
|
$name="broker-peer-buffer-overflows",
|
||||||
$unit="",
|
$unit="",
|
||||||
$label_names=vector("peer"),
|
$label_names=vector("peer"),
|
||||||
$help_text="Number of overflows in Broker's send buffers",
|
$help_text="Number of overflows in Broker's send buffers",
|
||||||
));
|
]);
|
||||||
|
|
||||||
|
|
||||||
# A helper to track overflow counts over past peerings as well as the current
|
|
||||||
# one. The peer_id field allows us to identify when the counter has reset: a
|
|
||||||
# Broker ID different from the one on file means it's a new peering.
|
|
||||||
type EpochData: record {
|
|
||||||
peer_id: string;
|
|
||||||
num_overflows: count &default=0;
|
|
||||||
num_past_overflows: count &default=0;
|
|
||||||
};
|
|
||||||
|
|
||||||
# This maps from a cluster node name to its EpochData.
|
|
||||||
global peering_epoch_data: table[string] of EpochData;
|
|
||||||
|
|
||||||
hook Telemetry::sync()
|
hook Telemetry::sync()
|
||||||
{
|
{
|
||||||
local peers = Broker::peering_stats();
|
local peers = Broker::peering_stats();
|
||||||
local nn: NamedNode;
|
local nn: NamedNode;
|
||||||
local labels: vector of string;
|
|
||||||
local ed: EpochData;
|
|
||||||
|
|
||||||
for ( peer_id, stats in peers )
|
for ( peer, stats in peers )
|
||||||
{
|
{
|
||||||
# Translate the Broker IDs to Zeek-level node names. We skip
|
# Translate the Broker IDs to Zeek-level node names. We skip
|
||||||
# telemetry for peers where this mapping fails, i.e. ones for
|
# telemetry for peers where this mapping fails, i.e. ones for
|
||||||
# connections to external systems.
|
# connections to external systems.
|
||||||
nn = nodeid_to_node(peer_id);
|
nn = nodeid_to_node(peer);
|
||||||
|
|
||||||
if ( |nn$name| == 0 )
|
if ( |nn$name| > 0 )
|
||||||
next;
|
|
||||||
|
|
||||||
labels = vector(nn$name);
|
|
||||||
|
|
||||||
Telemetry::gauge_family_set(broker_peer_buffer_messages_gf,
|
|
||||||
labels, stats$num_queued);
|
|
||||||
Telemetry::gauge_family_set(broker_peer_buffer_recent_max_messages_gf,
|
|
||||||
labels, stats$max_queued_recently);
|
|
||||||
|
|
||||||
if ( nn$name !in peering_epoch_data )
|
|
||||||
peering_epoch_data[nn$name] = EpochData($peer_id=peer_id);
|
|
||||||
|
|
||||||
ed = peering_epoch_data[nn$name];
|
|
||||||
|
|
||||||
if ( peer_id != ed$peer_id )
|
|
||||||
{
|
{
|
||||||
# A new peering. Ensure that we account for overflows in
|
Telemetry::gauge_family_set(broker_peer_buffer_messages_gf,
|
||||||
# past ones. There is a risk here that we might have
|
vector(nn$name), stats$num_queued);
|
||||||
# missed a peering altogether if we scrape infrequently,
|
Telemetry::gauge_family_set(broker_peer_buffer_recent_max_messages_gf,
|
||||||
# but re-peering should be a rare event.
|
vector(nn$name), stats$max_queued_recently);
|
||||||
ed$peer_id = peer_id;
|
|
||||||
ed$num_past_overflows += ed$num_overflows;
|
|
||||||
}
|
|
||||||
|
|
||||||
ed$num_overflows = stats$num_overflows;
|
|
||||||
|
|
||||||
Telemetry::counter_family_set(broker_peer_buffer_overflows_cf,
|
Telemetry::counter_family_set(broker_peer_buffer_overflows_cf,
|
||||||
labels, ed$num_past_overflows + ed$num_overflows);
|
vector(nn$name), stats$num_overflows);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -85,9 +85,6 @@ export {
|
||||||
## is incremented when the maximum queue size is reached.
|
## is incremented when the maximum queue size is reached.
|
||||||
const default_websocket_max_event_queue_size = 32 &redef;
|
const default_websocket_max_event_queue_size = 32 &redef;
|
||||||
|
|
||||||
## The default ping interval for WebSocket clients.
|
|
||||||
const default_websocket_ping_interval = 5 sec &redef;
|
|
||||||
|
|
||||||
## Setting a default dir will, for persistent backends that have not
|
## Setting a default dir will, for persistent backends that have not
|
||||||
## been given an explicit file path via :zeek:see:`Cluster::stores`,
|
## been given an explicit file path via :zeek:see:`Cluster::stores`,
|
||||||
## automatically create a path within this dir that is based on the name of
|
## automatically create a path within this dir that is based on the name of
|
||||||
|
@ -362,16 +359,12 @@ export {
|
||||||
|
|
||||||
## WebSocket server options to pass to :zeek:see:`Cluster::listen_websocket`.
|
## WebSocket server options to pass to :zeek:see:`Cluster::listen_websocket`.
|
||||||
type WebSocketServerOptions: record {
|
type WebSocketServerOptions: record {
|
||||||
## The address to listen on, cannot be used together with ``listen_host``.
|
## The host address to listen on.
|
||||||
listen_addr: addr &optional;
|
listen_host: string;
|
||||||
## The port the WebSocket server is supposed to listen on.
|
## The port the WebSocket server is supposed to listen on.
|
||||||
listen_port: port;
|
listen_port: port;
|
||||||
## The maximum event queue size for this server.
|
## The maximum event queue size for this server.
|
||||||
max_event_queue_size: count &default=default_websocket_max_event_queue_size;
|
max_event_queue_size: count &default=default_websocket_max_event_queue_size;
|
||||||
## Ping interval to use. A WebSocket client not responding to
|
|
||||||
## the pings will be disconnected. Set to a negative value to
|
|
||||||
## disable pings. Subsecond intervals are currently not supported.
|
|
||||||
ping_interval: interval &default=default_websocket_ping_interval;
|
|
||||||
## The TLS options used for this WebSocket server. By default,
|
## The TLS options used for this WebSocket server. By default,
|
||||||
## TLS is disabled. See also :zeek:see:`Cluster::WebSocketTLSOptions`.
|
## TLS is disabled. See also :zeek:see:`Cluster::WebSocketTLSOptions`.
|
||||||
tls_options: WebSocketTLSOptions &default=WebSocketTLSOptions();
|
tls_options: WebSocketTLSOptions &default=WebSocketTLSOptions();
|
||||||
|
@ -396,23 +389,7 @@ export {
|
||||||
type EndpointInfo: record {
|
type EndpointInfo: record {
|
||||||
id: string;
|
id: string;
|
||||||
network: NetworkInfo;
|
network: NetworkInfo;
|
||||||
## The value of the X-Application-Name HTTP header, if any.
|
|
||||||
application_name: string &optional;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
## A hook invoked for every :zeek:see:`Cluster::subscribe` call.
|
|
||||||
##
|
|
||||||
## Breaking from this hook has no effect.
|
|
||||||
##
|
|
||||||
## topic: The topic string as given to :zeek:see:`Cluster::subscribe`.
|
|
||||||
global on_subscribe: hook(topic: string);
|
|
||||||
|
|
||||||
## A hook invoked for every :zeek:see:`Cluster::subscribe` call.
|
|
||||||
##
|
|
||||||
## Breaking from this hook has no effect.
|
|
||||||
##
|
|
||||||
## topic: The topic string as given to :zeek:see:`Cluster::subscribe`.
|
|
||||||
global on_unsubscribe: hook(topic: string);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Needs declaration of Cluster::Event type.
|
# Needs declaration of Cluster::Event type.
|
||||||
|
@ -504,7 +481,7 @@ function nodeid_to_node(id: string): NamedNode
|
||||||
return NamedNode($name=name, $node=n);
|
return NamedNode($name=name, $node=n);
|
||||||
}
|
}
|
||||||
|
|
||||||
return NamedNode($name="", $node=Node($node_type=NONE, $ip=0.0.0.0));
|
return NamedNode($name="", $node=[$node_type=NONE, $ip=0.0.0.0]);
|
||||||
}
|
}
|
||||||
|
|
||||||
event Cluster::hello(name: string, id: string) &priority=10
|
event Cluster::hello(name: string, id: string) &priority=10
|
||||||
|
@ -584,7 +561,7 @@ event zeek_init() &priority=5
|
||||||
terminate();
|
terminate();
|
||||||
}
|
}
|
||||||
|
|
||||||
Log::create_stream(Cluster::LOG, Log::Stream($columns=Info, $path="cluster", $policy=log_policy));
|
Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo
|
function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo
|
||||||
|
@ -666,7 +643,7 @@ function create_store(name: string, persistent: bool &default=F): Cluster::Store
|
||||||
|
|
||||||
function log(msg: string)
|
function log(msg: string)
|
||||||
{
|
{
|
||||||
Log::write(Cluster::LOG, Info($ts = network_time(), $node = node, $message = msg));
|
Log::write(Cluster::LOG, [$ts = network_time(), $node = node, $message = msg]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function init(): bool
|
function init(): bool
|
||||||
|
@ -689,26 +666,17 @@ function listen_websocket(options: WebSocketServerOptions): bool
|
||||||
return Cluster::__listen_websocket(options);
|
return Cluster::__listen_websocket(options);
|
||||||
}
|
}
|
||||||
|
|
||||||
function format_endpoint_info(ei: EndpointInfo): string
|
|
||||||
{
|
|
||||||
local s = fmt("'%s' (%s:%d)", ei$id, ei$network$address, ei$network$bound_port);
|
|
||||||
if ( ei?$application_name )
|
|
||||||
s += fmt(" application_name=%s", ei$application_name);
|
|
||||||
return s;
|
|
||||||
}
|
|
||||||
|
|
||||||
event websocket_client_added(endpoint: EndpointInfo, subscriptions: string_vec)
|
event websocket_client_added(endpoint: EndpointInfo, subscriptions: string_vec)
|
||||||
{
|
{
|
||||||
local msg = fmt("WebSocket client %s subscribed to %s",
|
local msg = fmt("WebSocket client '%s' (%s:%d) subscribed to %s",
|
||||||
format_endpoint_info(endpoint), subscriptions);
|
endpoint$id, endpoint$network$address, endpoint$network$bound_port, subscriptions);
|
||||||
Cluster::log(msg);
|
Cluster::log(msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
event websocket_client_lost(endpoint: EndpointInfo, code: count, reason: string)
|
event websocket_client_lost(endpoint: EndpointInfo)
|
||||||
{
|
{
|
||||||
local msg = fmt("WebSocket client %s gone with code %d%s",
|
local msg = fmt("WebSocket client '%s' (%s:%d) gone",
|
||||||
format_endpoint_info(endpoint), code,
|
endpoint$id, endpoint$network$address, endpoint$network$bound_port);
|
||||||
|reason| > 0 ? fmt(" and reason '%s'", reason) : "");
|
|
||||||
Cluster::log(msg);
|
Cluster::log(msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -42,7 +42,7 @@ function __init_cluster_nodes(): bool
|
||||||
if ( endp$role in rolemap )
|
if ( endp$role in rolemap )
|
||||||
typ = rolemap[endp$role];
|
typ = rolemap[endp$role];
|
||||||
|
|
||||||
cnode = Cluster::Node($node_type=typ, $ip=endp$host, $p=endp$p);
|
cnode = [$node_type=typ, $ip=endp$host, $p=endp$p];
|
||||||
if ( |manager_name| > 0 && cnode$node_type != Cluster::MANAGER )
|
if ( |manager_name| > 0 && cnode$node_type != Cluster::MANAGER )
|
||||||
cnode$manager = manager_name;
|
cnode$manager = manager_name;
|
||||||
if ( endp?$metrics_port )
|
if ( endp?$metrics_port )
|
||||||
|
|
|
@ -1,39 +0,0 @@
|
||||||
## Module for cluster telemetry.
|
|
||||||
module Cluster::Telemetry;
|
|
||||||
|
|
||||||
export {
|
|
||||||
type Type: enum {
|
|
||||||
## Creates counter metrics for incoming and for outgoing
|
|
||||||
## events without labels.
|
|
||||||
INFO,
|
|
||||||
## Creates counter metrics for incoming and outgoing events
|
|
||||||
## labeled with handler and normalized topic names.
|
|
||||||
VERBOSE,
|
|
||||||
## Creates histogram metrics using the serialized message size
|
|
||||||
## for events, labeled by topic, handler and script location
|
|
||||||
## (outgoing only).
|
|
||||||
DEBUG,
|
|
||||||
};
|
|
||||||
|
|
||||||
## The telemetry types to enable for the core backend.
|
|
||||||
const core_metrics: set[Type] = {
|
|
||||||
INFO,
|
|
||||||
} &redef;
|
|
||||||
|
|
||||||
## The telemetry types to enable for WebSocket backends.
|
|
||||||
const websocket_metrics: set[Type] = {
|
|
||||||
INFO,
|
|
||||||
} &redef;
|
|
||||||
|
|
||||||
## Table used for normalizing topic names that contain random parts.
|
|
||||||
## Map to an empty string to skip recording a specific metric
|
|
||||||
## completely.
|
|
||||||
const topic_normalizations: table[pattern] of string = {
|
|
||||||
[/^zeek\/cluster\/nodeid\/.*/] = "zeek/cluster/nodeid/__normalized__",
|
|
||||||
} &ordered &redef;
|
|
||||||
|
|
||||||
## For the DEBUG metrics, the histogram buckets to use.
|
|
||||||
const message_size_bounds: vector of double = {
|
|
||||||
10.0, 50.0, 100.0, 500.0, 1000.0, 5000.0, 10000.0, 50000.0,
|
|
||||||
} &redef;
|
|
||||||
}
|
|
|
@ -40,14 +40,14 @@ event zeek_init() &priority=5
|
||||||
return;
|
return;
|
||||||
|
|
||||||
for ( fi in config_files )
|
for ( fi in config_files )
|
||||||
Input::add_table(Input::TableDescription($reader=Input::READER_CONFIG,
|
Input::add_table([$reader=Input::READER_CONFIG,
|
||||||
$mode=Input::REREAD,
|
$mode=Input::REREAD,
|
||||||
$source=fi,
|
$source=fi,
|
||||||
$name=cat("config-", fi),
|
$name=cat("config-", fi),
|
||||||
$idx=ConfigItem,
|
$idx=ConfigItem,
|
||||||
$val=ConfigItem,
|
$val=ConfigItem,
|
||||||
$want_record=F,
|
$want_record=F,
|
||||||
$destination=current_config));
|
$destination=current_config]);
|
||||||
}
|
}
|
||||||
|
|
||||||
event InputConfig::new_value(name: string, source: string, id: string, value: any)
|
event InputConfig::new_value(name: string, source: string, id: string, value: any)
|
||||||
|
@ -67,11 +67,11 @@ function read_config(filename: string)
|
||||||
|
|
||||||
local iname = cat("config-oneshot-", filename);
|
local iname = cat("config-oneshot-", filename);
|
||||||
|
|
||||||
Input::add_event(Input::EventDescription($reader=Input::READER_CONFIG,
|
Input::add_event([$reader=Input::READER_CONFIG,
|
||||||
$mode=Input::MANUAL,
|
$mode=Input::MANUAL,
|
||||||
$source=filename,
|
$source=filename,
|
||||||
$name=iname,
|
$name=iname,
|
||||||
$fields=EventFields,
|
$fields=EventFields,
|
||||||
$ev=config_line));
|
$ev=config_line]);
|
||||||
Input::remove(iname);
|
Input::remove(iname);
|
||||||
}
|
}
|
||||||
|
|
|
@ -153,7 +153,7 @@ function config_option_changed(ID: string, new_value: any, location: string): an
|
||||||
|
|
||||||
event zeek_init() &priority=10
|
event zeek_init() &priority=10
|
||||||
{
|
{
|
||||||
Log::create_stream(LOG, Log::Stream($columns=Info, $ev=log_config, $path="config", $policy=log_policy));
|
Log::create_stream(LOG, [$columns=Info, $ev=log_config, $path="config", $policy=log_policy]);
|
||||||
|
|
||||||
# Limit logging to the manager - everyone else just feeds off it.
|
# Limit logging to the manager - everyone else just feeds off it.
|
||||||
@if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
|
@if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
|
||||||
|
|
|
@ -341,7 +341,7 @@ global analyzer_add_callbacks: table[Files::Tag] of function(f: fa_file, args: A
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(Files::LOG, Log::Stream($columns=Info, $ev=log_files, $path="files", $policy=log_policy));
|
Log::create_stream(Files::LOG, [$columns=Info, $ev=log_files, $path="files", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function set_info(f: fa_file)
|
function set_info(f: fa_file)
|
||||||
|
|
|
@ -105,30 +105,10 @@ event Intel::insert_indicator(item: Intel::Item) &priority=5
|
||||||
Intel::_insert(item, F);
|
Intel::_insert(item, F);
|
||||||
}
|
}
|
||||||
|
|
||||||
function invoke_indicator_hook(store: MinDataStore, h: hook(v: string, t: Intel::Type))
|
|
||||||
{
|
|
||||||
for ( a in store$host_data )
|
|
||||||
hook h(cat(a), Intel::ADDR);
|
|
||||||
|
|
||||||
for ( sn in store$subnet_data)
|
|
||||||
hook h(cat(sn), Intel::SUBNET);
|
|
||||||
|
|
||||||
for ( [indicator_value, indicator_type] in store$string_data )
|
|
||||||
hook h(indicator_value, indicator_type);
|
|
||||||
}
|
|
||||||
|
|
||||||
# Handling of a complete MinDataStore snapshot
|
# Handling of a complete MinDataStore snapshot
|
||||||
#
|
|
||||||
# Invoke the removed and inserted hooks using the old and new min data store
|
|
||||||
# instances, respectively. The way this event is used, the original
|
|
||||||
# min_data_store should essentially be empty.
|
|
||||||
event new_min_data_store(store: MinDataStore)
|
event new_min_data_store(store: MinDataStore)
|
||||||
{
|
{
|
||||||
invoke_indicator_hook(min_data_store, Intel::indicator_removed);
|
|
||||||
|
|
||||||
min_data_store = store;
|
min_data_store = store;
|
||||||
|
|
||||||
invoke_indicator_hook(min_data_store, Intel::indicator_inserted);
|
|
||||||
}
|
}
|
||||||
@endif
|
@endif
|
||||||
|
|
||||||
|
|
|
@ -68,13 +68,13 @@ event zeek_init() &priority=5
|
||||||
if ( |path_prefix| > 0 && sub_bytes(a_file, 0, 1) != "/" )
|
if ( |path_prefix| > 0 && sub_bytes(a_file, 0, 1) != "/" )
|
||||||
source = cat(rstrip(path_prefix, "/"), "/", a_file);
|
source = cat(rstrip(path_prefix, "/"), "/", a_file);
|
||||||
|
|
||||||
Input::add_event(Input::EventDescription($source=source,
|
Input::add_event([$source=source,
|
||||||
$reader=Input::READER_ASCII,
|
$reader=Input::READER_ASCII,
|
||||||
$mode=Input::REREAD,
|
$mode=Input::REREAD,
|
||||||
$name=cat("intel-", a_file),
|
$name=cat("intel-", a_file),
|
||||||
$fields=Intel::Item,
|
$fields=Intel::Item,
|
||||||
$ev=Intel::read_entry,
|
$ev=Intel::read_entry,
|
||||||
$error_ev=Intel::read_error));
|
$error_ev=Intel::read_error]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -207,35 +207,6 @@ export {
|
||||||
## item: The intel item that should be inserted.
|
## item: The intel item that should be inserted.
|
||||||
global filter_item: hook(item: Intel::Item);
|
global filter_item: hook(item: Intel::Item);
|
||||||
|
|
||||||
## This hook is invoked when a new indicator has been inserted into
|
|
||||||
## the min data store for the first time.
|
|
||||||
##
|
|
||||||
## Calls to :zeek:see:`Intel::seen` with a matching indicator value
|
|
||||||
## and type will result in matches.
|
|
||||||
##
|
|
||||||
## Subsequent inserts of the same indicator type and value do not
|
|
||||||
## invoke this hook. Breaking from this hook has no effect.
|
|
||||||
##
|
|
||||||
## indicator: The indicator value.
|
|
||||||
##
|
|
||||||
## indicator_type: The indicator type.
|
|
||||||
##
|
|
||||||
## .. zeek::see:: Intel::indicator_removed
|
|
||||||
global indicator_inserted: hook(indicator: string, indiator_type: Type);
|
|
||||||
|
|
||||||
## This hook is invoked when an indicator has been removed from
|
|
||||||
## the min data store.
|
|
||||||
##
|
|
||||||
## After this hooks runs, :zeek:see:`Intel::seen` for the indicator
|
|
||||||
## will not return any matches. Breaking from this hook has no effect.
|
|
||||||
##
|
|
||||||
## indicator: The indicator value.
|
|
||||||
##
|
|
||||||
## indicator_type: The indicator type.
|
|
||||||
##
|
|
||||||
## .. zeek::see:: Intel::indicator_inserted
|
|
||||||
global indicator_removed: hook(indicator: string, indiator_type: Type);
|
|
||||||
|
|
||||||
global log_intel: event(rec: Info);
|
global log_intel: event(rec: Info);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -280,7 +251,7 @@ global min_data_store: MinDataStore &redef;
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(LOG, Log::Stream($columns=Info, $ev=log_intel, $path="intel", $policy=log_policy));
|
Log::create_stream(LOG, [$columns=Info, $ev=log_intel, $path="intel", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
# Function that abstracts expiration of different types.
|
# Function that abstracts expiration of different types.
|
||||||
|
@ -289,7 +260,7 @@ function expire_item(indicator: string, indicator_type: Type, metas: set[MetaDat
|
||||||
if ( hook item_expired(indicator, indicator_type, metas) )
|
if ( hook item_expired(indicator, indicator_type, metas) )
|
||||||
return item_expiration;
|
return item_expiration;
|
||||||
else
|
else
|
||||||
remove(Item($indicator=indicator, $indicator_type=indicator_type, $meta=MetaData($source="")), T);
|
remove([$indicator=indicator, $indicator_type=indicator_type, $meta=[$source=""]], T);
|
||||||
return 0 sec;
|
return 0 sec;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -536,44 +507,18 @@ function _insert(item: Item, first_dispatch: bool &default = T)
|
||||||
# All intelligence is case insensitive at the moment.
|
# All intelligence is case insensitive at the moment.
|
||||||
local lower_indicator = to_lower(item$indicator);
|
local lower_indicator = to_lower(item$indicator);
|
||||||
|
|
||||||
# Track if the indicator was inserted into the min_data_store.
|
|
||||||
# It's tempting to just use is_new above, but it seems that only works
|
|
||||||
# correctly on a worker if the manager never spuriously sends a
|
|
||||||
# Intel::insert_item(), so better to determine this locally based
|
|
||||||
# on the actual contents of the min_data_store.
|
|
||||||
local inserted = F;
|
|
||||||
local inserted_value = "";
|
|
||||||
|
|
||||||
# Insert indicator into MinDataStore (might exist already).
|
# Insert indicator into MinDataStore (might exist already).
|
||||||
switch ( item$indicator_type )
|
switch ( item$indicator_type )
|
||||||
{
|
{
|
||||||
case ADDR:
|
case ADDR:
|
||||||
local host = to_addr(item$indicator);
|
local host = to_addr(item$indicator);
|
||||||
if ( host !in min_data_store$host_data )
|
|
||||||
{
|
|
||||||
inserted = T;
|
|
||||||
inserted_value = cat(host);
|
|
||||||
}
|
|
||||||
|
|
||||||
add min_data_store$host_data[host];
|
add min_data_store$host_data[host];
|
||||||
break;
|
break;
|
||||||
case SUBNET:
|
case SUBNET:
|
||||||
local net = to_subnet(item$indicator);
|
local net = to_subnet(item$indicator);
|
||||||
if ( net !in min_data_store$subnet_data )
|
|
||||||
{
|
|
||||||
inserted = T;
|
|
||||||
inserted_value = cat(net);
|
|
||||||
}
|
|
||||||
|
|
||||||
add min_data_store$subnet_data[net];
|
add min_data_store$subnet_data[net];
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
if ( [lower_indicator, item$indicator_type] !in min_data_store$string_data )
|
|
||||||
{
|
|
||||||
inserted = T;
|
|
||||||
inserted_value = lower_indicator;
|
|
||||||
}
|
|
||||||
|
|
||||||
add min_data_store$string_data[lower_indicator, item$indicator_type];
|
add min_data_store$string_data[lower_indicator, item$indicator_type];
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -588,9 +533,6 @@ function _insert(item: Item, first_dispatch: bool &default = T)
|
||||||
# Announce a (possibly) new item if this is the first dispatch and
|
# Announce a (possibly) new item if this is the first dispatch and
|
||||||
# we know it is new or have to assume that on a worker.
|
# we know it is new or have to assume that on a worker.
|
||||||
event Intel::new_item(item);
|
event Intel::new_item(item);
|
||||||
|
|
||||||
if ( inserted )
|
|
||||||
hook Intel::indicator_inserted(inserted_value, item$indicator_type);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function insert(item: Item)
|
function insert(item: Item)
|
||||||
|
@ -690,43 +632,18 @@ function remove(item: Item, purge_indicator: bool)
|
||||||
# Handling of indicator removal in minimal data stores.
|
# Handling of indicator removal in minimal data stores.
|
||||||
event remove_indicator(item: Item)
|
event remove_indicator(item: Item)
|
||||||
{
|
{
|
||||||
local removed = F;
|
|
||||||
local removed_value = "";
|
|
||||||
|
|
||||||
switch ( item$indicator_type )
|
switch ( item$indicator_type )
|
||||||
{
|
{
|
||||||
case ADDR:
|
case ADDR:
|
||||||
local host = to_addr(item$indicator);
|
local host = to_addr(item$indicator);
|
||||||
if ( host in min_data_store$host_data )
|
|
||||||
{
|
|
||||||
removed = T;
|
|
||||||
removed_value = cat(host);
|
|
||||||
}
|
|
||||||
|
|
||||||
delete min_data_store$host_data[host];
|
delete min_data_store$host_data[host];
|
||||||
break;
|
break;
|
||||||
case SUBNET:
|
case SUBNET:
|
||||||
local net = to_subnet(item$indicator);
|
local net = to_subnet(item$indicator);
|
||||||
if ( net in min_data_store$subnet_data )
|
|
||||||
{
|
|
||||||
removed = T;
|
|
||||||
removed_value = cat(net);
|
|
||||||
}
|
|
||||||
|
|
||||||
delete min_data_store$subnet_data[net];
|
delete min_data_store$subnet_data[net];
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
local indicator_value = to_lower(item$indicator);
|
delete min_data_store$string_data[to_lower(item$indicator), item$indicator_type];
|
||||||
if ( [indicator_value, item$indicator_type] in min_data_store$string_data )
|
|
||||||
{
|
|
||||||
removed = T;
|
|
||||||
removed_value = indicator_value;
|
|
||||||
}
|
|
||||||
|
|
||||||
delete min_data_store$string_data[indicator_value, item$indicator_type];
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ( removed )
|
|
||||||
hook Intel::indicator_removed(removed_value, item$indicator_type);
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -422,30 +422,10 @@ export {
|
||||||
## .. :zeek:see:`Log::default_max_delay_queue_size`
|
## .. :zeek:see:`Log::default_max_delay_queue_size`
|
||||||
## .. :zeek:see:`Log::set_max_delay_queue_size`
|
## .. :zeek:see:`Log::set_max_delay_queue_size`
|
||||||
max_delay_queue_size: count &default=default_max_delay_queue_size;
|
max_delay_queue_size: count &default=default_max_delay_queue_size;
|
||||||
|
|
||||||
## Maximum string size for field in a log record from this stream.
|
|
||||||
##
|
|
||||||
## .. :zeek:see:`Log::default_max_field_string_bytes`
|
|
||||||
max_field_string_bytes: count &default=Log::default_max_field_string_bytes;
|
|
||||||
|
|
||||||
## Maximum total string size in a log record from this stream.
|
|
||||||
##
|
|
||||||
## .. :zeek:see:`Log::default_max_total_string_bytes`
|
|
||||||
max_total_string_bytes: count &default=Log::default_max_total_string_bytes;
|
|
||||||
|
|
||||||
## Maximum container elements for field in a log record from this stream.
|
|
||||||
##
|
|
||||||
## .. :zeek:see:`Log::default_max_field_container_elements`
|
|
||||||
max_field_container_elements: count &default=Log::default_max_field_container_elements;
|
|
||||||
|
|
||||||
## Maximum total container elements in a log record from this stream.
|
|
||||||
##
|
|
||||||
## .. :zeek:see:`Log::default_max_total_container_elements`
|
|
||||||
max_total_container_elements: count &default=Log::default_max_total_container_elements;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
## Sentinel value for indicating that a filter was not found when looked up.
|
## Sentinel value for indicating that a filter was not found when looked up.
|
||||||
const no_filter = Filter($name="<not found>");
|
const no_filter: Filter = [$name="<not found>"];
|
||||||
|
|
||||||
## Creates a new logging stream with the default filter.
|
## Creates a new logging stream with the default filter.
|
||||||
##
|
##
|
||||||
|
@ -1017,7 +997,7 @@ function flush(id: ID): bool
|
||||||
|
|
||||||
function add_default_filter(id: ID) : bool
|
function add_default_filter(id: ID) : bool
|
||||||
{
|
{
|
||||||
return add_filter(id, Filter($name="default"));
|
return add_filter(id, [$name="default"]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function remove_default_filter(id: ID) : bool
|
function remove_default_filter(id: ID) : bool
|
||||||
|
@ -1028,7 +1008,7 @@ function remove_default_filter(id: ID) : bool
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
if ( print_to_log != REDIRECT_NONE )
|
if ( print_to_log != REDIRECT_NONE )
|
||||||
Log::create_stream(PRINTLOG, Log::Stream($columns=PrintLogInfo, $ev=log_print, $path=print_log_path));
|
Log::create_stream(PRINTLOG, [$columns=PrintLogInfo, $ev=log_print, $path=print_log_path]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function empty_post_delay_cb(rec: any, id: ID): bool {
|
function empty_post_delay_cb(rec: any, id: ID): bool {
|
||||||
|
|
|
@ -7,9 +7,9 @@
|
||||||
##! names is printed out as meta information, with no "# fields" prepended; no
|
##! names is printed out as meta information, with no "# fields" prepended; no
|
||||||
##! other meta data gets included in that mode. Example filter using this::
|
##! other meta data gets included in that mode. Example filter using this::
|
||||||
##!
|
##!
|
||||||
##! local f = Log::Filter($name = "my-filter",
|
##! local f: Log::Filter = [$name = "my-filter",
|
||||||
##! $writer = Log::WRITER_ASCII,
|
##! $writer = Log::WRITER_ASCII,
|
||||||
##! $config = table(["tsv"] = "T"));
|
##! $config = table(["tsv"] = "T")];
|
||||||
##!
|
##!
|
||||||
|
|
||||||
module LogAscii;
|
module LogAscii;
|
||||||
|
|
|
@ -59,13 +59,13 @@ export {
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(NetControl::DROP_LOG, Log::Stream($columns=DropInfo, $ev=log_netcontrol_drop, $path="netcontrol_drop", $policy=log_policy_drop));
|
Log::create_stream(NetControl::DROP_LOG, [$columns=DropInfo, $ev=log_netcontrol_drop, $path="netcontrol_drop", $policy=log_policy_drop]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function drop_connection(c: conn_id, t: interval, location: string &default="") : string
|
function drop_connection(c: conn_id, t: interval, location: string &default="") : string
|
||||||
{
|
{
|
||||||
local e = Entity($ty=CONNECTION, $conn=c);
|
local e: Entity = [$ty=CONNECTION, $conn=c];
|
||||||
local r = Rule($ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location);
|
local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location];
|
||||||
|
|
||||||
if ( ! hook NetControl::drop_rule_policy(r) )
|
if ( ! hook NetControl::drop_rule_policy(r) )
|
||||||
return "";
|
return "";
|
||||||
|
@ -88,8 +88,8 @@ function drop_connection(c: conn_id, t: interval, location: string &default="")
|
||||||
|
|
||||||
function drop_address(a: addr, t: interval, location: string &default="") : string
|
function drop_address(a: addr, t: interval, location: string &default="") : string
|
||||||
{
|
{
|
||||||
local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a));
|
local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)];
|
||||||
local r = Rule($ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location);
|
local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location];
|
||||||
|
|
||||||
if ( ! hook NetControl::drop_rule_policy(r) )
|
if ( ! hook NetControl::drop_rule_policy(r) )
|
||||||
return "";
|
return "";
|
||||||
|
|
|
@ -383,7 +383,7 @@ global rule_entities: table[Entity, RuleType] of Rule;
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(NetControl::LOG, Log::Stream($columns=Info, $ev=log_netcontrol, $path="netcontrol", $policy=log_policy));
|
Log::create_stream(NetControl::LOG, [$columns=Info, $ev=log_netcontrol, $path="netcontrol", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function entity_to_info(info: Info, e: Entity)
|
function entity_to_info(info: Info, e: Entity)
|
||||||
|
@ -489,22 +489,22 @@ function rule_to_info(info: Info, r: Rule)
|
||||||
|
|
||||||
function log_msg(msg: string, p: PluginState)
|
function log_msg(msg: string, p: PluginState)
|
||||||
{
|
{
|
||||||
Log::write(LOG, Info($ts=network_time(), $category=MESSAGE, $msg=msg, $plugin=p$plugin$name(p)));
|
Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg, $plugin=p$plugin$name(p)]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function log_error(msg: string, p: PluginState)
|
function log_error(msg: string, p: PluginState)
|
||||||
{
|
{
|
||||||
Log::write(LOG, Info($ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)));
|
Log::write(LOG, [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function log_msg_no_plugin(msg: string)
|
function log_msg_no_plugin(msg: string)
|
||||||
{
|
{
|
||||||
Log::write(LOG, Info($ts=network_time(), $category=MESSAGE, $msg=msg));
|
Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function log_rule(r: Rule, cmd: string, state: InfoState, p: PluginState, msg: string &default="")
|
function log_rule(r: Rule, cmd: string, state: InfoState, p: PluginState, msg: string &default="")
|
||||||
{
|
{
|
||||||
local info = Info($ts=network_time());
|
local info: Info = [$ts=network_time()];
|
||||||
info$category = RULE;
|
info$category = RULE;
|
||||||
info$cmd = cmd;
|
info$cmd = cmd;
|
||||||
info$state = state;
|
info$state = state;
|
||||||
|
@ -519,14 +519,14 @@ function log_rule(r: Rule, cmd: string, state: InfoState, p: PluginState, msg: s
|
||||||
|
|
||||||
function log_rule_error(r: Rule, msg: string, p: PluginState)
|
function log_rule_error(r: Rule, msg: string, p: PluginState)
|
||||||
{
|
{
|
||||||
local info = Info($ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p));
|
local info: Info = [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)];
|
||||||
rule_to_info(info, r);
|
rule_to_info(info, r);
|
||||||
Log::write(LOG, info);
|
Log::write(LOG, info);
|
||||||
}
|
}
|
||||||
|
|
||||||
function log_rule_no_plugin(r: Rule, state: InfoState, msg: string)
|
function log_rule_no_plugin(r: Rule, state: InfoState, msg: string)
|
||||||
{
|
{
|
||||||
local info = Info($ts=network_time());
|
local info: Info = [$ts=network_time()];
|
||||||
info$category = RULE;
|
info$category = RULE;
|
||||||
info$state = state;
|
info$state = state;
|
||||||
info$msg = msg;
|
info$msg = msg;
|
||||||
|
@ -538,16 +538,16 @@ function log_rule_no_plugin(r: Rule, state: InfoState, msg: string)
|
||||||
|
|
||||||
function whitelist_address(a: addr, t: interval, location: string &default="") : string
|
function whitelist_address(a: addr, t: interval, location: string &default="") : string
|
||||||
{
|
{
|
||||||
local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a));
|
local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)];
|
||||||
local r = Rule($ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location);
|
local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location];
|
||||||
|
|
||||||
return add_rule(r);
|
return add_rule(r);
|
||||||
}
|
}
|
||||||
|
|
||||||
function whitelist_subnet(s: subnet, t: interval, location: string &default="") : string
|
function whitelist_subnet(s: subnet, t: interval, location: string &default="") : string
|
||||||
{
|
{
|
||||||
local e = Entity($ty=ADDRESS, $ip=s);
|
local e: Entity = [$ty=ADDRESS, $ip=s];
|
||||||
local r = Rule($ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location);
|
local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location];
|
||||||
|
|
||||||
return add_rule(r);
|
return add_rule(r);
|
||||||
}
|
}
|
||||||
|
@ -561,8 +561,8 @@ function redirect_flow(f: flow_id, out_port: count, t: interval, location: strin
|
||||||
$dst_h=addr_to_subnet(f$dst_h),
|
$dst_h=addr_to_subnet(f$dst_h),
|
||||||
$dst_p=f$dst_p
|
$dst_p=f$dst_p
|
||||||
);
|
);
|
||||||
local e = Entity($ty=FLOW, $flow=flow);
|
local e: Entity = [$ty=FLOW, $flow=flow];
|
||||||
local r = Rule($ty=REDIRECT, $target=FORWARD, $entity=e, $expire=t, $location=location, $out_port=out_port);
|
local r: Rule = [$ty=REDIRECT, $target=FORWARD, $entity=e, $expire=t, $location=location, $out_port=out_port];
|
||||||
|
|
||||||
return add_rule(r);
|
return add_rule(r);
|
||||||
}
|
}
|
||||||
|
@ -570,19 +570,19 @@ function redirect_flow(f: flow_id, out_port: count, t: interval, location: strin
|
||||||
function quarantine_host(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string
|
function quarantine_host(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string
|
||||||
{
|
{
|
||||||
local orules: vector of string = vector();
|
local orules: vector of string = vector();
|
||||||
local edrop = Entity($ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected)));
|
local edrop: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected))];
|
||||||
local rdrop = Rule($ty=DROP, $target=FORWARD, $entity=edrop, $expire=t, $location=location);
|
local rdrop: Rule = [$ty=DROP, $target=FORWARD, $entity=edrop, $expire=t, $location=location];
|
||||||
orules += add_rule(rdrop);
|
orules += add_rule(rdrop);
|
||||||
|
|
||||||
local todnse = Entity($ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(dns), $dst_p=53/udp));
|
local todnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(dns), $dst_p=53/udp)];
|
||||||
local todnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=todnse, $expire=t, $location=location, $mod=FlowMod($dst_h=quarantine), $priority=+5);
|
local todnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=todnse, $expire=t, $location=location, $mod=FlowMod($dst_h=quarantine), $priority=+5);
|
||||||
orules += add_rule(todnsr);
|
orules += add_rule(todnsr);
|
||||||
|
|
||||||
local fromdnse = Entity($ty=FLOW, $flow=Flow($src_h=addr_to_subnet(dns), $src_p=53/udp, $dst_h=addr_to_subnet(infected)));
|
local fromdnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(dns), $src_p=53/udp, $dst_h=addr_to_subnet(infected))];
|
||||||
local fromdnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=fromdnse, $expire=t, $location=location, $mod=FlowMod($src_h=dns), $priority=+5);
|
local fromdnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=fromdnse, $expire=t, $location=location, $mod=FlowMod($src_h=dns), $priority=+5);
|
||||||
orules += add_rule(fromdnsr);
|
orules += add_rule(fromdnsr);
|
||||||
|
|
||||||
local wle = Entity($ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(quarantine), $dst_p=80/tcp));
|
local wle: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(quarantine), $dst_p=80/tcp)];
|
||||||
local wlr = Rule($ty=WHITELIST, $target=FORWARD, $entity=wle, $expire=t, $location=location, $priority=+5);
|
local wlr = Rule($ty=WHITELIST, $target=FORWARD, $entity=wle, $expire=t, $location=location, $priority=+5);
|
||||||
orules += add_rule(wlr);
|
orules += add_rule(wlr);
|
||||||
|
|
||||||
|
|
|
@ -303,7 +303,7 @@ function create_acld(config: AcldConfig) : PluginState
|
||||||
add netcontrol_acld_topics[config$acld_topic];
|
add netcontrol_acld_topics[config$acld_topic];
|
||||||
|
|
||||||
local host = cat(config$acld_host);
|
local host = cat(config$acld_host);
|
||||||
local p = PluginState($acld_config=config, $plugin=acld_plugin, $acld_id=netcontrol_acld_current_id);
|
local p: PluginState = [$acld_config=config, $plugin=acld_plugin, $acld_id=netcontrol_acld_current_id];
|
||||||
|
|
||||||
if ( [config$acld_port, host] in netcontrol_acld_peers )
|
if ( [config$acld_port, host] in netcontrol_acld_peers )
|
||||||
Reporter::warning(fmt("Peer %s:%s was added to NetControl acld plugin twice.", host, config$acld_port));
|
Reporter::warning(fmt("Peer %s:%s was added to NetControl acld plugin twice.", host, config$acld_port));
|
||||||
|
|
|
@ -117,7 +117,7 @@ global debug_plugin = Plugin(
|
||||||
|
|
||||||
function create_debug(do_something: bool, name: string) : PluginState
|
function create_debug(do_something: bool, name: string) : PluginState
|
||||||
{
|
{
|
||||||
local p = PluginState($plugin=debug_plugin);
|
local p: PluginState = [$plugin=debug_plugin];
|
||||||
|
|
||||||
# FIXME: Why's the default not working?
|
# FIXME: Why's the default not working?
|
||||||
p$config = table();
|
p$config = table();
|
||||||
|
@ -132,7 +132,7 @@ function create_debug(do_something: bool, name: string) : PluginState
|
||||||
|
|
||||||
function create_debug_error(name: string) : PluginState
|
function create_debug_error(name: string) : PluginState
|
||||||
{
|
{
|
||||||
local p = copy(PluginState($plugin=debug_plugin));
|
local p: PluginState = copy([$plugin=debug_plugin]);
|
||||||
p$config["name"] = name;
|
p$config["name"] = name;
|
||||||
p$config["all"] = "1";
|
p$config["all"] = "1";
|
||||||
p$plugin$add_rule = debug_add_rule_error;
|
p$plugin$add_rule = debug_add_rule_error;
|
||||||
|
@ -141,7 +141,7 @@ function create_debug_error(name: string) : PluginState
|
||||||
|
|
||||||
function create_debug_exists(name: string) : PluginState
|
function create_debug_exists(name: string) : PluginState
|
||||||
{
|
{
|
||||||
local p = copy(PluginState($plugin=debug_plugin));
|
local p: PluginState = copy([$plugin=debug_plugin]);
|
||||||
p$config["name"] = name;
|
p$config["name"] = name;
|
||||||
p$config["all"] = "1";
|
p$config["all"] = "1";
|
||||||
p$plugin$add_rule = debug_add_rule_exists;
|
p$plugin$add_rule = debug_add_rule_exists;
|
||||||
|
|
|
@ -447,7 +447,7 @@ global openflow_plugin = Plugin(
|
||||||
|
|
||||||
function create_openflow(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState
|
function create_openflow(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState
|
||||||
{
|
{
|
||||||
local p = PluginState($plugin=openflow_plugin, $of_controller=controller, $of_config=config);
|
local p: PluginState = [$plugin=openflow_plugin, $of_controller=controller, $of_config=config];
|
||||||
|
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
|
|
@ -106,7 +106,7 @@ global packetfilter_plugin = Plugin(
|
||||||
|
|
||||||
function create_packetfilter() : PluginState
|
function create_packetfilter() : PluginState
|
||||||
{
|
{
|
||||||
local p = PluginState($plugin=packetfilter_plugin);
|
local p: PluginState = [$plugin=packetfilter_plugin];
|
||||||
|
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
|
|
@ -40,7 +40,7 @@ export {
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(NetControl::SHUNT, Log::Stream($columns=ShuntInfo, $ev=log_netcontrol_shunt, $path="netcontrol_shunt", $policy=log_policy_shunt));
|
Log::create_stream(NetControl::SHUNT, [$columns=ShuntInfo, $ev=log_netcontrol_shunt, $path="netcontrol_shunt", $policy=log_policy_shunt]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function shunt_flow(f: flow_id, t: interval, location: string &default="") : string
|
function shunt_flow(f: flow_id, t: interval, location: string &default="") : string
|
||||||
|
@ -51,8 +51,8 @@ function shunt_flow(f: flow_id, t: interval, location: string &default="") : str
|
||||||
$dst_h=addr_to_subnet(f$dst_h),
|
$dst_h=addr_to_subnet(f$dst_h),
|
||||||
$dst_p=f$dst_p
|
$dst_p=f$dst_p
|
||||||
);
|
);
|
||||||
local e = Entity($ty=FLOW, $flow=flow);
|
local e: Entity = [$ty=FLOW, $flow=flow];
|
||||||
local r = Rule($ty=DROP, $target=MONITOR, $entity=e, $expire=t, $location=location);
|
local r: Rule = [$ty=DROP, $target=MONITOR, $entity=e, $expire=t, $location=location];
|
||||||
|
|
||||||
local id = add_rule(r);
|
local id = add_rule(r);
|
||||||
|
|
||||||
|
|
|
@ -102,9 +102,9 @@ event zeek_init()
|
||||||
|
|
||||||
# This replaces the standard non-pretty-printing filter.
|
# This replaces the standard non-pretty-printing filter.
|
||||||
Log::add_filter(Notice::ALARM_LOG,
|
Log::add_filter(Notice::ALARM_LOG,
|
||||||
Log::Filter($name="alarm-mail", $writer=Log::WRITER_NONE,
|
[$name="alarm-mail", $writer=Log::WRITER_NONE,
|
||||||
$interv=Log::default_mail_alarms_interval,
|
$interv=Log::default_mail_alarms_interval,
|
||||||
$postprocessor=pp_postprocessor));
|
$postprocessor=pp_postprocessor]);
|
||||||
}
|
}
|
||||||
|
|
||||||
hook notice(n: Notice::Info) &priority=-5
|
hook notice(n: Notice::Info) &priority=-5
|
||||||
|
|
|
@ -381,16 +381,16 @@ function log_mailing_postprocessor(info: Log::RotationInfo): bool
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(Notice::LOG, Log::Stream($columns=Info, $ev=log_notice, $path="notice", $policy=log_policy));
|
Log::create_stream(Notice::LOG, [$columns=Info, $ev=log_notice, $path="notice", $policy=log_policy]);
|
||||||
|
|
||||||
Log::create_stream(Notice::ALARM_LOG, Log::Stream($columns=Notice::Info, $path="notice_alarm", $policy=log_policy_alarm));
|
Log::create_stream(Notice::ALARM_LOG, [$columns=Notice::Info, $path="notice_alarm", $policy=log_policy_alarm]);
|
||||||
# If Zeek is configured for mailing notices, set up mailing for alarms.
|
# If Zeek is configured for mailing notices, set up mailing for alarms.
|
||||||
# Make sure that this alarm log is also output as text so that it can
|
# Make sure that this alarm log is also output as text so that it can
|
||||||
# be packaged up and emailed later.
|
# be packaged up and emailed later.
|
||||||
if ( ! reading_traces() && mail_dest != "" )
|
if ( ! reading_traces() && mail_dest != "" )
|
||||||
Log::add_filter(Notice::ALARM_LOG,
|
Log::add_filter(Notice::ALARM_LOG,
|
||||||
Log::Filter($name="alarm-mail", $path="alarm-mail", $writer=Log::WRITER_ASCII,
|
[$name="alarm-mail", $path="alarm-mail", $writer=Log::WRITER_ASCII,
|
||||||
$interv=24hrs, $postprocessor=log_mailing_postprocessor));
|
$interv=24hrs, $postprocessor=log_mailing_postprocessor]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function email_headers(subject_desc: string, dest: string): string
|
function email_headers(subject_desc: string, dest: string): string
|
||||||
|
|
|
@ -52,7 +52,7 @@ export {
|
||||||
## The peer that originated this weird. This is helpful in
|
## The peer that originated this weird. This is helpful in
|
||||||
## cluster deployments if a particular cluster node is having
|
## cluster deployments if a particular cluster node is having
|
||||||
## trouble to help identify which node is having trouble.
|
## trouble to help identify which node is having trouble.
|
||||||
peer: string &log &default=peer_description;
|
peer: string &log &optional &default=peer_description;
|
||||||
|
|
||||||
## The source of the weird. When reported by an analyzer, this
|
## The source of the weird. When reported by an analyzer, this
|
||||||
## should be the name of the analyzer.
|
## should be the name of the analyzer.
|
||||||
|
@ -318,7 +318,7 @@ const notice_actions = {
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(Weird::LOG, Log::Stream($columns=Info, $ev=log_weird, $path="weird", $policy=log_policy));
|
Log::create_stream(Weird::LOG, [$columns=Info, $ev=log_weird, $path="weird", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function flow_id_string(src: addr, dst: addr): string
|
function flow_id_string(src: addr, dst: addr): string
|
||||||
|
|
|
@ -50,12 +50,12 @@ export {
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(OpenFlow::LOG, Log::Stream($columns=Info, $ev=log_openflow, $path="openflow", $policy=log_policy));
|
Log::create_stream(OpenFlow::LOG, [$columns=Info, $ev=log_openflow, $path="openflow", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
function log_flow_mod(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
|
function log_flow_mod(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
|
||||||
{
|
{
|
||||||
Log::write(LOG, Info($ts=network_time(), $dpid=state$log_dpid, $match=match, $flow_mod=flow_mod));
|
Log::write(OpenFlow::LOG, [$ts=network_time(), $dpid=state$log_dpid, $match=match, $flow_mod=flow_mod]);
|
||||||
if ( state$log_success_event )
|
if ( state$log_success_event )
|
||||||
event OpenFlow::flow_mod_success(state$_name, match, flow_mod);
|
event OpenFlow::flow_mod_success(state$_name, match, flow_mod);
|
||||||
|
|
||||||
|
|
|
@ -175,7 +175,7 @@ event filter_change_tracking()
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(PacketFilter::LOG, Log::Stream($columns=Info, $path="packet_filter", $policy=log_policy));
|
Log::create_stream(PacketFilter::LOG, [$columns=Info, $path="packet_filter", $policy=log_policy]);
|
||||||
|
|
||||||
# Preverify the capture and restrict filters to give more granular failure messages.
|
# Preverify the capture and restrict filters to give more granular failure messages.
|
||||||
for ( id, cf in capture_filters )
|
for ( id, cf in capture_filters )
|
||||||
|
@ -303,9 +303,9 @@ function install(): bool
|
||||||
local error_string : string;
|
local error_string : string;
|
||||||
if ( state == Pcap::fatal )
|
if ( state == Pcap::fatal )
|
||||||
{
|
{
|
||||||
NOTICE(Notice::Info($note=Compile_Failure,
|
NOTICE([$note=Compile_Failure,
|
||||||
$msg=fmt("Compiling packet filter failed"),
|
$msg=fmt("Compiling packet filter failed"),
|
||||||
$sub=tmp_filter));
|
$sub=tmp_filter]);
|
||||||
|
|
||||||
error_string = fmt("Bad pcap filter '%s': %s", tmp_filter,
|
error_string = fmt("Bad pcap filter '%s': %s", tmp_filter,
|
||||||
Pcap::get_filter_state_string(DefaultPcapFilter));
|
Pcap::get_filter_state_string(DefaultPcapFilter));
|
||||||
|
@ -326,8 +326,8 @@ function install(): bool
|
||||||
}
|
}
|
||||||
local diff = current_time()-ts;
|
local diff = current_time()-ts;
|
||||||
if ( diff > max_filter_compile_time )
|
if ( diff > max_filter_compile_time )
|
||||||
NOTICE(Notice::Info($note=Too_Long_To_Compile_Filter,
|
NOTICE([$note=Too_Long_To_Compile_Filter,
|
||||||
$msg=fmt("A BPF filter is taking longer than %0.1f seconds to compile", diff)));
|
$msg=fmt("A BPF filter is taking longer than %0.1f seconds to compile", diff)]);
|
||||||
|
|
||||||
# Set it to the current filter if it passed precompiling
|
# Set it to the current filter if it passed precompiling
|
||||||
current_filter = tmp_filter;
|
current_filter = tmp_filter;
|
||||||
|
@ -350,9 +350,9 @@ function install(): bool
|
||||||
info$success = F;
|
info$success = F;
|
||||||
info$failure_reason = Pcap::get_filter_state_string(DefaultPcapFilter);
|
info$failure_reason = Pcap::get_filter_state_string(DefaultPcapFilter);
|
||||||
|
|
||||||
NOTICE(Notice::Info($note=Install_Failure,
|
NOTICE([$note=Install_Failure,
|
||||||
$msg=fmt("Installing packet filter failed"),
|
$msg=fmt("Installing packet filter failed"),
|
||||||
$sub=current_filter));
|
$sub=current_filter]);
|
||||||
}
|
}
|
||||||
|
|
||||||
if ( reading_live_traffic() || reading_traces() )
|
if ( reading_live_traffic() || reading_traces() )
|
||||||
|
|
|
@ -24,10 +24,10 @@ event net_stats_update(last_stat: NetStats)
|
||||||
{
|
{
|
||||||
local new_recvd = ns$pkts_recvd - last_stat$pkts_recvd;
|
local new_recvd = ns$pkts_recvd - last_stat$pkts_recvd;
|
||||||
local new_link = ns$pkts_link - last_stat$pkts_link;
|
local new_link = ns$pkts_link - last_stat$pkts_link;
|
||||||
NOTICE(Notice::Info($note=Dropped_Packets,
|
NOTICE([$note=Dropped_Packets,
|
||||||
$msg=fmt("%d packets dropped after filtering, %d received%s",
|
$msg=fmt("%d packets dropped after filtering, %d received%s",
|
||||||
new_dropped, new_recvd + new_dropped,
|
new_dropped, new_recvd + new_dropped,
|
||||||
new_link != 0 ? fmt(", %d on link", new_link) : "")));
|
new_link != 0 ? fmt(", %d on link", new_link) : "")]);
|
||||||
}
|
}
|
||||||
|
|
||||||
schedule stats_collection_interval { net_stats_update(ns) };
|
schedule stats_collection_interval { net_stats_update(ns) };
|
||||||
|
|
|
@ -40,20 +40,20 @@ export {
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(Reporter::LOG, Log::Stream($columns=Info, $path="reporter", $policy=log_policy));
|
Log::create_stream(Reporter::LOG, [$columns=Info, $path="reporter", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
event reporter_info(t: time, msg: string, location: string) &priority=-5
|
event reporter_info(t: time, msg: string, location: string) &priority=-5
|
||||||
{
|
{
|
||||||
Log::write(Reporter::LOG, Info($ts=t, $level=INFO, $message=msg, $location=location));
|
Log::write(Reporter::LOG, [$ts=t, $level=INFO, $message=msg, $location=location]);
|
||||||
}
|
}
|
||||||
|
|
||||||
event reporter_warning(t: time, msg: string, location: string) &priority=-5
|
event reporter_warning(t: time, msg: string, location: string) &priority=-5
|
||||||
{
|
{
|
||||||
Log::write(Reporter::LOG, Info($ts=t, $level=WARNING, $message=msg, $location=location));
|
Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]);
|
||||||
}
|
}
|
||||||
|
|
||||||
event reporter_error(t: time, msg: string, location: string) &priority=-5
|
event reporter_error(t: time, msg: string, location: string) &priority=-5
|
||||||
{
|
{
|
||||||
Log::write(Reporter::LOG, Info($ts=t, $level=ERROR, $message=msg, $location=location));
|
Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]);
|
||||||
}
|
}
|
||||||
|
|
|
@ -145,14 +145,14 @@ global did_sig_log: set[string] &read_expire = 1 hr;
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(Signatures::LOG, Log::Stream($columns=Info, $ev=log_signature, $path="signatures", $policy=log_policy));
|
Log::create_stream(Signatures::LOG, [$columns=Info, $ev=log_signature, $path="signatures", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
event sig_summary(orig: addr, id: string, msg: string)
|
event sig_summary(orig: addr, id: string, msg: string)
|
||||||
{
|
{
|
||||||
NOTICE(Notice::Info($note=Signature_Summary, $src=orig,
|
NOTICE([$note=Signature_Summary, $src=orig,
|
||||||
$msg=fmt("%s: %s", orig, msg),
|
$msg=fmt("%s: %s", orig, msg),
|
||||||
$n=count_per_orig[orig,id]));
|
$n=count_per_orig[orig,id] ]);
|
||||||
}
|
}
|
||||||
|
|
||||||
event signature_match(state: signature_state, msg: string, data: string)
|
event signature_match(state: signature_state, msg: string, data: string)
|
||||||
|
@ -189,7 +189,7 @@ event signature_match(state: signature_state, msg: string, data: string)
|
||||||
|
|
||||||
if ( action != SIG_QUIET && action != SIG_COUNT_PER_RESP )
|
if ( action != SIG_QUIET && action != SIG_COUNT_PER_RESP )
|
||||||
{
|
{
|
||||||
local info = Info($ts=network_time(),
|
local info: Info = [$ts=network_time(),
|
||||||
$note=Sensitive_Signature,
|
$note=Sensitive_Signature,
|
||||||
$uid=state$conn$uid,
|
$uid=state$conn$uid,
|
||||||
$src_addr=src_addr,
|
$src_addr=src_addr,
|
||||||
|
@ -198,7 +198,7 @@ event signature_match(state: signature_state, msg: string, data: string)
|
||||||
$dst_port=dst_port,
|
$dst_port=dst_port,
|
||||||
$event_msg=fmt("%s: %s", src_addr, msg),
|
$event_msg=fmt("%s: %s", src_addr, msg),
|
||||||
$sig_id=sig_id,
|
$sig_id=sig_id,
|
||||||
$sub_msg=data);
|
$sub_msg=data];
|
||||||
Log::write(Signatures::LOG, info);
|
Log::write(Signatures::LOG, info);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -211,12 +211,12 @@ event signature_match(state: signature_state, msg: string, data: string)
|
||||||
local dst = state$conn$id$resp_h;
|
local dst = state$conn$id$resp_h;
|
||||||
if ( ++count_per_resp[dst,sig_id] in count_thresholds )
|
if ( ++count_per_resp[dst,sig_id] in count_thresholds )
|
||||||
{
|
{
|
||||||
NOTICE(Notice::Info($note=Count_Signature, $conn=state$conn,
|
NOTICE([$note=Count_Signature, $conn=state$conn,
|
||||||
$msg=msg,
|
$msg=msg,
|
||||||
$n=count_per_resp[dst,sig_id],
|
$n=count_per_resp[dst,sig_id],
|
||||||
$sub=fmt("%d matches of signature %s on host %s",
|
$sub=fmt("%d matches of signature %s on host %s",
|
||||||
count_per_resp[dst,sig_id],
|
count_per_resp[dst,sig_id],
|
||||||
sig_id, dst)));
|
sig_id, dst)]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -241,10 +241,10 @@ event signature_match(state: signature_state, msg: string, data: string)
|
||||||
}
|
}
|
||||||
|
|
||||||
if ( notice )
|
if ( notice )
|
||||||
NOTICE(Notice::Info($note=Sensitive_Signature,
|
NOTICE([$note=Sensitive_Signature,
|
||||||
$conn=state$conn, $src=src_addr,
|
$conn=state$conn, $src=src_addr,
|
||||||
$dst=dst_addr, $msg=fmt("%s: %s", src_addr, msg),
|
$dst=dst_addr, $msg=fmt("%s: %s", src_addr, msg),
|
||||||
$sub=data));
|
$sub=data]);
|
||||||
|
|
||||||
if ( action == SIG_FILE_BUT_NO_SCAN || action == SIG_SUMMARY )
|
if ( action == SIG_FILE_BUT_NO_SCAN || action == SIG_SUMMARY )
|
||||||
return;
|
return;
|
||||||
|
@ -273,12 +273,12 @@ event signature_match(state: signature_state, msg: string, data: string)
|
||||||
orig, sig_id, hcount);
|
orig, sig_id, hcount);
|
||||||
|
|
||||||
Log::write(Signatures::LOG,
|
Log::write(Signatures::LOG,
|
||||||
Info($ts=network_time(), $note=Multiple_Sig_Responders,
|
[$ts=network_time(), $note=Multiple_Sig_Responders,
|
||||||
$src_addr=orig, $sig_id=sig_id, $event_msg=msg,
|
$src_addr=orig, $sig_id=sig_id, $event_msg=msg,
|
||||||
$host_count=hcount, $sub_msg=horz_scan_msg));
|
$host_count=hcount, $sub_msg=horz_scan_msg]);
|
||||||
|
|
||||||
NOTICE(Notice::Info($note=Multiple_Sig_Responders, $src=orig,
|
NOTICE([$note=Multiple_Sig_Responders, $src=orig,
|
||||||
$msg=msg, $n=hcount, $sub=horz_scan_msg));
|
$msg=msg, $n=hcount, $sub=horz_scan_msg]);
|
||||||
|
|
||||||
last_hthresh[orig] = hcount;
|
last_hthresh[orig] = hcount;
|
||||||
}
|
}
|
||||||
|
@ -290,16 +290,16 @@ event signature_match(state: signature_state, msg: string, data: string)
|
||||||
orig, vcount, resp);
|
orig, vcount, resp);
|
||||||
|
|
||||||
Log::write(Signatures::LOG,
|
Log::write(Signatures::LOG,
|
||||||
Info($ts=network_time(),
|
[$ts=network_time(),
|
||||||
$note=Multiple_Signatures,
|
$note=Multiple_Signatures,
|
||||||
$src_addr=orig,
|
$src_addr=orig,
|
||||||
$dst_addr=resp, $sig_id=sig_id, $sig_count=vcount,
|
$dst_addr=resp, $sig_id=sig_id, $sig_count=vcount,
|
||||||
$event_msg=fmt("%s different signatures triggered", vcount),
|
$event_msg=fmt("%s different signatures triggered", vcount),
|
||||||
$sub_msg=vert_scan_msg));
|
$sub_msg=vert_scan_msg]);
|
||||||
|
|
||||||
NOTICE(Notice::Info($note=Multiple_Signatures, $src=orig, $dst=resp,
|
NOTICE([$note=Multiple_Signatures, $src=orig, $dst=resp,
|
||||||
$msg=fmt("%s different signatures triggered", vcount),
|
$msg=fmt("%s different signatures triggered", vcount),
|
||||||
$n=vcount, $sub=vert_scan_msg));
|
$n=vcount, $sub=vert_scan_msg]);
|
||||||
|
|
||||||
last_vthresh[orig] = vcount;
|
last_vthresh[orig] = vcount;
|
||||||
}
|
}
|
||||||
|
|
|
@ -126,7 +126,7 @@ export {
|
||||||
|
|
||||||
event zeek_init() &priority=5
|
event zeek_init() &priority=5
|
||||||
{
|
{
|
||||||
Log::create_stream(Software::LOG, Log::Stream($columns=Info, $ev=log_software, $path="software", $policy=log_policy));
|
Log::create_stream(Software::LOG, [$columns=Info, $ev=log_software, $path="software", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
type Description: record {
|
type Description: record {
|
||||||
|
@ -163,7 +163,7 @@ function parse(unparsed_version: string): Description
|
||||||
else
|
else
|
||||||
v = Version($major=extract_count(vs));
|
v = Version($major=extract_count(vs));
|
||||||
|
|
||||||
return Description($version=v, $unparsed_version=unparsed_version, $name=software_name);
|
return [$version=v, $unparsed_version=unparsed_version, $name=software_name];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
@ -236,7 +236,7 @@ function parse(unparsed_version: string): Description
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return Description($version=v, $unparsed_version=unparsed_version, $name=alternate_names[software_name]);
|
return [$version=v, $unparsed_version=unparsed_version, $name=alternate_names[software_name]];
|
||||||
}
|
}
|
||||||
|
|
||||||
global parse_cache: table[string] of Description &read_expire=65secs;
|
global parse_cache: table[string] of Description &read_expire=65secs;
|
||||||
|
@ -269,13 +269,13 @@ function parse_mozilla(unparsed_version: string): Description
|
||||||
{
|
{
|
||||||
software_name = "MSIE";
|
software_name = "MSIE";
|
||||||
if ( /Trident\/4\.0/ in unparsed_version )
|
if ( /Trident\/4\.0/ in unparsed_version )
|
||||||
v = Version($major=8,$minor=0);
|
v = [$major=8,$minor=0];
|
||||||
else if ( /Trident\/5\.0/ in unparsed_version )
|
else if ( /Trident\/5\.0/ in unparsed_version )
|
||||||
v = Version($major=9,$minor=0);
|
v = [$major=9,$minor=0];
|
||||||
else if ( /Trident\/6\.0/ in unparsed_version )
|
else if ( /Trident\/6\.0/ in unparsed_version )
|
||||||
v = Version($major=10,$minor=0);
|
v = [$major=10,$minor=0];
|
||||||
else if ( /Trident\/7\.0/ in unparsed_version )
|
else if ( /Trident\/7\.0/ in unparsed_version )
|
||||||
v = Version($major=11,$minor=0);
|
v = [$major=11,$minor=0];
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
parts = split_string_all(unparsed_version, /MSIE [0-9]{1,2}\.*[0-9]*b?[0-9]*/);
|
parts = split_string_all(unparsed_version, /MSIE [0-9]{1,2}\.*[0-9]*b?[0-9]*/);
|
||||||
|
@ -373,7 +373,7 @@ function parse_mozilla(unparsed_version: string): Description
|
||||||
v = parse(parts[1])$version;
|
v = parse(parts[1])$version;
|
||||||
}
|
}
|
||||||
|
|
||||||
return Description($version=v, $unparsed_version=unparsed_version, $name=software_name);
|
return [$version=v, $unparsed_version=unparsed_version, $name=software_name];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -8,8 +8,8 @@ export {
|
||||||
|
|
||||||
event max_file_depth_exceeded(f: fa_file, args: Files::AnalyzerArgs, limit: count)
|
event max_file_depth_exceeded(f: fa_file, args: Files::AnalyzerArgs, limit: count)
|
||||||
{
|
{
|
||||||
NOTICE(Notice::Info(
|
NOTICE([
|
||||||
$note=Spicy::Spicy_Max_File_Depth_Exceeded,
|
$note=Spicy::Spicy_Max_File_Depth_Exceeded,
|
||||||
$msg=fmt("Maximum file depth exceeded for file %s", f$id)
|
$msg=fmt("Maximum file depth exceeded for file %s", f$id)
|
||||||
));
|
]);
|
||||||
}
|
}
|
||||||
|
|
|
@ -81,28 +81,18 @@ export {
|
||||||
function open_backend(btype: Storage::Backend, options: Storage::BackendOptions,
|
function open_backend(btype: Storage::Backend, options: Storage::BackendOptions,
|
||||||
key_type: any, val_type: any): Storage::OperationResult
|
key_type: any, val_type: any): Storage::OperationResult
|
||||||
{
|
{
|
||||||
if ( options$forced_sync )
|
|
||||||
return Storage::Sync::__open_backend(btype, options, key_type, val_type);
|
|
||||||
else
|
|
||||||
return Storage::Async::__open_backend(btype, options, key_type, val_type);
|
return Storage::Async::__open_backend(btype, options, key_type, val_type);
|
||||||
}
|
}
|
||||||
|
|
||||||
function close_backend(backend: opaque of Storage::BackendHandle)
|
function close_backend(backend: opaque of Storage::BackendHandle)
|
||||||
: Storage::OperationResult
|
: Storage::OperationResult
|
||||||
{
|
{
|
||||||
if ( Storage::is_forced_sync(backend) )
|
|
||||||
return Storage::Sync::__close_backend(backend);
|
|
||||||
else
|
|
||||||
return Storage::Async::__close_backend(backend);
|
return Storage::Async::__close_backend(backend);
|
||||||
}
|
}
|
||||||
|
|
||||||
function put(backend: opaque of Storage::BackendHandle, args: Storage::PutArgs)
|
function put(backend: opaque of Storage::BackendHandle, args: Storage::PutArgs)
|
||||||
: Storage::OperationResult
|
: Storage::OperationResult
|
||||||
{
|
{
|
||||||
if ( Storage::is_forced_sync(backend) )
|
|
||||||
return Storage::Sync::__put(backend, args$key, args$value, args$overwrite,
|
|
||||||
args$expire_time);
|
|
||||||
else
|
|
||||||
return Storage::Async::__put(backend, args$key, args$value, args$overwrite,
|
return Storage::Async::__put(backend, args$key, args$value, args$overwrite,
|
||||||
args$expire_time);
|
args$expire_time);
|
||||||
}
|
}
|
||||||
|
@ -110,17 +100,11 @@ function put(backend: opaque of Storage::BackendHandle, args: Storage::PutArgs)
|
||||||
function get(backend: opaque of Storage::BackendHandle, key: any)
|
function get(backend: opaque of Storage::BackendHandle, key: any)
|
||||||
: Storage::OperationResult
|
: Storage::OperationResult
|
||||||
{
|
{
|
||||||
if ( Storage::is_forced_sync(backend) )
|
|
||||||
return Storage::Sync::__get(backend, key);
|
|
||||||
else
|
|
||||||
return Storage::Async::__get(backend, key);
|
return Storage::Async::__get(backend, key);
|
||||||
}
|
}
|
||||||
|
|
||||||
function erase(backend: opaque of Storage::BackendHandle, key: any)
|
function erase(backend: opaque of Storage::BackendHandle, key: any)
|
||||||
: Storage::OperationResult
|
: Storage::OperationResult
|
||||||
{
|
{
|
||||||
if ( Storage::is_forced_sync(backend) )
|
|
||||||
return Storage::Sync::__erase(backend, key);
|
|
||||||
else
|
|
||||||
return Storage::Async::__erase(backend, key);
|
return Storage::Async::__erase(backend, key);
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,9 +3,6 @@
|
||||||
module Storage;
|
module Storage;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
# Default value for the BackendOptions::forced_sync field.
|
|
||||||
const default_forced_sync: bool = F &redef;
|
|
||||||
|
|
||||||
## Base record for backend options that can be passed to
|
## Base record for backend options that can be passed to
|
||||||
## :zeek:see:`Storage::Async::open_backend` and
|
## :zeek:see:`Storage::Async::open_backend` and
|
||||||
## :zeek:see:`Storage::Sync::open_backend`. Backend plugins can redef this record
|
## :zeek:see:`Storage::Sync::open_backend`. Backend plugins can redef this record
|
||||||
|
@ -13,11 +10,6 @@ export {
|
||||||
type BackendOptions: record {
|
type BackendOptions: record {
|
||||||
## The serializer used for converting Zeek data.
|
## The serializer used for converting Zeek data.
|
||||||
serializer: Storage::Serializer &default=Storage::STORAGE_SERIALIZER_JSON;
|
serializer: Storage::Serializer &default=Storage::STORAGE_SERIALIZER_JSON;
|
||||||
|
|
||||||
## Sets the backend into forced-synchronous mode. All operations will run
|
|
||||||
## in synchronous mode, even if the async functions are called. This
|
|
||||||
## should generally only be set to ``T`` during testing.
|
|
||||||
forced_sync : bool &default=Storage::default_forced_sync;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
## Record for passing arguments to :zeek:see:`Storage::Async::put` and
|
## Record for passing arguments to :zeek:see:`Storage::Async::put` and
|
||||||
|
@ -37,7 +29,4 @@ export {
|
||||||
## backend.
|
## backend.
|
||||||
expire_time: interval &default=0sec;
|
expire_time: interval &default=0sec;
|
||||||
};
|
};
|
||||||
|
|
||||||
# The histogram buckets to use for operation latency metrics, in seconds.
|
|
||||||
const latency_metric_bounds: vector of double = { 0.001, 0.01, 0.1, 1.0, } &redef;
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -312,7 +312,7 @@ event zeek_init() &priority=100000
|
||||||
|
|
||||||
function init_resultval(r: Reducer): ResultVal
|
function init_resultval(r: Reducer): ResultVal
|
||||||
{
|
{
|
||||||
local rv = ResultVal($begin=network_time(), $end=network_time());
|
local rv: ResultVal = [$begin=network_time(), $end=network_time()];
|
||||||
hook init_resultval_hook(r, rv);
|
hook init_resultval_hook(r, rv);
|
||||||
return rv;
|
return rv;
|
||||||
}
|
}
|
||||||
|
|
|
@ -54,7 +54,7 @@ hook register_observe_plugins()
|
||||||
if ( r$num_last_elements > 0 )
|
if ( r$num_last_elements > 0 )
|
||||||
{
|
{
|
||||||
if ( ! rv?$last_elements )
|
if ( ! rv?$last_elements )
|
||||||
rv$last_elements = Queue::init(Queue::Settings($max_len=r$num_last_elements));
|
rv$last_elements = Queue::init([$max_len=r$num_last_elements]);
|
||||||
Queue::put(rv$last_elements, obs);
|
Queue::put(rv$last_elements, obs);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
|
@ -245,6 +245,16 @@ export {
|
||||||
label_values: labels_vector,
|
label_values: labels_vector,
|
||||||
measurement: double): bool;
|
measurement: double): bool;
|
||||||
|
|
||||||
|
## Interval at which the :zeek:see:`Telemetry::sync` hook is invoked.
|
||||||
|
##
|
||||||
|
## By default, the hook is invoked on demand, setting this option to
|
||||||
|
## a positive interval allows to invoke it regularly, too. Regular
|
||||||
|
## invocations are relative to Zeek's network time.
|
||||||
|
##
|
||||||
|
## Note that on-demand hook invocation will happen even if this
|
||||||
|
## is set.
|
||||||
|
option sync_interval = 0sec &deprecated="Remove in 8.1. If you require regular sync invocation, do so explicitly in a scheduled event.";
|
||||||
|
|
||||||
## Collect all counter and gauge metrics matching the given *name* and *prefix*.
|
## Collect all counter and gauge metrics matching the given *name* and *prefix*.
|
||||||
##
|
##
|
||||||
## For histogram metrics, use the :zeek:see:`Telemetry::collect_histogram_metrics`.
|
## For histogram metrics, use the :zeek:see:`Telemetry::collect_histogram_metrics`.
|
||||||
|
@ -286,12 +296,12 @@ function register_counter_family(opts: MetricOpts): CounterFamily
|
||||||
}
|
}
|
||||||
|
|
||||||
# Fallback Counter returned when there are issues with the labels.
|
# Fallback Counter returned when there are issues with the labels.
|
||||||
global error_counter_cf = register_counter_family(MetricOpts(
|
global error_counter_cf = register_counter_family([
|
||||||
$prefix="zeek",
|
$prefix="zeek",
|
||||||
$name="telemetry_counter_usage_error",
|
$name="telemetry_counter_usage_error",
|
||||||
$unit="",
|
$unit="",
|
||||||
$help_text="This counter is returned when label usage for counters is wrong. Check reporter.log if non-zero."
|
$help_text="This counter is returned when label usage for counters is wrong. Check reporter.log if non-zero."
|
||||||
));
|
]);
|
||||||
|
|
||||||
function counter_with(cf: CounterFamily, label_values: labels_vector): Counter
|
function counter_with(cf: CounterFamily, label_values: labels_vector): Counter
|
||||||
{
|
{
|
||||||
|
@ -345,12 +355,12 @@ function register_gauge_family(opts: MetricOpts): GaugeFamily
|
||||||
}
|
}
|
||||||
|
|
||||||
# Fallback Gauge returned when there are issues with the label usage.
|
# Fallback Gauge returned when there are issues with the label usage.
|
||||||
global error_gauge_cf = register_gauge_family(MetricOpts(
|
global error_gauge_cf = register_gauge_family([
|
||||||
$prefix="zeek",
|
$prefix="zeek",
|
||||||
$name="telemetry_gauge_usage_error",
|
$name="telemetry_gauge_usage_error",
|
||||||
$unit="",
|
$unit="",
|
||||||
$help_text="This gauge is returned when label usage for gauges is wrong. Check reporter.log if non-zero."
|
$help_text="This gauge is returned when label usage for gauges is wrong. Check reporter.log if non-zero."
|
||||||
));
|
]);
|
||||||
|
|
||||||
function gauge_with(gf: GaugeFamily, label_values: labels_vector): Gauge
|
function gauge_with(gf: GaugeFamily, label_values: labels_vector): Gauge
|
||||||
{
|
{
|
||||||
|
@ -414,13 +424,13 @@ function register_histogram_family(opts: MetricOpts): HistogramFamily
|
||||||
}
|
}
|
||||||
|
|
||||||
# Fallback Histogram when there are issues with the labels.
|
# Fallback Histogram when there are issues with the labels.
|
||||||
global error_histogram_hf = register_histogram_family(MetricOpts(
|
global error_histogram_hf = register_histogram_family([
|
||||||
$prefix="zeek",
|
$prefix="zeek",
|
||||||
$name="telemetry_histogram_usage_error",
|
$name="telemetry_histogram_usage_error",
|
||||||
$unit="",
|
$unit="",
|
||||||
$help_text="This histogram is returned when label usage for histograms is wrong. Check reporter.log if non-zero.",
|
$help_text="This histogram is returned when label usage for histograms is wrong. Check reporter.log if non-zero.",
|
||||||
$bounds=vector(1.0)
|
$bounds=vector(1.0)
|
||||||
));
|
]);
|
||||||
|
|
||||||
function histogram_with(hf: HistogramFamily, label_values: labels_vector): Histogram
|
function histogram_with(hf: HistogramFamily, label_values: labels_vector): Histogram
|
||||||
{
|
{
|
||||||
|
@ -455,18 +465,31 @@ function collect_histogram_metrics(prefix: string, name: string): vector of Hist
|
||||||
return Telemetry::__collect_histogram_metrics(prefix, name);
|
return Telemetry::__collect_histogram_metrics(prefix, name);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
event run_sync_hook()
|
||||||
|
{
|
||||||
|
hook Telemetry::sync();
|
||||||
|
@pragma push ignore-deprecations
|
||||||
|
schedule sync_interval { run_sync_hook() };
|
||||||
|
@pragma pop ignore-deprecations
|
||||||
|
}
|
||||||
|
|
||||||
# Expose the Zeek version as Prometheus style info metric
|
# Expose the Zeek version as Prometheus style info metric
|
||||||
global version_gauge_family = Telemetry::register_gauge_family(Telemetry::MetricOpts(
|
global version_gauge_family = Telemetry::register_gauge_family([
|
||||||
$prefix="zeek",
|
$prefix="zeek",
|
||||||
$name="version_info",
|
$name="version_info",
|
||||||
$unit="",
|
$unit="",
|
||||||
$help_text="The Zeek version",
|
$help_text="The Zeek version",
|
||||||
$label_names=vector("version_number", "major", "minor", "patch", "commit",
|
$label_names=vector("version_number", "major", "minor", "patch", "commit",
|
||||||
"beta", "debug","version_string")
|
"beta", "debug","version_string")
|
||||||
));
|
]);
|
||||||
|
|
||||||
event zeek_init()
|
event zeek_init()
|
||||||
{
|
{
|
||||||
|
@pragma push ignore-deprecations
|
||||||
|
if ( sync_interval > 0sec )
|
||||||
|
schedule sync_interval { run_sync_hook() };
|
||||||
|
@pragma pop ignore-deprecations
|
||||||
|
|
||||||
local v = Version::info;
|
local v = Version::info;
|
||||||
local labels = vector(cat(v$version_number),
|
local labels = vector(cat(v$version_number),
|
||||||
cat(v$major), cat(v$minor), cat (v$patch),
|
cat(v$major), cat(v$minor), cat (v$patch),
|
||||||
|
|
|
@ -15,13 +15,6 @@ export {
|
||||||
## HTTP. The default value means Zeek won't expose the port.
|
## HTTP. The default value means Zeek won't expose the port.
|
||||||
const metrics_port = 0/unknown &redef;
|
const metrics_port = 0/unknown &redef;
|
||||||
|
|
||||||
## Every metric automatically receives a label with the following name
|
|
||||||
## and the metrics_endpoint_name as value to identify the originating
|
|
||||||
## cluster node.
|
|
||||||
## The label was previously hard-code as "endpoint", and that's why
|
|
||||||
## the variable is called the way it is, but "node" is the better label.
|
|
||||||
const metrics_endpoint_label = "node" &redef;
|
|
||||||
|
|
||||||
## ID for the metrics exporter. This is used as the 'endpoint' label
|
## ID for the metrics exporter. This is used as the 'endpoint' label
|
||||||
## value when exporting data to Prometheus. In a cluster setup, this
|
## value when exporting data to Prometheus. In a cluster setup, this
|
||||||
## defaults to the name of the node in the cluster configuration.
|
## defaults to the name of the node in the cluster configuration.
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue