Merge remote-tracking branch 'origin/master' into topic/johanna/spicy-tls

* origin/master: (386 commits)
  Normalize version strings in test
  Update doc submodule [nomail] [skip ci]
  Update external testing baseline hashes
  fuzzers: Add DTLS fuzzer
  generic-analyzer-fuzzer: Support NextPacket() fuzzing
  Require `truncate` for a test using it
  Bump outdated baseline
  Fix tests so they work both with GNU and BSD tools
  Install libmaxminddb in macOS CI
  Bump auxil/spicy to latest release
  Supervisor: Handle EAGAIN error on stem pipe
  fuzzer-setup: Allow customization without recompiling
  ssl: Prevent unbounded ssl_history growth
  ssl: Cap number of alerts parsed from SSL record
  subdir-btest: Allow setting build_dir
  Update doc submodule [nomail] [skip ci]
  CI: Pass -A flag to btest for cluster-testing builds
  Update doc submodule [nomail] [skip ci]
  Update baselines
  ftp: Do not base seq on number of pending commands
  ...
Commit 0afe94154d by Johanna Amann, 2023-10-30 12:28:40 +00:00
800 changed files with 109788 additions and 98811 deletions


@ -11,6 +11,7 @@ btest_retries: &BTEST_RETRIES 2
memory: &MEMORY 16GB
config: &CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install --ccache
no_spicy_config: &NO_SPICY_CONFIG --build-type=release --disable-broker-tests --disable-spicy --prefix=$CIRRUS_WORKING_DIR/install --ccache
static_config: &STATIC_CONFIG --build-type=release --disable-broker-tests --enable-static-broker --enable-static-binpac --prefix=$CIRRUS_WORKING_DIR/install --ccache
asan_sanitizer_config: &ASAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=address --enable-fuzzers --enable-coverage --disable-spicy --ccache
ubsan_sanitizer_config: &UBSAN_SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=undefined --enable-fuzzers --disable-spicy --ccache
@ -44,13 +45,6 @@ freebsd_environment: &FREEBSD_ENVIRONMENT
ZEEK_CI_CPUS: 8
ZEEK_CI_BTEST_JOBS: 8
sanitizers_resource_template: &SANITIZERS_RESOURCE_TEMPLATE
cpu: 4
# Sanitizers uses a lot more memory than a typical config.
memory: 12GB
# For greediness, see https://medium.com/cirruslabs/introducing-greedy-container-instances-29aad06dc2b4
greedy: true
builds_only_if_template: &BUILDS_ONLY_IF_TEMPLATE
# Rules for skipping builds:
# - Do not run builds for anything that's cron triggered
@ -74,6 +68,15 @@ skip_task_on_pr: &SKIP_TASK_ON_PR
skip: >
($CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ '.*fullci.*')
benchmark_only_if_template: &BENCHMARK_ONLY_IF_TEMPLATE
# only_if condition for cron-triggered benchmarking tests.
# These currently do not run for release/.*
only_if: >
( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
( $CIRRUS_CRON == 'benchmark-nightly' ||
$CIRRUS_PR_LABELS =~ '.*fullci.*' ||
$CIRRUS_PR_LABELS =~ '.*benchmark.*' )
ci_template: &CI_TEMPLATE
<< : *BUILDS_ONLY_IF_TEMPLATE
@ -97,6 +100,7 @@ ci_template: &CI_TEMPLATE
init_external_repos_script: ./ci/init-external-repos.sh
pre_build_script: ./ci/pre-build.sh
build_script: ./ci/build.sh
test_script: ./ci/test.sh
@ -160,9 +164,6 @@ env:
# a solution for the mtime pruning above.
ZEEK_CCACHE_EPOCH: 2
# Cache Spicy JIT results.
HILTI_CXX_COMPILER_LAUNCHER: ccache
# Linux EOL timelines: https://linuxlifecycle.com/
# Fedora (~13 months): https://fedoraproject.org/wiki/Fedora_Release_Life_Cycle
@ -203,6 +204,8 @@ centos7_task:
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
env:
ZEEK_CI_CONFIGURE_FLAGS: *NO_SPICY_CONFIG
debian12_task:
container:
@ -225,6 +228,8 @@ arm_debian11_task:
dockerfile: ci/debian-11/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
env:
ZEEK_CI_CONFIGURE_FLAGS: *NO_SPICY_CONFIG
debian11_static_task:
container:
@ -250,6 +255,16 @@ opensuse_leap_15_4_task:
dockerfile: ci/opensuse-leap-15.4/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
env:
ZEEK_CI_CONFIGURE_FLAGS: *NO_SPICY_CONFIG
opensuse_leap_15_5_task:
container:
# Opensuse Leap 15.5 EOL: ~Dec 2024
dockerfile: ci/opensuse-leap-15.5/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
opensuse_tumbleweed_task:
container:
@ -260,10 +275,10 @@ opensuse_tumbleweed_task:
<< : *CI_TEMPLATE
# << : *SKIP_TASK_ON_PR
ubuntu2210_task:
ubuntu23_task:
container:
# Ubuntu 22.10 EOL: July 2023
dockerfile: ci/ubuntu-22.10/Dockerfile
# Ubuntu 23.04 EOL: January 2024
dockerfile: ci/ubuntu-23.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
@ -278,6 +293,44 @@ ubuntu22_task:
upload_binary_artifacts:
path: build.tgz
benchmark_script: ./ci/benchmark.sh
# Run on PRs, merges to master and release/.* and benchmark-nightly cron.
only_if: >
( $CIRRUS_PR != '' && $CIRRUS_BRANCH !=~ 'dependabot/.*' ) ||
( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
$CIRRUS_BRANCH == 'master' ||
$CIRRUS_BRANCH =~ 'release/.*' ||
$CIRRUS_CRON == 'benchmark-nightly' )
ubuntu22_spicy_task:
container:
# Ubuntu 22.04 EOL: April 2027
dockerfile: ci/ubuntu-22.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
env:
ZEEK_CI_CREATE_ARTIFACT: 1
test_script: true # Don't run tests; these are redundant.
spicy_install_analyzers_script: ./ci/spicy-install-analyzers.sh
upload_binary_artifacts:
path: build.tgz
benchmark_script: ./ci/benchmark.sh
<< : *BENCHMARK_ONLY_IF_TEMPLATE
ubuntu22_spicy_head_task:
container:
# Ubuntu 22.04 EOL: April 2027
dockerfile: ci/ubuntu-22.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
env:
ZEEK_CI_CREATE_ARTIFACT: 1
# Pull auxil/spicy to the latest head version. May or may not build.
ZEEK_CI_PREBUILD_COMMAND: 'cd auxil/spicy && git fetch && git reset --hard origin/main && git submodule update --init --recursive'
spicy_install_analyzers_script: ./ci/spicy-install-analyzers.sh
upload_binary_artifacts:
path: build.tgz
benchmark_script: ./ci/benchmark.sh
<< : *BENCHMARK_ONLY_IF_TEMPLATE
ubuntu20_task:
container:
@ -287,14 +340,6 @@ ubuntu20_task:
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
ubuntu18_task:
container:
# Ubuntu 18.04 EOL: April 2023
dockerfile: ci/ubuntu-18.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
alpine_task:
container:
# Alpine releases typically happen every 6 months w/ support for 2 years.
@ -337,7 +382,7 @@ freebsd14_task:
freebsd13_task:
freebsd_instance:
# FreeBSD 13 EOL: January 31, 2026
image_family: freebsd-13-1
image_family: freebsd-13-2
<< : *FREEBSD_RESOURCES_TEMPLATE
prepare_script: ./ci/freebsd/prepare.sh
@ -348,7 +393,7 @@ freebsd13_task:
freebsd12_task:
freebsd_instance:
# FreeBSD 12 EOL: June 30, 2024
image_family: freebsd-12-3
image_family: freebsd-12-2
<< : *FREEBSD_RESOURCES_TEMPLATE
prepare_script: ./ci/freebsd/prepare.sh
@ -360,7 +405,7 @@ asan_sanitizer_task:
container:
# Just uses a recent/common distro to run memory error/leak checks.
dockerfile: ci/ubuntu-20.04/Dockerfile
<< : *SANITIZERS_RESOURCE_TEMPLATE
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
test_fuzzers_script: ./ci/test-fuzzers.sh
@ -375,7 +420,7 @@ ubsan_sanitizer_task:
container:
# Just uses a recent/common distro to run undefined behavior checks.
dockerfile: ci/ubuntu-20.04/Dockerfile
<< : *SANITIZERS_RESOURCE_TEMPLATE
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
@ -387,28 +432,26 @@ ubsan_sanitizer_task:
ZEEK_TAILORED_UB_CHECKS: 1
UBSAN_OPTIONS: print_stacktrace=1
tsan_sanitizer_task:
container:
# Just uses a recent/common distro to run memory error/leak checks.
dockerfile: ci/ubuntu-20.04/Dockerfile
<< : *SANITIZERS_RESOURCE_TEMPLATE
# tsan_sanitizer_task:
# container:
# # Just uses a recent/common distro to run memory error/leak checks.
# dockerfile: ci/ubuntu-22.04/Dockerfile
# << : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
env:
ZEEK_CI_CONFIGURE_FLAGS: *TSAN_SANITIZER_CONFIG
ZEEK_CI_DISABLE_SCRIPT_PROFILING: 1
# If this is defined directly in the environment, configure fails to find
# OpenSSL. Instead we define it with a different name and then give it
# the correct name in the testing scripts.
ZEEK_TSAN_OPTIONS: suppressions=/zeek/ci/tsan_suppressions.txt
# << : *CI_TEMPLATE
# << : *SKIP_TASK_ON_PR
# env:
# ZEEK_CI_CONFIGURE_FLAGS: *TSAN_SANITIZER_CONFIG
# ZEEK_CI_DISABLE_SCRIPT_PROFILING: 1
# # If this is defined directly in the environment, configure fails to find
# # OpenSSL. Instead we define it with a different name and then give it
# # the correct name in the testing scripts.
# ZEEK_TSAN_OPTIONS: suppressions=/zeek/ci/tsan_suppressions.txt
windows_task:
# 2 hour timeout, just in case building the Docker image takes a while
timeout_in: 120m
windows_container:
# image: cirrusci/windowsservercore:cmake
# image: zeekurity/broker-ci-windows:latest
dockerfile: ci/windows/Dockerfile
os_version: 2019
cpu: 8
@ -453,6 +496,7 @@ docker_build_template: &DOCKER_BUILD_TEMPLATE
CIRRUS_LOG_TIMESTAMP: true
BUILDER_IMAGE_CACHE_DIR: /tmp/builder-image-cache
ZEEK_IMAGE_CACHE_DIR: /tmp/zeek-image-cache-${CIRRUS_ARCH}
BUILDKIT_PROGRESS: plain
always:
ccache_cache:
@ -608,9 +652,6 @@ container_image_manifest_docker_builder:
# Tag images and push to Docker Hub and AWS ECR "zeek" account
- ./ci/container-images-tag-and-push.sh
- REGISTRY_PREFIX=public.ecr.aws/ ./ci/container-images-tag-and-push.sh
# Continue to push tags to the "zeekurity" account as well.
- ZEEK_IMAGE_REPO=zeekurity ./ci/container-images-tag-and-push.sh
depends_on:
- arm64_container_image
- amd64_container_image
@ -654,7 +695,7 @@ cluster_testing_docker_builder:
test_script:
# Invoke btest directly here. This mirrors ci/test.sh, ensures we don't
# accidentally build a Docker image, and enables console-level output:
- cd testing/external/zeek-testing-cluster && ../../../auxil/btest/btest -d -b -j ${ZEEK_CI_BTEST_JOBS}
- cd testing/external/zeek-testing-cluster && ../../../auxil/btest/btest -A -d -b -j ${ZEEK_CI_BTEST_JOBS}
on_failure:
upload_cluster_testing_artifacts:
path: "testing/external/zeek-testing-cluster/.tmp/**"


@ -64,6 +64,14 @@
"_filepath": "*",
"_sympath": "*"
}
},
"spicy_add_analyzer": {
"kwargs": {
"NAME": "*",
"PACKAGE_NAME": "*",
"SOURCES": "*",
"MODULES": "*"
}
}
}
},


@ -77,13 +77,17 @@ jobs:
uses: hendrikmuhs/ccache-action@v1.2
with:
key: 'docs-gen-${{ github.job }}'
max-size: '2000M'
- name: Configure
run: ./configure --disable-broker-tests --disable-cpp-tests --disable-spicy --ccache
run: ./configure --disable-broker-tests --disable-cpp-tests --ccache
- name: Build
run: cd build && make -j $(nproc)
- name: Check Spicy docs
run: cd doc && make check-spicy-docs
- name: Generate Docs
run: |
git config --global user.name zeek-bot


@ -8,13 +8,13 @@ repos:
- id: clang-format
- repo: https://github.com/maxwinterstein/shfmt-py
rev: 3.3.1.8
rev: v3.7.0.1
hooks:
- id: shfmt
args: ["-w", "-i", "4", "-ci"]
- repo: https://github.com/pre-commit/mirrors-yapf
rev: v0.31.0
- repo: https://github.com/google/yapf
rev: v0.40.0
hooks:
- id: yapf
@ -25,7 +25,7 @@ repos:
exclude: '^auxil/.*$'
- repo: https://github.com/crate-ci/typos
rev: v1.15.0
rev: v1.16.8
hooks:
- id: typos
exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES)$'


@ -70,3 +70,5 @@ helo = "helo"
inout = "inout"
# Seems we use this in the management framework
requestor = "requestor"
# `inout` is used as a keyword in Spicy, but looks like a typo of `input`.
inout = "inout"

CHANGES (1625 lines changed): file diff suppressed because it is too large


@ -96,8 +96,11 @@ if (MSVC)
endforeach ()
# Set compilation flags for Windows
add_compile_options(/guard:cf # required by CheckCFlags
/Z7) # required by CheckCFlags
add_compile_options(
/guard:cf # required by CheckCFlags
/Z7 # required by CheckCFlags
/J # Similar to -funsigned-char on other platforms
/wd4068) # Disable unknown pragma warnings
add_link_options(/debug:full # required by CheckCFlags
)
@ -149,6 +152,7 @@ if (MSVC)
set(UNISTD_INCLUDES ${CMAKE_SOURCE_DIR}/auxil/libunistd/unistd
${CMAKE_SOURCE_DIR}/auxil/libunistd/regex)
include_directories(BEFORE ${UNISTD_INCLUDES})
# Required for `check_include_files` to operate correctly
list(APPEND CMAKE_REQUIRED_INCLUDES ${UNISTD_INCLUDES})
list(APPEND zeekdeps libunistd libregex)
@ -287,6 +291,10 @@ target_include_directories(
zeek_dynamic_plugin_base
INTERFACE $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>)
if (OPENSSL_INCLUDE_DIR)
target_include_directories(zeek_dynamic_plugin_base INTERFACE "${OPENSSL_INCLUDE_DIR}")
endif ()
target_link_libraries(zeek_dynamic_plugin_base INTERFACE Threads::Threads)
add_library(Zeek::DynamicPluginBase ALIAS zeek_dynamic_plugin_base)
set_target_properties(zeek_dynamic_plugin_base PROPERTIES EXPORT_NAME DynamicPluginBase)
install(TARGETS zeek_dynamic_plugin_base EXPORT ZeekTargets)
@ -464,7 +472,8 @@ set(zeek_script_install_path "${ZEEK_SCRIPT_INSTALL_PATH}")
if (MSVC)
# This has to happen before we modify the paths below so that the pure Windows
# paths are stored in the output file.
configure_file(zeek-path-dev.bat.in ${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev.bat)
configure_file(cmake_templates/zeek-path-dev.bat.in
${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev.bat)
string(REGEX REPLACE "^([A-Za-z]):/(.*)" "/\\1/\\2" cmake_binary_dir "${cmake_binary_dir}")
string(REGEX REPLACE "^([A-Za-z]):/(.*)" "/\\1/\\2" cmake_current_binary_dir
@ -504,7 +513,7 @@ install(DIRECTORY DESTINATION ${ZEEK_STATE_DIR})
install(DIRECTORY DESTINATION ${ZEEK_SPOOL_DIR})
install(DIRECTORY DESTINATION ${ZEEK_LOG_DIR})
configure_file(zeek-path-dev.in ${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev)
configure_file(cmake_templates/zeek-path-dev.in ${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev)
file(
WRITE ${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev.sh
@ -513,7 +522,8 @@ file(
"export PATH=\"${cmake_binary_dir}\":\"${cmake_binary_dir}/src\":\"${cmake_binary_dir}/auxil/spicy/bin\":\"${cmake_binary_dir}/src/spicy/spicyz\":$\{PATH\}\n"
"export SPICY_PATH=`${cmake_binary_dir}/spicy-path`\n"
"export HILTI_CXX_INCLUDE_DIRS=`${cmake_binary_dir}/hilti-cxx-include-dirs`\n"
"export ZEEK_SPICY_LIBRARY_PATH=${cmake_source_dir}/scripts/spicy\n")
"export ZEEK_SPICY_LIBRARY_PATH=${cmake_source_dir}/scripts/spicy\n"
"export SPICY_BUILD_DIRECTORY=${cmake_binary_dir}/auxil/spicy\n")
file(
WRITE ${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev.csh
@ -522,9 +532,19 @@ file(
"setenv PATH \"${cmake_binary_dir}\":\"${cmake_binary_dir}/src\":\"${cmake_binary_dir}/auxil/spicy/bin\":\"${cmake_binary_dir}/src/spicy/spicyz\":$\{PATH\}\n"
"setenv SPICY_PATH \"`${cmake_binary_dir}/spicy-path`\"\n"
"setenv HILTI_CXX_INCLUDE_DIRS \"`${cmake_binary_dir}/hilti-cxx-include-dirs`\"\n"
"setenv ZEEK_SPICY_LIBRARY_PATH \"${cmake_source_dir}/scripts/spicy\"\n")
"setenv ZEEK_SPICY_LIBRARY_PATH \"${cmake_source_dir}/scripts/spicy\"\n"
"setenv SPICY_BUILD_DIRECTORY \"${cmake_binary_dir}/auxil/spicy\"\n")
if (CMAKE_CXX_COMPILER_LAUNCHER)
file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev.sh
"export HILTI_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER}\n")
file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev.csh
"setenv HILTI_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER}\n")
endif ()
file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/VERSION" ZEEK_VERSION_FULL LIMIT_COUNT 1)
file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/VERSION" VERSION LIMIT_COUNT 1)
execute_process(
COMMAND grep "^constexpr int PLUGIN_API_VERSION"
INPUT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/src/plugin/Plugin.h
@ -534,7 +554,7 @@ execute_process(
string(REGEX MATCH "^constexpr int PLUGIN_API_VERSION = ([0-9]+);" API_VERSION "${API_VERSION}")
set(API_VERSION "${CMAKE_MATCH_1}")
string(REGEX REPLACE "[.-]" " " version_numbers ${VERSION})
string(REGEX REPLACE "[.-]" " " version_numbers ${ZEEK_VERSION_FULL})
separate_arguments(version_numbers)
list(GET version_numbers 0 VERSION_MAJOR)
list(GET version_numbers 1 VERSION_MINOR)
@ -543,7 +563,7 @@ set(VERSION_MAJ_MIN "${VERSION_MAJOR}.${VERSION_MINOR}")
math(EXPR ZEEK_VERSION_NUMBER
"${VERSION_MAJOR} * 10000 + ${VERSION_MINOR} * 100 + ${VERSION_PATCH}")
set(VERSION_C_IDENT "${VERSION}_plugin_${API_VERSION}")
set(VERSION_C_IDENT "${ZEEK_VERSION_FULL}_plugin_${API_VERSION}")
string(REGEX REPLACE "-[0-9]*$" "_git" VERSION_C_IDENT "${VERSION_C_IDENT}")
string(REGEX REPLACE "[^a-zA-Z0-9_\$]" "_" VERSION_C_IDENT "${VERSION_C_IDENT}")
@ -714,8 +734,6 @@ endif ()
# ##############################################################################
# Dependency Configuration
include(FindRequiredPackage)
# Check cache value first to avoid displaying "Found sed" messages everytime
if (NOT SED_EXE)
find_program(SED_EXE sed)
@ -726,16 +744,17 @@ if (NOT SED_EXE)
endif ()
endif ()
list(APPEND Python_ADDITIONAL_VERSIONS 3)
FindRequiredPackage(PythonInterp)
FindRequiredPackage(FLEX)
FindRequiredPackage(BISON)
FindRequiredPackage(PCAP)
FindRequiredPackage(OpenSSL)
set(ZEEK_PYTHON_MIN 3.5.0)
set(Python_FIND_UNVERSIONED_NAMES FIRST)
find_package(Python ${ZEEK_PYTHON_MIN} REQUIRED COMPONENTS Interpreter)
find_package(FLEX REQUIRED)
find_package(BISON 2.5 REQUIRED)
find_package(PCAP REQUIRED)
find_package(OpenSSL REQUIRED)
if (NOT MSVC)
FindRequiredPackage(BIND)
find_package(BIND REQUIRED)
endif ()
FindRequiredPackage(ZLIB)
find_package(ZLIB REQUIRED)
if (NOT BINARY_PACKAGING_MODE)
# TODO: Broker seems to always turn on static libraries. We don't want that for Spicy by default.
@ -744,7 +763,7 @@ endif ()
# Forward user-defined hint for OpenSSL to the plugins. Use a cache variable to
# make sure this variable survives CMake re-runs. Note: This variable is picked
# up in ZeekPluginConfig.cmake.in.
# up in ZeekPluginConfig.cmake.in and ZeekConfig.cmake.in.
if (OPENSSL_ROOT_DIR)
set(ZeekOpenSSLHint "${OPENSSL_ROOT_DIR}" CACHE INTERNAL "" FORCE)
elseif (DEFINED ENV{OPENSSL_ROOT_DIR})
@ -766,7 +785,7 @@ endif ()
if (ZEEK_PYTHON_DIR)
set(py_mod_install_dir ${ZEEK_PYTHON_DIR})
elseif (ZEEK_PYTHON_PREFIX)
set(pyver ${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR})
set(pyver ${Python_VERSION_MAJOR}.${Python_VERSION_MINOR})
set(py_mod_install_dir ${ZEEK_PYTHON_PREFIX}/lib/python${pyver}/site-packages)
elseif (ZEEK_PYTHON_HOME)
set(py_mod_install_dir ${ZEEK_PYTHON_HOME}/lib/python)
@ -794,7 +813,8 @@ if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/auxil/binpac/CMakeLists.txt)
# TODO in ZeekPluginConfig.cmake.in.
set(BINPAC_EXE_PATH "${CMAKE_BINARY_DIR}/auxil/binpac/src/binpac${CMAKE_EXECUTABLE_SUFFIX}")
endif ()
FindRequiredPackage(BinPAC)
find_package(BinPAC REQUIRED)
# Add an alias (used by our plugin setup).
add_executable(Zeek::BinPAC ALIAS binpac)
@ -834,25 +854,6 @@ if (ENABLE_JEMALLOC)
endif ()
endif ()
if (BISON_VERSION AND BISON_VERSION VERSION_LESS 2.5)
set(MISSING_PREREQS true)
list(APPEND MISSING_PREREQ_DESCS
" Could not find prerequisite package Bison >= 2.5, found: ${BISON_VERSION}")
endif ()
if (MISSING_PREREQS)
foreach (prereq ${MISSING_PREREQ_DESCS})
message(SEND_ERROR ${prereq})
endforeach ()
message(FATAL_ERROR "Configuration aborted due to missing prerequisites")
endif ()
set(ZEEK_PYTHON_MIN 3.5.0)
if (PYTHON_VERSION_STRING VERSION_LESS ${ZEEK_PYTHON_MIN})
message(FATAL_ERROR "Python ${ZEEK_PYTHON_MIN} or greater is required.")
endif ()
add_subdirectory(auxil/paraglob)
if (MSVC)
cmake_policy(SET CMP0079 NEW)
@ -932,10 +933,27 @@ if (NOT DISABLE_SPICY)
# disable it.
set(HILTI_DEV_PRECOMPILE_HEADERS OFF)
# Remove in v6.2. Older versions of Spicy use find_package(Python3),
# accommodate by setting the Python3_EXECUTABLE hint.
if (Python_EXECUTABLE)
set(Python3_EXECUTABLE ${Python_EXECUTABLE} CACHE STRING "Python3_EXECUTABLE hint")
endif ()
add_subdirectory(auxil/spicy)
include(ConfigureSpicyBuild) # set some options different for building Spicy
zeek_add_dependencies(spicy)
# Explicitly link against Spicy libraries if we are packaging. Since
# Zeek's binary packaging mode still leaves `BUILD_SHARED_LIBS` set we
# cannot use the branching inside `hilti_link_libraries_in_tree` and
# instead explicitly branch on `BINARY_PACKAGING_MODE` here.
if (BINARY_PACKAGING_MODE)
hilti_link_object_libraries_in_tree(zeek_exe PRIVATE)
else ()
hilti_link_libraries_in_tree(zeek_exe PRIVATE)
endif ()
set(HAVE_SPICY yes)
endif ()
@ -1154,8 +1172,8 @@ endif ()
set(ZEEK_CONFIG_BTEST_TOOLS_DIR ${ZEEK_ROOT_DIR}/share/btest)
install(DIRECTORY DESTINATION ${ZEEK_CONFIG_BTEST_TOOLS_DIR})
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/zeek-config.in ${CMAKE_CURRENT_BINARY_DIR}/zeek-config
@ONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake_templates/zeek-config.in
${CMAKE_CURRENT_BINARY_DIR}/zeek-config @ONLY)
install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/zeek-config DESTINATION bin)
install(
@ -1179,8 +1197,8 @@ if (INSTALL_ZKG)
set(ZEEK_ZKG_CONFIG_DIR "${ZEEK_ETC_INSTALL_DIR}/zkg")
set(ZEEK_ZKG_STATE_DIR "${ZEEK_STATE_DIR}/zkg")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/zkg-config.in ${CMAKE_CURRENT_BINARY_DIR}/zkg-config
@ONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake_templates/zkg-config.in
${CMAKE_CURRENT_BINARY_DIR}/zkg-config @ONLY)
install(DIRECTORY DESTINATION var/lib/zkg)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zkg-config DESTINATION ${ZEEK_ZKG_CONFIG_DIR}
@ -1270,19 +1288,20 @@ checkoptionalbuildsources(auxil/zeek-client ZeekClient INSTALL_ZEEK_CLIENT)
# Generate Spicy helper scripts referenced in e.g., `zeek-path-dev.*`. These
# set Spicy-side environment variables to run it out of the build directory.
configure_file(${CMAKE_SOURCE_DIR}/spicy-path.in ${CMAKE_BINARY_DIR}/spicy-path @ONLY)
configure_file(${CMAKE_SOURCE_DIR}/hilti-cxx-include-dirs.in
configure_file(${CMAKE_SOURCE_DIR}/cmake_templates/spicy-path.in ${CMAKE_BINARY_DIR}/spicy-path
@ONLY)
configure_file(${CMAKE_SOURCE_DIR}/cmake_templates/hilti-cxx-include-dirs.in
${CMAKE_BINARY_DIR}/hilti-cxx-include-dirs @ONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/zeek-version.h.in
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake_templates/zeek-version.h.in
${CMAKE_CURRENT_BINARY_DIR}/zeek-version.h)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zeek-version.h DESTINATION include/zeek)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/zeek-config.h.in
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake_templates/zeek-config.h.in
${CMAKE_CURRENT_BINARY_DIR}/zeek-config.h)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zeek-config.h DESTINATION include/zeek)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/zeek-config-paths.h.in
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake_templates/zeek-config-paths.h.in
${CMAKE_CURRENT_BINARY_DIR}/zeek-config-paths.h)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zeek-config-paths.h DESTINATION include/zeek)
@ -1299,7 +1318,7 @@ endif ()
# has already configured packaging
if ("${PROJECT_SOURCE_DIR}" STREQUAL "${CMAKE_SOURCE_DIR}")
include(ConfigurePackaging)
ConfigurePackaging(${VERSION})
ConfigurePackaging(${ZEEK_VERSION_FULL})
endif ()
# Refers back to the "distribution prefix". This is the source tree when

NEWS (216 lines changed)

@ -3,34 +3,15 @@ This document summarizes the most important changes in the current Zeek
release. For an exhaustive list of changes, see the ``CHANGES`` file
(note that submodules, such as Broker, come with their own ``CHANGES``).
Zeek 6.1.0
Zeek 6.2.0
==========
Breaking Changes
----------------
- ``assert`` is now a reserved keyword for the new ``assert`` statement.
New Functionality
-----------------
- Added a new ``assert`` statement for assertion based testing and asserting
runtime state.
assert <expr: bool>[, <message: string>];
This statement comes with two hooks. First, ``assertion_failure()`` that
is invoked for every failing assert statement. Second, ``assertion_result()``
which is invoked for every assert statement and its outcome. The latter allows
to construct a summary of failing and passing assert statements. Both hooks
receive the location and call stack for the ``assert`` statement via a
``Backtrace`` vector.
A failing assert will abort execution of the current event handler similar
to scripting errors. By default, a reporter error message is logged. Using
the break statement within ``assertion_failure()`` or ``assertion_result()``
allows to suppress the default message.
Changed Functionality
---------------------
@ -40,6 +21,197 @@ Removed Functionality
Deprecated Functionality
------------------------
Zeek 6.1.0
==========
Breaking Changes
----------------
- ``assert`` is now a reserved keyword for the new ``assert`` statement.
- The ``__bro_plugin__`` file that gets generated as part of plugin builds was
renamed to ``__zeek_plugin__``. This will affect the ability of older
versions of ``zkg`` to use the ``zkg unload`` and ``zkg load`` commands. This
should only cause breakage for people using a version of ``zkg`` that doesn't
come bundled with Zeek (which we generally don't recommend doing).
- Zeek does not traverse into dot directories to find plugins or hlto files
anymore. Any dot directories found below the directories specified in
ZEEK_PLUGIN_PATH or ZEEK_SPICY_MODULE_PATH are now skipped. Dot directories
explicitly listed in ZEEK_PLUGIN_PATH or ZEEK_SPICY_MODULE_PATH are not
skipped.
- External plugins will fail to configure if their minimum required CMake
version is below 3.15. This was a warning with Zeek 6.0, but has caused user
confusion due to unhelpful error messages around the IN_LIST operator policy.
- The FindBISON, FindOpenSSL, FindPackageHandleStandardArgs, FindPackageMessage,
and SelectLibraryConfigurations cmake files were removed from our cmake
repository in favor of the versions that come with CMake. This should not
cause any breakage, but it is possible if someone was using these files in
a plugin.
New Functionality
-----------------
- Zeek now includes the LDAP protocol analyzer from the zeek/spicy-ldap project
(https://github.com/zeek/spicy-ldap). This analyzer is enabled by default. The
analyzer's events and its ``ldap.log`` and ``ldap_search.log`` should be
considered preliminary and experimental until the arrival of Zeek's next
long-term-stable release (7.0).
If you observe unusually high CPU consumption or other issues due to this
analyzer being enabled by default, the easiest way to disable it is via the
``Analyzer::disabled_analyzers`` const as follows:
redef Analyzer::disabled_analyzers += {
Analyzer::ANALYZER_LDAP_UDP,
Analyzer::ANALYZER_LDAP_TCP,
};
Please do report issues to us, including diagnostic information, in case
disabling the analyzer is necessary in your environment. We're also open to
general feedback about the structure of the new logs.
- Zeek now includes the QUIC protocol analyzer from the zeek/spicy-quic project
(https://github.com/zeek/spicy-quic). This project is a fork of Fox-IT's
initial implementation (https://github.com/fox-ds/spicy-quic).
As for the LDAP analyzer, the analyzer's events and the new ``quic.log``
should be considered preliminary and experimental until the arrival of Zeek's
next long-term-stable release (7.0). As above, any feedback and contributions
to this analyzer and the new log are welcome.
The analyzer's functionality is limited to decryption of the INITIAL packets
of QUIC version 1. If decryption of these packets is successful, the handshake
data is forwarded to Zeek's SSL analyzer, and an entry will appear in
``ssl.log`` for QUIC connections. The entry in ``conn.log`` will contain
``quic`` and ``ssl`` in the service field.
To disable the analyzer in case of issues, use the following snippet:
redef Analyzer::disabled_analyzers += {
Analyzer::ANALYZER_QUIC,
};
- Added a new ``assert`` statement for assertion based testing and asserting
runtime state.
assert <expr: bool>[, <message: string>];
This statement comes with two hooks: ``assertion_failure()``, invoked for
every failing assert statement, and ``assertion_result()``, invoked for every
assert statement and its outcome. The latter allows constructing a summary of
failing and passing assert statements. Both hooks receive the location and
call stack for the ``assert`` statement via a ``Backtrace`` vector.
A failing assert will abort execution of the current event handler, similar to
scripting errors. By default, a reporter error message is logged. Using the
``break`` statement within ``assertion_failure()`` or ``assertion_result()``
suppresses the default message.
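For example, a minimal sketch (the hook signature follows the description
above; the handler body and condition are illustrative):

  hook assertion_failure(cond: string, msg: string, bt: Backtrace)
      {
      print fmt("assert '%s' failed: %s", cond, msg);
      break;  # suppress the default reporter error
      }

  event zeek_init()
      {
      assert to_count("42") == 42, "to_count roundtrip";
      }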
- Add a new ``&default_insert`` attribute for tables. This behaves as
``&default`` with the addition that the default value is inserted into the
table upon a failed lookup. Particularly for tables with nested container
values, the ``&default`` behavior of not inserting the value can be of little
use.
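A minimal sketch of the difference (the table name is illustrative):

  global conns_by_host: table[addr] of set[port] &default_insert=set();

  event zeek_init()
      {
      # With plain &default, the empty set produced by the failed lookup
      # would not be stored in the table, so this update would be lost.
      add conns_by_host[192.168.0.1][443/tcp];
      print |conns_by_host|;  # 1: the defaulted set was inserted
      }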
- The ``from_json()`` function now takes an optional ``key_func`` argument to
normalize JSON object key names. This can be useful if the keys in a JSON
object are not valid Zeek identifiers or collide with reserved keywords.
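A minimal sketch (record type and input are illustrative; it assumes the
``from_json`` result record exposes ``valid`` and ``v`` fields):

  type Person: record {
      user_name: string;
  };

  event zeek_init()
      {
      local fix_keys = function(s: string): string { return gsub(s, /-/, "_"); };
      local res = from_json("{\"user-name\": \"alice\"}", Person, fix_keys);
      if ( res$valid )
          print (res$v as Person)$user_name;  # alice
      }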
- Module names are now included in ``global_ids()``. Their key in the returned
table is prefixed with "module " and their value will have the ``type_name``
field set to "module".
- Identifiers in the global scope can now be referenced and defined from within
modules by prefixing their names with ``::``. Previously, these required an
explicit ``GLOBAL::`` prefix to be used. Using ``GLOBAL::`` has been
deprecated.
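For example (identifier names are illustrative):

  global counter = 0;  # global scope

  module MyModule;

  event zeek_init()
      {
      ::counter += 1;  # previously written as GLOBAL::counter
      }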
- The ``as`` keyword now supports casting between ``set`` and ``vector`` values
with the same element type. Converting ``set`` values with multiple index
values is not supported. We plan to extend the use of the ``as`` keyword to
support more type conversions in the future.
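A minimal sketch of the new conversions:

  event zeek_init()
      {
      local s: set[count] = set(3, 1, 2);
      local v = s as vector of count;  # element order is unspecified
      local s2 = v as set[count];
      print |v|, |s2|;  # 3, 3
      }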
- Added a new packet analyzer handling the DLT_PPP link type in PCAP files.
- Fixed appending of ``any`` to ``vector of any``.
- The ModBus analyzer's function support was expanded, with new handling of the
Encapsulation Interface Transport (function 28) and Diagnostics (function 8)
functions. This adds new ``modbus_encap_interface_transport_{request,response}``
and ``modbus_diagnostics_{request,response}`` events.
- The ModBus file record read and write events now provide the full data from
the request and response messages as part of the event data.
- The full PDU length was added to the ``ModbusHeaders`` record type passed with
all of the ModBus events.
Changed Functionality
---------------------
- A connection's value is now updated in-place when its directionality is
flipped due to Zeek's heuristics (for example, SYN/SYN-ACK reversal or
protocol-specific approaches). Previously, a connection's value was discarded
when flipped, including any values set in a ``new_connection()`` handler. A
new ``connection_flipped()`` event has been added to allow updating custom
state in script-land.
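A sketch of reacting to the new event:

  event connection_flipped(c: connection)
      {
      # Values set in new_connection() now survive the flip; adjust any
      # direction-dependent custom state here.
      print "flipped", c$uid;
      }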
- Loading ``policy/frameworks/notice/community-id.zeek`` now also automatically
enables community ID logging. In the past, loading the script had no effect
unless ``policy/protocols/conn/community-id-logging.zeek`` was loaded before.
This was fairly unusual and hard-to-debug behavior.
- Connections to broadcast addresses are not flipped based on
``likely_server_ports`` anymore. Previously, broadcast packets originating
from a likely server port resulted in 255.255.255.255 being the originator in
``conn.log``.
- When too many HTTP requests are pending, Zeek will now log them at once and
reset request/response correlation instead of running into unbounded state
growth. This behavior is configurable via a new option
``HTTP::max_pending_requests``. The default is ``100``.
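For example, to raise the cap (a one-line sketch):

  redef HTTP::max_pending_requests = 500;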
- Fix deferred initialization of nested records containing non-const &default
attributes.
- Parameter lists for functions, events and hooks now use commas instead of
semicolons in error messages or when printing such functions.
- The IO buffer size used for PCAP file reading now defaults to 128kb. This new
default can be changed via ``Pcap::bufsize_offline_bytes``.
- The input framework now provides better information in error messages when
encountering a missing non-optional field while loading data.
- The SSL analyzer will now parse a configurable maximum of 10 SSL Alerts per
SSL message. For TLS 1.3, the maximum is implicitly 1 as defined by RFC 8446.
If there are more alerts, a new weird "SSL_excessive_alerts_in_record" is raised.
For non-TLS 1.3, the maximum can be redefined via ``SSL::max_alerts_per_record``.
- The ``ssl_history`` field in the ssl.log is now capped at a configurable
limit of 100 characters to prevent unbounded growth. The limit can be changed
via the option ``SSL::max_ssl_history_length``. When it is reached, a new
weird named "SSL_max_ssl_history_length_reached" is raised.
Deprecated Functionality
------------------------
- Accessing globals with ``GLOBAL::name`` has been deprecated and will be
removed with Zeek 7.1. Use ``::name`` instead.
- The original ``trigger::Trigger`` constructor has been deprecated and will be
removed with Zeek 7.1. Use the new alternative constructor (per
``src/Trigger.h``) instead, including replacing any use of ``new ...`` with
``make_intrusive<...>``. The new constructor differs only in the placement of
the ``timeout`` parameter, and in that - unlike the original - it always
returns a valid pointer, which must be Unref()'d after construction, either
explicitly (if using ``new``) or implicitly (if using
``make_intrusive<...>``).
Zeek 6.0.0
==========
@ -110,6 +282,10 @@ Breaking Changes
depending on the functionality included in the plugin, may trigger subsequent
errors during configuration or build.
- Zeek container images are not pushed to the zeekurity organization anymore.
Please switch to using the ``zeek/zeek`` image on DockerHub, or the images
published to ``public.ecr.aws/zeek/zeek``.
- The IRC_Data analyzer declaration has been moved to protocols/irc/IRC.h.
- The error message returned when using ``bro_init``, ``bro_done``, and


@ -1 +1 @@
6.1.0-dev.115
6.2.0-dev.78

@ -1 +1 @@
Subproject commit 183a0c7fb0e04b843e4a2bafbee44117001228e6
Subproject commit c7bf54c587439d3bcb16d53b0d77a702e48d2526

@ -1 +1 @@
Subproject commit fad1f7322209d93cfa67be3420aecb441a90468b
Subproject commit 84b730fdcc5b983c65c6226ec092aee66c486680

@ -1 +1 @@
Subproject commit 2a1d3232b75b530a0cd7df4b376ca3608fe8b9df
Subproject commit 86ed39b80c272cff44bb95f58eeda1bc5ff6f65c

@ -1 +1 @@
Subproject commit 0c3cafb2ed638f88a446732fa03d90af9bcf796c
Subproject commit 46f982cd6fafd34639c2f97628a57f1457f7e56a

@ -1 +1 @@
Subproject commit 2aa086f822aad5017a6f2061ef656f237a62d0ed
Subproject commit e8fe27eaa841ed32a4f82bef7a72c4e04e7f86a7

@ -1 +1 @@
Subproject commit 16841c95849d4d82f239bfb0c46bc217af368da2
Subproject commit cbba05dbaa58fdabe863f4e8a122ca92809b52d6

@ -1 +1 @@
Subproject commit 2766a0c45a6dbcdcf26cd1209a73a13323854961
Subproject commit b38e9c8ebff08959a712a5663ba25e0624a3af00

@ -1 +1 @@
Subproject commit e1d8271af1f499b3d072c99ae717d2593f551645
Subproject commit b6149ba03253bbf79dce573d5b2a2a34511b5bd9

@ -1 +1 @@
Subproject commit 6f4375867083805513a70feb32a626df40039256
Subproject commit 7b8eff527f60ec58eff3242253bdc1f5f1fccbef

@ -1 +1 @@
Subproject commit acd8e365c652ea6113b70fbbb1339d42e496819d
Subproject commit a3fe59b3f1ded5c3461995134b66c6db182fa56f

@ -1 +1 @@
Subproject commit 98e5bd2a4c6c23f6bc3a70bdfe26d17c084c8ab6
Subproject commit f9f5dcb5b3808137c2086d9b7415e7e32bb91063

@ -1 +1 @@
Subproject commit 4b4d4242e6ebebe41bee0e3ba262cb453b02f88f
Subproject commit 6c8cb3e1c475424880eae968f812805fdbd95cea

@ -1 +1 @@
Subproject commit 42341843df09dd7867b8b4ce3059ebd3ebd67278
Subproject commit 81e8c48fea6171d49e66e371ae46437c7ee63a74

@ -1 +1 @@
Subproject commit c1cb44eb709e15ef16844c6a8648ed35017409e1
Subproject commit b3e5de0aa5fb386318709c81eb364e81c696af14


@ -2,7 +2,7 @@ FROM alpine:latest
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20230113
ENV DOCKERFILE_VERSION 20230823
RUN apk add --no-cache \
bash \


@ -39,10 +39,19 @@ curl -sS -G --stderr - --fail --insecure -X POST \
-o "/zeek/benchmark-${TIMESTAMP}.log" \
-H "Zeek-HMAC: ${HMAC_DIGEST}" \
-H "Zeek-HMAC-Timestamp: ${TIMESTAMP}" \
--data-urlencode branch=${CIRRUS_BRANCH} \
--data-urlencode build=${BUILD_URL} \
--data-urlencode build_hash=${BUILD_HASH} \
--data-urlencode commit=${CIRRUS_CHANGE_IN_REPO} \
--data-urlencode "branch=${CIRRUS_BRANCH}" \
--data-urlencode "build=${BUILD_URL}" \
--data-urlencode "build_hash=${BUILD_HASH}" \
--data-urlencode "commit=${CIRRUS_CHANGE_IN_REPO}" \
--data-urlencode "cirrus_repo_owner=${CIRRUS_REPO_OWNER}" \
--data-urlencode "cirrus_repo_name=${CIRRUS_REPO_NAME}" \
--data-urlencode "cirrus_task_id=${CIRRUS_TASK_ID}" \
--data-urlencode "cirrus_task_name=${CIRRUS_TASK_NAME}" \
--data-urlencode "cirrus_build_id=${CIRRUS_BUILD_ID}" \
--data-urlencode "cirrus_pr=${CIRRUS_PR}" \
--data-urlencode "cirrus_pr_labels=${CIRRUS_PR_LABELS}" \
--data-urlencode "github_check_suite_id=${GITHUB_CHECK_SUITE_ID}" \
--data-urlencode "repo_version=$(cat ./VERSION)" \
"${TARGET}"
STATUS=$?


@ -2,7 +2,7 @@ FROM centos:7
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20230312
ENV DOCKERFILE_VERSION 20230807
ENV FLEX_VERSION=2.6.4
ENV FLEX_DIR=/opt/flex
@ -39,9 +39,10 @@ RUN yum -y install \
make \
openssl \
openssl-devel \
python3 \
python3-devel \
python3-pip\
rh-python38 \
rh-python38-devel \
rh-python38-python-devel \
rh-python38-pip \
sqlite \
swig \
which \
@ -59,6 +60,7 @@ RUN pip3 install websockets junit2html
RUN echo 'unset BASH_ENV PROMPT_COMMAND ENV' > /usr/bin/zeek-ci-env && \
echo 'source /opt/rh/devtoolset-8/enable' >> /usr/bin/zeek-ci-env && \
echo 'source /opt/rh/rh-python38/enable' >> /usr/bin/zeek-ci-env && \
echo 'export PATH=${PATH}:${FLEX_DIR}/bin' >> /usr/bin/zeek-ci-env
ENV BASH_ENV="/usr/bin/zeek-ci-env" \


@ -2,7 +2,7 @@ FROM quay.io/centos/centos:stream8
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20230320
ENV DOCKERFILE_VERSION 20230801
RUN dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
RUN dnf config-manager --set-enabled powertools
@ -21,9 +21,9 @@ RUN dnf -y install \
openssl \
openssl-devel \
procps-ng \
python3 \
python3-devel \
python3-pip\
python38 \
python38-devel \
python38-pip\
sqlite \
swig \
which \


@ -2,7 +2,7 @@ FROM quay.io/centos/centos:stream9
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20220519
ENV DOCKERFILE_VERSION 20230801
# dnf config-manager isn't available at first, and
# we need it to install the CRB repo below.


@ -170,7 +170,7 @@ def main():
zeek_dir = pathlib.Path(args.dir).absolute()
if not (zeek_dir / "zeek-config.h.in").exists():
if not (zeek_dir / "cmake_templates" / "zeek-config.h.in").exists():
logger.error("%s missing zeek-config.h.in", zeek_dir)
return 1


@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20220519
ENV DOCKERFILE_VERSION 20230801
ENV CMAKE_DIR "/opt/cmake"
ENV CMAKE_VERSION "3.19.1"


@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20220519
ENV DOCKERFILE_VERSION 20230801
RUN apt-get update && apt-get -y install \
bison \


@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20230413
ENV DOCKERFILE_VERSION 20230801
RUN apt-get update && apt-get -y install \
bison \


@ -2,7 +2,7 @@ FROM fedora:37
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20230413
ENV DOCKERFILE_VERSION 20230801
RUN dnf -y install \
bison \


@ -2,7 +2,7 @@ FROM fedora:38
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20230428
ENV DOCKERFILE_VERSION 20230801
RUN dnf -y install \
bison \


@ -7,7 +7,7 @@ set -x
brew update
brew upgrade cmake
brew install openssl@3 swig bison flex ccache
brew install openssl@3 swig bison flex ccache libmaxminddb
python3 -m pip install --user websockets
# Brew doesn't create the /opt/homebrew/opt/openssl symlink if you install


@ -2,7 +2,7 @@ FROM opensuse/leap:15.4
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20220615
ENV DOCKERFILE_VERSION 20230801
RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.4:Update/standard/openSUSE:Leap:15.4:Update.repo \
&& zypper refresh \
@ -19,15 +19,19 @@ RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.4
libopenssl-devel \
libpcap-devel \
make \
python3 \
python3-devel \
python3-pip \
python39 \
python39-devel \
python39-pip \
swig \
tar \
which \
zlib-devel \
&& rm -rf /var/cache/zypp
RUN update-alternatives --install /usr/bin/pip3 pip3 /usr/bin/pip3.9 100
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 100
RUN update-alternatives --install /usr/bin/python3-config python3-config /usr/bin/python3.9-config 100
RUN pip3 install websockets junit2html
RUN update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-10 100


@ -0,0 +1,38 @@
FROM opensuse/leap:15.5
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20230905
RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.5:Update/standard/openSUSE:Leap:15.5:Update.repo \
&& zypper refresh \
&& zypper in -y \
bison \
ccache \
cmake \
curl \
flex \
gcc12 \
gcc12-c++ \
git \
gzip \
libopenssl-devel \
libpcap-devel \
make \
python311 \
python311-devel \
python311-pip \
swig \
tar \
which \
zlib-devel \
&& rm -rf /var/cache/zypp
RUN update-alternatives --install /usr/bin/pip3 pip3 /usr/bin/pip3.11 100
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 100
RUN update-alternatives --install /usr/bin/python3-config python3-config /usr/bin/python3.11-config 100
RUN pip3 install websockets junit2html
RUN update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-12 100
RUN update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-12 100


@ -2,7 +2,7 @@ FROM opensuse/tumbleweed
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20230620
ENV DOCKERFILE_VERSION 20230801
# Remove the repo-openh264 repository, it caused intermittent issues
# and we should not be needing any packages from it.

ci/pre-build.sh (new executable file, 8 lines)

@ -0,0 +1,8 @@
#!/usr/bin/env bash
#
# If ZEEK_CI_PREBUILD_COMMAND is not empty, execute it via bash -c. That's it.
set -ex
if [ -n "$ZEEK_CI_PREBUILD_COMMAND" ]; then
bash -c "$ZEEK_CI_PREBUILD_COMMAND"
fi

ci/spicy-install-analyzers.sh (new executable file, 31 lines)

@ -0,0 +1,31 @@
#! /usr/bin/env bash
#
# Shell script to install the latest version of certain
# Spicy analyzers using zkg *and* repackage build.tgz.
# This script should run after build.sh, but before the
# artifact upload happens.
set -eux
test -d ${CIRRUS_WORKING_DIR}/install
# Install prefix
PREFIX=${CIRRUS_WORKING_DIR}/install
export PATH=$PREFIX/bin:$PATH
zkg --version
ANALYZERS="
https://github.com/zeek/spicy-dhcp
https://github.com/zeek/spicy-dns
https://github.com/zeek/spicy-http
"
for analyzer in $ANALYZERS; do
echo Y | zkg -vvvvv install "${analyzer}"
done
# After installing analyzers, package up build.tgz (representing
# the contents of the installation directory). This overwrites any
# existing artifact created by build.sh
tar -czf ${CIRRUS_WORKING_DIR}/build.tgz ${CIRRUS_WORKING_DIR}/install


@ -17,6 +17,7 @@ race:std::ctype<char>::narrow
race:broker::internal::connector::run_impl
race:caf::net::multiplexer::set_thread_id
race:caf::action::run
mutex:caf::detail::ringbuffer<std::unique_ptr<caf::detail::thread_safe_actor_clock::schedule_entry, std::default_delete<caf::detail::thread_safe_actor_clock::schedule_entry> >, 64ul>::push_back
# This one causes supervisor.config-bare-mode to fail occasionally but not always
signal:caf::actor_control_block::enqueue
@ -32,3 +33,12 @@ race:sqlite3_initialize
# This one isn't actually in sqlite code, but some StringVal object gets ref'd by
# zeek::id::find_const and throws a data race.
race:zeek::logging::writer::detail::SQLite::DoInit
# These findings were suppressed after the CI build was upgraded to Ubuntu 22.04.
# They weren't reported by prior compiler versions.
race:zeek::threading::MsgThread::RetrieveIn
race:zeek::threading::MsgThread::Run
race:zeek::threading::InputMessage<zeek::threading::MsgThread>::Object
mutex:zeek::threading::Queue<zeek::threading::BasicInputMessage*>::Put
mutex:zeek::threading::Queue<zeek::threading::BasicInputMessage*>::LocksForAllQueues
deadlock:zeek::threading::Queue<zeek::threading::BasicInputMessage*>::LocksForAllQueues


@ -1,48 +0,0 @@
FROM ubuntu:18.04
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20220519
ENV CMAKE_DIR "/opt/cmake"
ENV CMAKE_VERSION "3.19.1"
ENV PATH "${CMAKE_DIR}/bin:${PATH}"
RUN apt-get update && apt-get -y install \
bc \
bison \
bsdmainutils \
ccache \
clang-10 \
curl \
flex \
git \
lcov \
libkrb5-dev \
libmaxminddb-dev \
libpcap-dev \
libssl-dev \
make \
python3 \
python3-dev \
python3-pip\
ruby \
sqlite3 \
swig \
unzip \
wget \
zlib1g-dev \
&& apt-get autoclean \
&& rm -rf /var/lib/apt/lists/*
# Recent CMake.
RUN mkdir -p "${CMAKE_DIR}" \
&& curl -sSL "https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz" | tar xzf - -C "${CMAKE_DIR}" --strip-components 1
RUN pip3 install websockets junit2html
RUN gem install coveralls-lcov
RUN update-alternatives --install /usr/bin/cc cc /usr/bin/clang-10 100
RUN update-alternatives --install /usr/bin/c++ c++ /usr/bin/clang++-10 100


@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20220519
ENV DOCKERFILE_VERSION 20230801
RUN apt-get update && apt-get -y install \
bc \


@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20220614
ENV DOCKERFILE_VERSION 20230801
RUN apt-get update && apt-get -y install \
bc \
@ -25,7 +25,9 @@ RUN apt-get update && apt-get -y install \
make \
python3 \
python3-dev \
python3-git \
python3-pip\
python3-semantic-version \
ruby \
sqlite3 \
swig \


@ -1,10 +1,10 @@
FROM ubuntu:22.10
FROM ubuntu:23.04
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20230413
ENV DOCKERFILE_VERSION 20230828
RUN apt-get update && apt-get -y install \
bc \
@ -17,17 +17,15 @@ RUN apt-get update && apt-get -y install \
g++ \
gcc \
git \
lcov \
libkrb5-dev \
libmaxminddb-dev \
libnode-dev \
libpcap-dev \
libssl-dev \
libuv1-dev \
make \
python3 \
python3-dev \
python3-pip \
python3-websockets \
ruby \
sqlite3 \
swig \
@ -37,5 +35,4 @@ RUN apt-get update && apt-get -y install \
&& apt autoclean \
&& rm -rf /var/lib/apt/lists/*
RUN pip3 install websockets junit2html
RUN gem install coveralls-lcov
RUN pip3 install --break-system-packages junit2html


@ -13,7 +13,6 @@ source_dir="$(cd $dir/.. && pwd)"
build_dir=$source_dir/build
conf_file=$build_dir/zeekygen-test.conf
output_dir=$source_dir/doc
zeek_error_file=$build_dir/zeekygen-test-stderr.txt
if [ -n "$1" ]; then
output_dir=$1
@ -29,11 +28,10 @@ cd $build_dir
export ZEEK_SEED_FILE=$source_dir/testing/btest/random.seed
function run_zeek {
ZEEK_ALLOW_INIT_ERRORS=1 zeek -X $conf_file zeekygen >/dev/null 2>$zeek_error_file
ZEEK_ALLOW_INIT_ERRORS=1 zeek -X $conf_file zeekygen >/dev/null
if [ $? -ne 0 ]; then
echo "Failed running zeek with zeekygen config file $conf_file"
echo "See stderr in $zeek_error_file"
echo "Failed running zeek with zeekygen config file $conf_file" >&2
exit 1
fi
}


@ -3,6 +3,10 @@ FROM mcr.microsoft.com/dotnet/framework/sdk:4.8-windowsservercore-ltsc2019
SHELL [ "powershell" ]
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20230801
RUN Set-ExecutionPolicy Unrestricted -Force
# Install Chocolatey

cmake (submodule)

@ -1 +1 @@
Subproject commit afa62ecbe399c3dac41f6ebcdb622f409569edd6
Subproject commit 98799bb51aabb282e7dd6372aea7dbcf909469ac


@ -80,9 +80,6 @@
/* Define if you have the <sys/ethernet.h> header file. */
#cmakedefine HAVE_SYS_ETHERNET_H
/* Include krb5.h */
#cmakedefine NEED_KRB5_H
/* Compatibility for Darwin */
#cmakedefine NEED_NAMESER_COMPAT_H


@ -14,7 +14,7 @@ prefix="@CMAKE_INSTALL_PREFIX@"
python_dir="@PY_MOD_INSTALL_DIR@"
script_dir="@ZEEK_SCRIPT_INSTALL_PATH@"
site_dir="@ZEEK_SCRIPT_INSTALL_PATH@/site"
version="@VERSION@"
version="@ZEEK_VERSION_FULL@"
zeek_dist="@ZEEK_DIST@"
zeekpath="@DEFAULT_ZEEKPATH@"


@ -0,0 +1,14 @@
#!/bin/sh
# Once configured by CMake, this file prints the absolute path to Zeek scripts
# that come with the source distributions of Zeek as well as scripts that are
# generated by the BIF compiler at compile time.
#
# The intended use of this script is to make it easier to run Zeek from
# the build directory, avoiding the need to install it. This could be
# done like:
#
# ZEEKPATH=`./zeek-path-dev` ./src/zeek
#
# This file should be kept in sync with the ZEEKPATH line in zeek-path-dev.bat.in.
echo .:${cmake_source_dir}/scripts:${cmake_source_dir}/scripts/policy:${cmake_source_dir}/scripts/site:${cmake_binary_dir}/scripts:${cmake_binary_dir}/scripts/builtin-plugins


@ -3,7 +3,7 @@
#pragma once
/* Version number of package */
#define VERSION "@VERSION@"
#define VERSION "@ZEEK_VERSION_FULL@"
// Zeek version number.
// This is the result of (major * 10000 + minor * 100 + patch)

configure (vendored)

@ -365,7 +365,7 @@ while [ $# -ne 0 ]; do
append_cache_entry OPENSSL_ROOT_DIR PATH $optarg
;;
--with-python=*)
append_cache_entry PYTHON_EXECUTABLE PATH $optarg
append_cache_entry Python_EXECUTABLE PATH $optarg
;;
--with-python-inc=*)
append_cache_entry PYTHON_INCLUDE_DIR PATH $optarg

doc (submodule)

@ -1 +1 @@
Subproject commit e479f28d2263ae3c452567a52ef613f144191f08
Subproject commit 22fe25d980131abdfadb4bdb9390aee347e77023


@ -3,8 +3,17 @@
# Layer to build Zeek.
FROM debian:bookworm-slim
# Make the shell split commands in the log so we can determine reasons for
# failures more easily.
SHELL ["/bin/sh", "-x", "-c"]
# Allow apt to retry 3 times before failing.
RUN echo 'Acquire::Retries "3";' > /etc/apt/apt.conf.d/80-retries
# Force apt to timeout retrieval of data after 3 minutes.
RUN echo 'Acquire::http::timeout "180";' > /etc/apt/apt.conf.d/99-timeouts
RUN echo 'Acquire::https::timeout "180";' >> /etc/apt/apt.conf.d/99-timeouts
# Configure system for build.
RUN apt-get -q update \
&& apt-get install -q -y --no-install-recommends \


@ -3,8 +3,17 @@
# Final layer containing all artifacts.
FROM debian:bookworm-slim
# Make the shell split commands in the log so we can determine reasons for
# failures more easily.
SHELL ["/bin/sh", "-x", "-c"]
# Allow apt to retry 3 times before failing.
RUN echo 'Acquire::Retries "3";' > /etc/apt/apt.conf.d/80-retries
# Force apt to timeout retrieval of data after 3 minutes.
RUN echo 'Acquire::http::timeout "180";' > /etc/apt/apt.conf.d/99-timeouts
RUN echo 'Acquire::https::timeout "180";' >> /etc/apt/apt.conf.d/99-timeouts
RUN apt-get -q update \
&& apt-get install -q -y --no-install-recommends \
ca-certificates \


@ -11,6 +11,15 @@ export {
## number of bytes). A value of zero means unlimited.
option default_limit = 0;
## This setting configures whether the file extract limit is inclusive
## of missing bytes. By default, missing bytes do count towards the
## limit.
## Setting this option to false changes this behavior so that missing
## bytes no longer count towards these limits. Files with
## missing bytes are created as sparse files on disk. Their apparent size
## can exceed this file size limit.
option default_limit_includes_missing = T;
redef record Files::Info += {
## Local filename of extracted file.
extracted: string &optional &log;
@ -37,6 +46,14 @@ export {
## :zeek:see:`FileExtract::set_limit` is called to increase the
## limit. A value of zero means "no limit".
extract_limit: count &default=default_limit;
## By default, missing bytes in files count towards the extract file size.
## Missing bytes can, e.g., occur due to missed traffic, or offsets
## used when downloading files.
## Setting this option to false changes this behavior so that holes
## in files no longer count towards these limits. Files with
## holes are created as sparse files on disk. Their apparent size
## can exceed this file size limit.
extract_limit_includes_missing: bool &default=default_limit_includes_missing;
};
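# Example (a sketch): extract files without counting missing bytes towards
# the limit:
#   redef FileExtract::default_limit_includes_missing = F;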
## Sets the maximum allowed extracted file size.


@ -81,10 +81,6 @@ export {
## reference to the actual connection will be deleted after
## applying the notice policy.
conn: connection &optional;
## A shorthand way of giving the uid and id to a notice. The
## reference to the actual connection will be deleted after
## applying the notice policy.
iconn: icmp_conn &optional;
## A file record if the notice is related to a file. The
## reference to the actual fa_file record will be deleted after
@ -108,7 +104,7 @@ export {
file_desc: string &log &optional;
## The transport protocol. Filled automatically when either
## *conn*, *iconn* or *p* is specified.
## *conn* or *p* is specified.
proto: transport_proto &log &optional;
## The :zeek:type:`Notice::Type` of the notice.
@ -668,15 +664,6 @@ function apply_policy(n: Notice::Info)
if ( n?$p )
n$proto = get_port_transport_proto(n$p);
if ( n?$iconn )
{
n$proto = icmp;
if ( ! n?$src )
n$src = n$iconn$orig_h;
if ( ! n?$dst )
n$dst = n$iconn$resp_h;
}
if ( ! n?$email_body_sections )
n$email_body_sections = vector();
if ( ! n?$email_delay_tokens )


@ -144,7 +144,9 @@ export {
["FTP_password_too_long"] = ACTION_LOG_PER_CONN,
["HTTP_bad_chunk_size"] = ACTION_LOG,
["HTTP_chunked_transfer_for_multipart_message"] = ACTION_LOG,
["HTTP_excessive_pipelining"] = ACTION_LOG,
["HTTP_overlapping_messages"] = ACTION_LOG,
["HTTP_response_before_request"] = ACTION_LOG,
["unknown_HTTP_method"] = ACTION_LOG,
["HTTP_version_mismatch"] = ACTION_LOG,
["ident_request_addendum"] = ACTION_LOG,
@ -211,6 +213,7 @@ export {
["spontaneous_RST"] = ACTION_IGNORE,
["SMB_parsing_error"] = ACTION_LOG,
["SMB_discarded_messages_state"] = ACTION_LOG,
["SMB_discarded_dce_rpc_analyzers"] = ACTION_LOG,
["no_smb_session_using_parsesambamsg"] = ACTION_LOG,
["smb_andx_command_failed_to_parse"] = ACTION_LOG,
["smb_tree_connect_andx_response_without_tree"] = ACTION_LOG_PER_CONN,


@ -101,8 +101,8 @@ export {
host_count: count &log &optional;
};
## Actions for a signature.
const actions: table[string] of Action = {
## Actions for a signature. Can be updated dynamically.
global actions: table[string] of Action = {
["unspecified"] = SIG_IGNORE, # place-holder
} &redef &default = SIG_ALARM;
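# Example (a sketch, with a hypothetical signature ID): since "actions" is
# now a global rather than a const, entries can be updated at runtime:
#   Signatures::actions["my-test-sig"] = Signatures::SIG_QUIET;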

View file

@ -1,5 +1,3 @@
@load base/misc/version
# doc-common-start
module Spicy;

View file

@ -131,6 +131,13 @@ type files_tag_set: set[Files::Tag];
## directly and then remove this alias.
type interval_set: set[interval];
## Function mapping a string to a string.
##
## .. todo:: We need this type definition only for declaring builtin functions
## via ``bifcl``. We should extend ``bifcl`` to understand composite types
## directly and then remove this alias.
type string_mapper: function(s: string): string;
## A structure indicating a MIME type and strength of a match against
## file magic signatures.
##
@ -224,21 +231,6 @@ type flow_id : record {
dst_p: port; ##< The destination port number.
} &log;
## Specifics about an ICMP conversation. ICMP events typically pass this in
## addition to :zeek:type:`conn_id`.
##
## .. zeek:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent
## icmp_time_exceeded icmp_unreachable
type icmp_conn: record {
orig_h: addr; ##< The originator's IP address.
resp_h: addr; ##< The responder's IP address.
itype: count; ##< The ICMP type of the packet that triggered the instantiation of the record.
icode: count; ##< The ICMP code of the packet that triggered the instantiation of the record.
len: count; ##< The length of the ICMP payload of the packet that triggered the instantiation of the record.
hlim: count; ##< The encapsulating IP header's Hop Limit value.
v6: bool; ##< True if it's an ICMPv6 packet.
};
## Specifics about an ICMP conversation/packet.
## ICMP events typically pass this in addition to :zeek:type:`conn_id`.
##
@ -1115,6 +1107,11 @@ type geo_autonomous_system: record {
## The directory containing MaxMind DB (.mmdb) files to use for GeoIP support.
const mmdb_dir: string = "" &redef;
## Sets the interval for MaxMind DB file staleness checks. When Zeek detects a
## change in inode or modification time, the database is re-opened. Setting
## a negative interval disables staleness checks.
const mmdb_stale_check_interval: interval = 5min &redef;
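For example, per the documentation above, a negative interval disables the staleness checks:

redef mmdb_stale_check_interval = -1sec;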
## Computed entropy values. The record captures a number of measures that are
## computed in parallel. See `A Pseudorandom Number Sequence Test Program
## <http://www.fourmilab.ch/random>`_ for more information, Zeek uses the same
@ -1129,6 +1126,12 @@ type entropy_test_result: record {
serial_correlation: double; ##< Serial correlation coefficient.
};
## The default JSON key mapper function. Identity function.
function from_json_default_key_mapper(s: string): string
{
return s;
}
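A hedged usage sketch tying these pieces together (the from_json_result field names v and valid are assumed, as is a string_mapper key-mapper parameter on the from_json BIF; the prefix-stripping helper and record type are hypothetical):

type Point: record {
    x: count;
    y: count;
};

function strip_json_prefix(s: string): string
    {
    return sub(s, /^json_/, "");
    }

event zeek_init()
    {
    local res = from_json("{\"json_x\": 1, \"json_y\": 2}", Point, strip_json_prefix);
    if ( res$valid )
        print res$v;  # [x=1, y=2]
    }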
## Return type for from_json BIF.
##
## .. zeek:see:: from_json
@ -3041,6 +3044,12 @@ export {
##
## .. zeek:see:: smb2_discarded_messages_state
const SMB::max_pending_messages = 1000 &redef;
## Maximum number of DCE-RPC analyzers per connection
## before discarding them to avoid unbounded state growth.
##
## .. zeek:see:: smb_discarded_dce_rpc_analyzers
const max_dce_rpc_analyzers = 1000 &redef;
}
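An illustrative redef sketch, assuming the constant resides in the SMB module like max_pending_messages above:

redef SMB::max_dce_rpc_analyzers = 500;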
module SMB1;
@ -4432,8 +4441,39 @@ type ModbusHeaders: record {
uid: count;
## MODBUS function code
function_code: count;
## Length of the application PDU following the header plus
## one byte for the uid field.
len: count;
};
type ModbusFileRecordRequest: record {
ref_type: count;
file_num: count;
record_num: count;
record_len: count;
};
type ModbusFileRecordRequests: vector of ModbusFileRecordRequest;
type ModbusFileRecordResponse: record {
file_len: count;
ref_type: count;
record_data: string;
};
type ModbusFileRecordResponses: vector of ModbusFileRecordResponse;
type ModbusFileReference: record {
ref_type: count;
file_num: count;
record_num: count;
record_len: count;
record_data: string;
};
type ModbusFileReferences: vector of ModbusFileReference;
module SSL;
export {
type SignatureAndHashAlgorithm: record {
@ -4455,6 +4495,11 @@ const SSL::dtls_max_version_errors = 10 &redef;
## Maximum number of invalid version errors to report in one DTLS connection.
const SSL::dtls_max_reported_version_errors = 1 &redef;
## Maximum number of Alert messages parsed from an SSL record with
## content_type alert (21). The remaining alerts are discarded. For
## TLS 1.3 connections, this is implicitly 1 as defined by RFC 8446.
const SSL::max_alerts_per_record = 10 &redef;
}
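An illustrative redef sketch, tightening the per-record alert cap below its default of 10:

redef SSL::max_alerts_per_record = 5;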
module GLOBAL;
@ -5251,6 +5296,11 @@ export {
## interfaces.
const bufsize = 128 &redef;
## Number of bytes to use for buffering file read operations when reading
## from a PCAP file. Setting this to 0 uses operating system defaults
## as chosen by fopen().
const bufsize_offline_bytes = 128 * 1024 &redef;
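A hedged sketch, assuming the constant lives in the Pcap module like bufsize above: defer to the operating system's default buffering when reading traces.

redef Pcap::bufsize_offline_bytes = 0;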
## Default timeout for packet sources without file descriptors.
##
## For libpcap based packet sources that do not provide a usable

View file

@ -59,12 +59,14 @@
@load base/protocols/imap
@load base/protocols/irc
@load base/protocols/krb
@load base/protocols/ldap
@load base/protocols/modbus
@load base/protocols/mqtt
@load base/protocols/mysql
@load base/protocols/ntlm
@load base/protocols/ntp
@load base/protocols/pop3
@load base/protocols/quic
@load base/protocols/radius
@load base/protocols/rdp
@load base/protocols/rfb

View file

@ -11,6 +11,7 @@
@load base/packet-protocols/linux_sll2
@load base/packet-protocols/nflog
@load base/packet-protocols/null
@load base/packet-protocols/ppp
@load base/packet-protocols/ppp_serial
@load base/packet-protocols/pppoe
@load base/packet-protocols/vlan

View file

@ -2,4 +2,10 @@ module PacketAnalyzer::GRE;
export {
const default_analyzer: PacketAnalyzer::Tag = PacketAnalyzer::ANALYZER_IPTUNNEL &redef;
const gre_ports = { 4754/udp } &redef;
}
event zeek_init() &priority=20
{
PacketAnalyzer::register_for_ports(PacketAnalyzer::ANALYZER_UDP, PacketAnalyzer::ANALYZER_GRE, gre_ports);
}

View file

@ -0,0 +1 @@
@load ./main

View file

@ -0,0 +1,12 @@
module PacketAnalyzer::PPP;
const DLT_PPP: count = 9;
event zeek_init() &priority=20
{
PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_ROOT, DLT_PPP, PacketAnalyzer::ANALYZER_PPP);
PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_PPP, 0x0281, PacketAnalyzer::ANALYZER_MPLS);
PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_PPP, 0x0021, PacketAnalyzer::ANALYZER_IP);
PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_PPP, 0x0057, PacketAnalyzer::ANALYZER_IP);
}

View file

@ -1,5 +1 @@
module PacketAnalyzer::UDP;
#event zeek_init() &priority=20
# {
# }

View file

@ -88,8 +88,6 @@ function set_state(c: connection, state_x: BackingState)
c$dce_rpc$endpoint = uuid_endpoint_map[c$dce_rpc_state$uuid];
if ( c$dce_rpc_state?$named_pipe )
c$dce_rpc$named_pipe = c$dce_rpc_state$named_pipe;
Conn::register_removal_hook(c, finalize_dce_rpc);
}
function set_session(c: connection, fid: count)
@ -97,7 +95,9 @@ function set_session(c: connection, fid: count)
if ( ! c?$dce_rpc_backing )
{
c$dce_rpc_backing = table();
Conn::register_removal_hook(c, finalize_dce_rpc);
}
if ( fid !in c$dce_rpc_backing )
{
local info = Info($ts=network_time(),$id=c$id,$uid=c$uid);
@ -216,6 +216,23 @@ event dce_rpc_response(c: connection, fid: count, ctx_id: count, opnum: count, s
}
}
event smb_discarded_dce_rpc_analyzers(c: connection)
{
# This event is raised when the DCE-RPC analyzers table
# has grown too large. Assume things are broken and wipe
# the backing table.
delete c$dce_rpc_backing;
Reporter::conn_weird("SMB_discarded_dce_rpc_analyzers", c, "", "SMB");
}
# If a fid representing a pipe was closed, remove it from dce_rpc_backing.
event smb2_close_request(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID) &priority=-5
{
local fid = file_id$persistent + file_id$volatile;
if ( c?$dce_rpc_backing )
delete c$dce_rpc_backing[fid];
}
hook finalize_dce_rpc(c: connection)
{
if ( ! c?$dce_rpc )

View file

@ -204,11 +204,16 @@ event DHCP::aggregate_msgs(ts: time, id: conn_id, uid: string, is_orig: bool, ms
log_info$msg_types += DHCP::message_types[msg$m_type];
# The is_orig flag is T for "connections" initiated by servers
# to broadcast addresses; otherwise, is_orig indicates that this
# is a DHCP client.
local is_client = is_orig && (id$orig_h == 0.0.0.0 || id$orig_p == 68/udp || id$resp_p == 67/udp);
# Let's watch for messages in any DHCP message type
# and split them out based on client and server.
if ( options?$message )
{
if ( is_orig )
if ( is_client )
log_info$client_message = options$message;
else
log_info$server_message = options$message;
@ -218,7 +223,7 @@ event DHCP::aggregate_msgs(ts: time, id: conn_id, uid: string, is_orig: bool, ms
# expiration handling.
log_info$last_message_ts = ts;
if ( is_orig ) # client requests
if ( is_client ) # client requests
{
# Assign the client addr in case this is a session
# of only INFORM messages (no lease handed out).
@ -246,13 +251,28 @@ event DHCP::aggregate_msgs(ts: time, id: conn_id, uid: string, is_orig: bool, ms
{
# Only log the address of the server if it handed out
# an IP address.
if ( msg$yiaddr != 0.0.0.0 &&
id$resp_h != 255.255.255.255 )
if ( msg$yiaddr != 0.0.0.0 )
{
if ( is_orig )
{
# This is a server message and is_orig is T.
# This means it's a DHCP server broadcasting
# and the server is the originator.
log_info$server_addr = id$orig_h;
log_info$server_port = id$orig_p;
log_info$client_port = id$resp_p;
}
else
{
# When a server sends to a non-broadcast
# address, Zeek's connection flipping is
# in effect and the server is the responder
# instead.
log_info$server_addr = id$resp_h;
log_info$server_port = id$resp_p;
log_info$client_port = id$orig_p;
}
}
# Only use the client hardware address from the server
# if we didn't already pick one up from the client.

View file

@ -64,6 +64,9 @@ export {
## to are tracked here.
pending_commands: PendingCmds;
## Sequence number of the previous command.
command_seq: count &default=0;
## Indicates whether the session is in active or passive mode.
passive: bool &default=F;

View file

@ -165,7 +165,7 @@ function set_ftp_session(c: connection)
Conn::register_removal_hook(c, finalize_ftp);
# Add a shim command so the server can respond with some init response.
add_pending_cmd(c$ftp$pending_commands, "<init>", "");
add_pending_cmd(c$ftp$pending_commands, ++c$ftp$command_seq, "<init>", "");
}
}
@ -261,7 +261,7 @@ event ftp_request(c: connection, command: string, arg: string) &priority=5
# attackers.
if ( c?$ftp && c$ftp?$cmdarg && c$ftp?$reply_code )
{
remove_pending_cmd(c$ftp$pending_commands, c$ftp$cmdarg);
if ( remove_pending_cmd(c$ftp$pending_commands, c$ftp$cmdarg) )
ftp_message(c);
}
@ -270,7 +270,7 @@ event ftp_request(c: connection, command: string, arg: string) &priority=5
# Queue up the new command and argument
if ( |c$ftp$pending_commands| < max_pending_commands )
add_pending_cmd(c$ftp$pending_commands, command, arg);
add_pending_cmd(c$ftp$pending_commands, ++c$ftp$command_seq, command, arg);
else
Reporter::conn_weird("FTP_too_many_pending_commands", c,
cat(|c$ftp$pending_commands|), "FTP");

View file

@ -78,9 +78,9 @@ export {
};
}
function add_pending_cmd(pc: PendingCmds, cmd: string, arg: string): CmdArg
function add_pending_cmd(pc: PendingCmds, seq: count, cmd: string, arg: string): CmdArg
{
local ca = [$cmd = cmd, $arg = arg, $seq=|pc|+1, $ts=network_time()];
local ca = [$cmd = cmd, $arg = arg, $seq=seq, $ts=network_time()];
pc[ca$seq] = ca;
return ca;

View file

@ -103,7 +103,7 @@ event http_header(c: connection, is_orig: bool, name: string, value: string) &pr
c$http$current_entity$filename = extract_filename_from_content_disposition(value);
}
else if ( name == "CONTENT-TYPE" &&
/[nN][aA][mM][eE][:blank:]*=/ in value )
/[nN][aA][mM][eE][[:blank:]]*=/ in value )
{
c$http$current_entity$filename = extract_filename_from_content_disposition(value);
}

View file

@ -133,6 +133,12 @@ export {
## HTTP finalization hook. Remaining HTTP info may get logged when it's called.
global finalize_http: Conn::RemovalHook;
## Only allow this many pending requests on a single connection.
## If this number is exceeded, all pending requests are flushed
## out and request/response tracking is reset to prevent unbounded
## state growth.
option max_pending_requests = 100;
}
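A minimal configuration sketch; note that the guard in http_request below only applies for positive values, so setting the option to 0 disables the cap entirely:

redef HTTP::max_pending_requests = 500;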
# Add the http state tracking fields to the connection record.
@ -205,6 +211,47 @@ event http_request(c: connection, method: string, original_URI: string,
Conn::register_removal_hook(c, finalize_http);
}
# Request/response tracking exists to account for HTTP pipelining.
# It fails if more responses have been seen than requests. If that
# happens, just fast-forward current_request such that the next
# response matches the in-flight request.
if ( c$http_state$current_request < c$http_state$current_response )
{
Reporter::conn_weird("HTTP_response_before_request", c);
c$http_state$current_request = c$http_state$current_response;
}
# Too many requests are pending for which we have not yet observed a
# reply. This might be due to excessive HTTP pipelining, one-sided
# traffic capture, or the responder side of the HTTP analyzer having
# been disabled. In any case, we simply flush all pending requests to
# the log to make room for a new one. Any matching of pipelined
# requests and responses is most likely completely off anyhow.
if ( max_pending_requests > 0 && |c$http_state$pending| > max_pending_requests )
{
Reporter::conn_weird("HTTP_excessive_pipelining", c);
if ( c$http_state$current_response == 0 )
++c$http_state$current_response;
while ( c$http_state$current_response < c$http_state$current_request )
{
local cr = c$http_state$current_response;
if ( cr in c$http_state$pending )
{
Log::write(HTTP::LOG, c$http_state$pending[cr]);
delete c$http_state$pending[cr];
}
else
{
# The above should have been true...
# Reporter::error(fmt("Expected pending request at %d", cr));
}
++c$http_state$current_response;
}
}
++c$http_state$current_request;
set_state(c, T);
@ -290,7 +337,7 @@ event http_header(c: connection, is_orig: bool, name: string, value: string) &pr
{
if ( /^[bB][aA][sS][iI][cC] / in value )
{
local userpass = decode_base64_conn(c$id, sub(value, /[bB][aA][sS][iI][cC][[:blank:]]/, ""));
local userpass = decode_base64_conn(c$id, sub(value, /[bB][aA][sS][iI][cC][[:blank:]]+/, ""));
local up = split_string(userpass, /:/);
if ( |up| >= 2 )
{

View file

@ -0,0 +1,6 @@
@if ( have_spicy_analyzers() )
@load ./spicy-events.zeek
@load-sigs ./dpd.sig
@load ./consts
@load ./main.zeek
@endif

View file

@ -0,0 +1,123 @@
module LDAP;
export {
const PROTOCOL_OPCODES = { [ LDAP::ProtocolOpcode_BIND_REQUEST ] = "bind", [
LDAP::ProtocolOpcode_BIND_RESPONSE ] = "bind", [
LDAP::ProtocolOpcode_UNBIND_REQUEST ] = "unbind", [
LDAP::ProtocolOpcode_SEARCH_REQUEST ] = "search", [
LDAP::ProtocolOpcode_SEARCH_RESULT_ENTRY ] = "search", [
LDAP::ProtocolOpcode_SEARCH_RESULT_DONE ] = "search", [
LDAP::ProtocolOpcode_MODIFY_REQUEST ] = "modify", [
LDAP::ProtocolOpcode_MODIFY_RESPONSE ] = "modify", [
LDAP::ProtocolOpcode_ADD_REQUEST ] = "add", [
LDAP::ProtocolOpcode_ADD_RESPONSE ] = "add", [
LDAP::ProtocolOpcode_DEL_REQUEST ] = "delete", [
LDAP::ProtocolOpcode_DEL_RESPONSE ] = "delete", [
LDAP::ProtocolOpcode_MOD_DN_REQUEST ] = "modify", [
LDAP::ProtocolOpcode_MOD_DN_RESPONSE ] = "modify", [
LDAP::ProtocolOpcode_COMPARE_REQUEST ] = "compare", [
LDAP::ProtocolOpcode_COMPARE_RESPONSE ] = "compare", [
LDAP::ProtocolOpcode_ABANDON_REQUEST ] = "abandon", [
LDAP::ProtocolOpcode_SEARCH_RESULT_REFERENCE ] = "search", [
LDAP::ProtocolOpcode_EXTENDED_REQUEST ] = "extended", [
LDAP::ProtocolOpcode_EXTENDED_RESPONSE ] = "extended", [
LDAP::ProtocolOpcode_INTERMEDIATE_RESPONSE ] = "intermediate" }
&default="unknown";
const BIND_SIMPLE = "bind simple";
const BIND_SASL = "bind SASL";
const RESULT_CODES = { [ LDAP::ResultCode_SUCCESS ] = "success", [
LDAP::ResultCode_OPERATIONS_ERROR ] = "operations error", [
LDAP::ResultCode_PROTOCOL_ERROR ] = "protocol error", [
LDAP::ResultCode_TIME_LIMIT_EXCEEDED ] = "time limit exceeded", [
LDAP::ResultCode_SIZE_LIMIT_EXCEEDED ] = "size limit exceeded", [
LDAP::ResultCode_COMPARE_FALSE ] = "compare false", [
LDAP::ResultCode_COMPARE_TRUE ] = "compare true", [
LDAP::ResultCode_AUTH_METHOD_NOT_SUPPORTED ] =
"auth method not supported", [
LDAP::ResultCode_STRONGER_AUTH_REQUIRED ] =
"stronger auth required", [ LDAP::ResultCode_PARTIAL_RESULTS ] =
"partial results", [ LDAP::ResultCode_REFERRAL ] = "referral", [
LDAP::ResultCode_ADMIN_LIMIT_EXCEEDED ] = "admin limit exceeded", [
LDAP::ResultCode_UNAVAILABLE_CRITICAL_EXTENSION ] =
"unavailable critical extension", [
LDAP::ResultCode_CONFIDENTIALITY_REQUIRED ] =
"confidentiality required", [ LDAP::ResultCode_SASL_BIND_IN_PROGRESS ] =
"SASL bind in progress", [ LDAP::ResultCode_NO_SUCH_ATTRIBUTE ] =
"no such attribute", [ LDAP::ResultCode_UNDEFINED_ATTRIBUTE_TYPE ] =
"undefined attribute type", [
LDAP::ResultCode_INAPPROPRIATE_MATCHING ] =
"inappropriate matching", [ LDAP::ResultCode_CONSTRAINT_VIOLATION ] =
"constraint violation", [ LDAP::ResultCode_ATTRIBUTE_OR_VALUE_EXISTS ] =
"attribute or value exists", [
LDAP::ResultCode_INVALID_ATTRIBUTE_SYNTAX ] =
"invalid attribute syntax", [ LDAP::ResultCode_NO_SUCH_OBJECT ] =
"no such object", [ LDAP::ResultCode_ALIAS_PROBLEM ] =
"alias problem", [ LDAP::ResultCode_INVALID_DNSYNTAX ] =
"invalid DN syntax", [ LDAP::ResultCode_ALIAS_DEREFERENCING_PROBLEM ] =
"alias dereferencing problem", [
LDAP::ResultCode_INAPPROPRIATE_AUTHENTICATION ] =
"inappropriate authentication", [
LDAP::ResultCode_INVALID_CREDENTIALS ] = "invalid credentials", [
LDAP::ResultCode_INSUFFICIENT_ACCESS_RIGHTS ] =
"insufficient access rights", [ LDAP::ResultCode_BUSY ] = "busy", [
LDAP::ResultCode_UNAVAILABLE ] = "unavailable", [
LDAP::ResultCode_UNWILLING_TO_PERFORM ] = "unwilling to perform", [
LDAP::ResultCode_LOOP_DETECT ] = "loop detect", [
LDAP::ResultCode_SORT_CONTROL_MISSING ] = "sort control missing", [
LDAP::ResultCode_OFFSET_RANGE_ERROR ] = "offset range error", [
LDAP::ResultCode_NAMING_VIOLATION ] = "naming violation", [
LDAP::ResultCode_OBJECT_CLASS_VIOLATION ] =
"object class violation", [ LDAP::ResultCode_NOT_ALLOWED_ON_NON_LEAF ] =
"not allowed on non-leaf", [ LDAP::ResultCode_NOT_ALLOWED_ON_RDN ] =
"not allowed on RDN", [ LDAP::ResultCode_ENTRY_ALREADY_EXISTS ] =
"entry already exists", [
LDAP::ResultCode_OBJECT_CLASS_MODS_PROHIBITED ] =
"object class mods prohibited", [ LDAP::ResultCode_RESULTS_TOO_LARGE ] =
"results too large", [ LDAP::ResultCode_AFFECTS_MULTIPLE_DSAS ] =
"affects multiple DSAs", [ LDAP::ResultCode_CONTROL_ERROR ] =
"control error", [ LDAP::ResultCode_OTHER ] = "other", [
LDAP::ResultCode_SERVER_DOWN ] = "server down", [
LDAP::ResultCode_LOCAL_ERROR ] = "local error", [
LDAP::ResultCode_ENCODING_ERROR ] = "encoding error", [
LDAP::ResultCode_DECODING_ERROR ] = "decoding error", [
LDAP::ResultCode_TIMEOUT ] = "timeout", [
LDAP::ResultCode_AUTH_UNKNOWN ] = "auth unknown", [
LDAP::ResultCode_FILTER_ERROR ] = "filter error", [
LDAP::ResultCode_USER_CANCELED ] = "user canceled", [
LDAP::ResultCode_PARAM_ERROR ] = "param error", [
LDAP::ResultCode_NO_MEMORY ] = "no memory", [
LDAP::ResultCode_CONNECT_ERROR ] = "connect error", [
LDAP::ResultCode_NOT_SUPPORTED ] = "not supported", [
LDAP::ResultCode_CONTROL_NOT_FOUND ] = "control not found", [
LDAP::ResultCode_NO_RESULTS_RETURNED ] = "no results returned", [
LDAP::ResultCode_MORE_RESULTS_TO_RETURN ] =
"more results to return", [ LDAP::ResultCode_CLIENT_LOOP ] =
"client loop", [ LDAP::ResultCode_REFERRAL_LIMIT_EXCEEDED ] =
"referral limit exceeded", [ LDAP::ResultCode_INVALID_RESPONSE ] =
"invalid response", [ LDAP::ResultCode_AMBIGUOUS_RESPONSE ] =
"ambiguous response", [ LDAP::ResultCode_TLS_NOT_SUPPORTED ] =
"TLS not supported", [ LDAP::ResultCode_INTERMEDIATE_RESPONSE ] =
"intermediate response", [ LDAP::ResultCode_UNKNOWN_TYPE ] =
"unknown type", [ LDAP::ResultCode_LCUP_INVALID_DATA ] =
"LCUP invalid data", [ LDAP::ResultCode_LCUP_UNSUPPORTED_SCHEME ] =
"LCUP unsupported scheme", [ LDAP::ResultCode_LCUP_RELOAD_REQUIRED ] =
"LCUP reload required", [ LDAP::ResultCode_CANCELED ] =
"canceled", [ LDAP::ResultCode_NO_SUCH_OPERATION ] =
"no such operation", [ LDAP::ResultCode_TOO_LATE ] = "too late", [
LDAP::ResultCode_CANNOT_CANCEL ] = "cannot cancel", [
LDAP::ResultCode_ASSERTION_FAILED ] = "assertion failed", [
LDAP::ResultCode_AUTHORIZATION_DENIED ] = "authorization denied" }
&default="unknown";
const SEARCH_SCOPES = { [ LDAP::SearchScope_SEARCH_BASE ] = "base", [
LDAP::SearchScope_SEARCH_SINGLE ] = "single", [
LDAP::SearchScope_SEARCH_TREE ] = "tree", } &default="unknown";
const SEARCH_DEREF_ALIASES = { [ LDAP::SearchDerefAlias_DEREF_NEVER ] =
"never", [ LDAP::SearchDerefAlias_DEREF_IN_SEARCHING ] =
"searching", [ LDAP::SearchDerefAlias_DEREF_FINDING_BASE ] =
"finding", [ LDAP::SearchDerefAlias_DEREF_ALWAYS ] = "always", }
&default="unknown";
}

View file

@ -0,0 +1,23 @@
signature dpd_ldap_client_udp {
ip-proto == udp
payload /^\x30.\x02\x01.\x60/
}
signature dpd_ldap_server_udp {
ip-proto == udp
payload /^\x30/
requires-reverse-signature dpd_ldap_client_udp
enable "LDAP_UDP"
}
signature dpd_ldap_client_tcp {
ip-proto == tcp
payload /^\x30.\x02\x01.\x60/
}
signature dpd_ldap_server_tcp {
ip-proto == tcp
payload /^\x30/
requires-reverse-signature dpd_ldap_client_tcp
enable "LDAP_TCP"
}

View file

@ -0,0 +1,358 @@
# Copyright (c) 2021 by the Zeek Project. See LICENSE for details.
@load base/protocols/conn/removal-hooks
@load ./consts
module LDAP;
export {
redef enum Log::ID += { LDAP_LOG, LDAP_SEARCH_LOG };
## TCP ports which should be considered for analysis.
const ports_tcp = { 389/tcp, 3268/tcp } &redef;
## UDP ports which should be considered for analysis.
const ports_udp = { 389/udp } &redef;
## Whether clear text passwords are captured or not.
option default_capture_password = F;
## Whether to log LDAP search attributes or not.
option default_log_search_attributes = F;
## Default logging policy hook for LDAP_LOG.
global log_policy: Log::PolicyHook;
## Default logging policy hook for LDAP_SEARCH_LOG.
global log_policy_search: Log::PolicyHook;
## LDAP finalization hook.
global finalize_ldap: Conn::RemovalHook;
#############################################################################
# This is the format of ldap.log (ldap operations minus search-related)
# Each line represents a unique connection+message_id (requests/responses)
type MessageInfo: record {
# Timestamp for when the event happened.
ts: time &log;
# Unique ID for the connection.
uid: string &log;
# The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
# Message ID
message_id: int &log &optional;
# LDAP version
version: int &log &optional;
# normalized operations (e.g., bind_request and bind_response to "bind")
opcodes: set[string] &log &optional;
# Result code(s)
results: set[string] &log &optional;
# result diagnostic message(s)
diagnostic_messages: vector of string &log &optional;
# object(s)
objects: vector of string &log &optional;
# argument(s)
arguments: vector of string &log &optional;
};
#############################################################################
# This is the format of ldap_search.log (search-related messages only)
# Each line represents a unique connection+message_id (requests/responses)
type SearchInfo: record {
# Timestamp for when the event happened.
ts: time &log;
# Unique ID for the connection.
uid: string &log;
# The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
# Message ID
message_id: int &log &optional;
# sets of search scope and deref alias
scopes: set[string] &log &optional;
derefs: set[string] &log &optional;
# base search objects
base_objects: vector of string &log &optional;
# number of results returned
result_count: count &log &optional;
# Result code(s)
results: set[string] &log &optional;
# result diagnostic message(s)
diagnostic_messages: vector of string &log &optional;
# a string representation of the search filter used in the query
filter: string &log &optional;
# a list of attributes that were returned in the search
attributes: vector of string &log &optional;
};
type State: record {
messages: table[int] of MessageInfo &optional;
searches: table[int] of SearchInfo &optional;
};
# Event that can be handled to access the ldap record as it is sent on
# to the logging framework.
global log_ldap: event(rec: LDAP::MessageInfo);
global log_ldap_search: event(rec: LDAP::SearchInfo);
}
redef record connection += {
ldap: State &optional;
};
redef likely_server_ports += { LDAP::ports_tcp, LDAP::ports_udp };
#############################################################################
global OPCODES_FINISHED: set[LDAP::ProtocolOpcode] = { LDAP::ProtocolOpcode_BIND_RESPONSE,
LDAP::ProtocolOpcode_UNBIND_REQUEST,
LDAP::ProtocolOpcode_SEARCH_RESULT_DONE,
LDAP::ProtocolOpcode_MODIFY_RESPONSE,
LDAP::ProtocolOpcode_ADD_RESPONSE,
LDAP::ProtocolOpcode_DEL_RESPONSE,
LDAP::ProtocolOpcode_MOD_DN_RESPONSE,
LDAP::ProtocolOpcode_COMPARE_RESPONSE,
LDAP::ProtocolOpcode_ABANDON_REQUEST,
LDAP::ProtocolOpcode_EXTENDED_RESPONSE };
global OPCODES_SEARCH: set[LDAP::ProtocolOpcode] = { LDAP::ProtocolOpcode_SEARCH_REQUEST,
LDAP::ProtocolOpcode_SEARCH_RESULT_ENTRY,
LDAP::ProtocolOpcode_SEARCH_RESULT_DONE,
LDAP::ProtocolOpcode_SEARCH_RESULT_REFERENCE };
#############################################################################
event zeek_init() &priority=5 {
Analyzer::register_for_ports(Analyzer::ANALYZER_LDAP_TCP, LDAP::ports_tcp);
Analyzer::register_for_ports(Analyzer::ANALYZER_LDAP_UDP, LDAP::ports_udp);
Log::create_stream(LDAP::LDAP_LOG, [$columns=MessageInfo, $ev=log_ldap, $path="ldap", $policy=log_policy]);
Log::create_stream(LDAP::LDAP_SEARCH_LOG, [$columns=SearchInfo, $ev=log_ldap_search, $path="ldap_search", $policy=log_policy_search]);
}
#############################################################################
function set_session(c: connection, message_id: int, opcode: LDAP::ProtocolOpcode) {
if (! c?$ldap ) {
c$ldap = State();
Conn::register_removal_hook(c, finalize_ldap);
}
if (! c$ldap?$messages )
c$ldap$messages = table();
if (! c$ldap?$searches )
c$ldap$searches = table();
if ((opcode in OPCODES_SEARCH) && (message_id !in c$ldap$searches)) {
c$ldap$searches[message_id] = [$ts=network_time(),
$uid=c$uid,
$id=c$id,
$message_id=message_id,
$result_count=0];
} else if ((opcode !in OPCODES_SEARCH) && (message_id !in c$ldap$messages)) {
c$ldap$messages[message_id] = [$ts=network_time(),
$uid=c$uid,
$id=c$id,
$message_id=message_id];
}
}
#############################################################################
event LDAP::message(c: connection,
message_id: int,
opcode: LDAP::ProtocolOpcode,
result: LDAP::ResultCode,
matched_dn: string,
diagnostic_message: string,
object: string,
argument: string) {
if (opcode == LDAP::ProtocolOpcode_SEARCH_RESULT_DONE) {
set_session(c, message_id, opcode);
local searches = c$ldap$searches[message_id];
if ( result != LDAP::ResultCode_Undef ) {
if ( ! searches?$results )
searches$results = set();
add searches$results[RESULT_CODES[result]];
}
if ( diagnostic_message != "" ) {
if ( ! searches?$diagnostic_messages )
searches$diagnostic_messages = vector();
searches$diagnostic_messages += diagnostic_message;
}
Log::write(LDAP::LDAP_SEARCH_LOG, searches);
delete c$ldap$searches[message_id];
} else if (opcode !in OPCODES_SEARCH) {
set_session(c, message_id, opcode);
local messages = c$ldap$messages[message_id];
if ( ! messages?$opcodes )
messages$opcodes = set();
add messages$opcodes[PROTOCOL_OPCODES[opcode]];
if ( result != LDAP::ResultCode_Undef ) {
if ( ! messages?$results )
messages$results = set();
add messages$results[RESULT_CODES[result]];
}
if ( diagnostic_message != "" ) {
if ( ! messages?$diagnostic_messages )
messages$diagnostic_messages = vector();
messages$diagnostic_messages += diagnostic_message;
}
if ( object != "" ) {
if ( ! messages?$objects )
messages$objects = vector();
messages$objects += object;
}
if ( argument != "" ) {
if ( ! messages?$arguments )
messages$arguments = vector();
if ("bind simple" in messages$opcodes && !default_capture_password)
messages$arguments += "REDACTED";
else
messages$arguments += argument;
}
if (opcode in OPCODES_FINISHED) {
if ((BIND_SIMPLE in messages$opcodes) ||
(BIND_SASL in messages$opcodes)) {
# don't have both "bind" and "bind <method>" in the operations list
delete messages$opcodes[PROTOCOL_OPCODES[LDAP::ProtocolOpcode_BIND_REQUEST]];
}
Log::write(LDAP::LDAP_LOG, messages);
delete c$ldap$messages[message_id];
}
}
}
#############################################################################
event LDAP::search_request(c: connection,
message_id: int,
base_object: string,
scope: LDAP::SearchScope,
deref: LDAP::SearchDerefAlias,
size_limit: int,
time_limit: int,
types_only: bool,
filter: string,
attributes: vector of string) {
set_session(c, message_id, LDAP::ProtocolOpcode_SEARCH_REQUEST);
if ( scope != LDAP::SearchScope_Undef ) {
if ( ! c$ldap$searches[message_id]?$scopes )
c$ldap$searches[message_id]$scopes = set();
add c$ldap$searches[message_id]$scopes[SEARCH_SCOPES[scope]];
}
if ( deref != LDAP::SearchDerefAlias_Undef ) {
if ( ! c$ldap$searches[message_id]?$derefs )
c$ldap$searches[message_id]$derefs = set();
add c$ldap$searches[message_id]$derefs[SEARCH_DEREF_ALIASES[deref]];
}
if ( base_object != "" ) {
if ( ! c$ldap$searches[message_id]?$base_objects )
c$ldap$searches[message_id]$base_objects = vector();
c$ldap$searches[message_id]$base_objects += base_object;
}
c$ldap$searches[message_id]$filter = filter;
if ( default_log_search_attributes ) {
c$ldap$searches[message_id]$attributes = attributes;
}
}
#############################################################################
event LDAP::search_result(c: connection,
message_id: int,
object_name: string) {
set_session(c, message_id, LDAP::ProtocolOpcode_SEARCH_RESULT_ENTRY);
c$ldap$searches[message_id]$result_count += 1;
}
#############################################################################
event LDAP::bind_request(c: connection,
message_id: int,
version: int,
name: string,
authType: LDAP::BindAuthType,
authInfo: string) {
set_session(c, message_id, LDAP::ProtocolOpcode_BIND_REQUEST);
if ( ! c$ldap$messages[message_id]?$version )
c$ldap$messages[message_id]$version = version;
if ( ! c$ldap$messages[message_id]?$opcodes )
c$ldap$messages[message_id]$opcodes = set();
if (authType == LDAP::BindAuthType_BIND_AUTH_SIMPLE) {
add c$ldap$messages[message_id]$opcodes[BIND_SIMPLE];
} else if (authType == LDAP::BindAuthType_BIND_AUTH_SASL) {
add c$ldap$messages[message_id]$opcodes[BIND_SASL];
}
}
#############################################################################
hook finalize_ldap(c: connection) {
# log any "pending" unlogged LDAP messages/searches
if ( c$ldap?$messages && (|c$ldap$messages| > 0) ) {
for ( [mid], m in c$ldap$messages ) {
if (mid > 0) {
if ((BIND_SIMPLE in m$opcodes) || (BIND_SASL in m$opcodes)) {
# don't have both "bind" and "bind <method>" in the operations list
delete m$opcodes[PROTOCOL_OPCODES[LDAP::ProtocolOpcode_BIND_REQUEST]];
}
Log::write(LDAP::LDAP_LOG, m);
}
}
delete c$ldap$messages;
}
if ( c$ldap?$searches && (|c$ldap$searches| > 0) ) {
for ( [mid], s in c$ldap$searches ) {
if (mid > 0) {
Log::write(LDAP::LDAP_SEARCH_LOG, s);
}
}
delete c$ldap$searches;
}
}

View file

@ -0,0 +1,100 @@
##! Events generated by the LDAP analyzer.
##!
##! See `RFC4511 <https://tools.ietf.org/html/rfc4511>`__.
## Event generated for each LDAPMessage (either direction).
##
## c: The connection.
##
## message_id: The messageID element.
##
## opcode: The protocolOp field in the message.
##
## result: The result code if the message contains a result.
##
## matched_dn: The DN if the message contains a result.
##
## diagnostic_message: Diagnostic message if the LDAP message contains a result.
##
## object: The object name this message refers to.
##
## argument: Additional arguments this message includes.
global LDAP::message: event(
c: connection,
message_id: int,
opcode: LDAP::ProtocolOpcode,
result: LDAP::ResultCode,
matched_dn: string,
diagnostic_message: string,
object: string,
argument: string
);
## Event generated for each LDAPMessage containing a BindRequest.
##
## c: The connection.
##
## message_id: The messageID element.
##
## version: The version field in the BindRequest.
##
## name: The name field in the BindRequest.
##
## auth_type: The auth type field in the BindRequest.
##
## auth_info: Additional information related to the used auth type.
global LDAP::bind_request: event(
c: connection,
message_id: int,
version: int,
name: string,
auth_type: LDAP::BindAuthType,
auth_info: string
);
## Event generated for each LDAPMessage containing a SearchRequest.
##
## c: The connection.
##
## message_id: The messageID element.
##
## base_object: The baseObject field in the SearchRequest.
##
## scope: The scope field in the SearchRequest.
##
## deref: The derefAlias field in the SearchRequest.
##
## size_limit: The sizeLimit field in the SearchRequest.
##
## time_limit: The timeLimit field in the SearchRequest.
##
## types_only: The typesOnly field in the SearchRequest.
##
## filter: The string representation of the filter field in the SearchRequest.
##
## attributes: Additional attributes of the SearchRequest.
global LDAP::search_request: event (
c: connection,
message_id: int,
base_object: string,
scope: LDAP::SearchScope,
deref: LDAP::SearchDerefAlias,
size_limit: int,
time_limit: int,
types_only: bool,
filter: string,
attributes: vector of string
);
## Event generated for each SearchResultEntry in LDAP messages.
##
## c: The connection.
##
## message_id: The messageID element.
##
## object_name: The object name in the SearchResultEntry.
global LDAP::search_result: event (
c: connection,
message_id: int,
object_name: string
);

View file

@ -79,4 +79,3 @@ event modbus_exception(c: connection, headers: ModbusHeaders, code: count) &prio
Log::write(LOG, c$modbus);
delete c$modbus$exception;
}

View file

@ -0,0 +1,5 @@
@ifdef ( Analyzer::ANALYZER_QUIC )
@load ./spicy-events
@load ./consts
@load ./main
@endif

View file

@ -0,0 +1,7 @@
module QUIC;
export {
const version_strings: table[count] of string = {
[0x00000001] = "1",
} &default=function(version: count): string { return fmt("unknown-%x", version); };
}

View file

@ -0,0 +1,221 @@
##! Initial idea for a quic.log.
@load base/frameworks/notice/weird
@load base/protocols/conn/removal-hooks
@load ./consts
module QUIC;
export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp of first QUIC packet for this entry.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## QUIC version as found in the first INITIAL packet from
## the client.
version: string &log;
## First Destination Connection ID used by client. This is
## random and unpredictable, but used for packet protection
## by client and server.
client_initial_dcid: string &log &optional;
## Server chosen Connection ID usually from server's first
## INITIAL packet. This is to be used by the client in
## subsequent packets.
server_scid: string &log &optional;
## Server name extracted from SNI extension in ClientHello
## packet if available.
server_name: string &log &optional;
## First protocol extracted from ALPN extension in ClientHello
## packet if available.
client_protocol: string &log &optional;
## Experimental QUIC history.
##
## Letters have the following meaning with client-sent
## letters being capitalized:
##
## ====== ====================================================
## Letter Meaning
## ====== ====================================================
## I INIT packet
## H HANDSHAKE packet
## Z 0RTT packet
## R RETRY packet
## C CONNECTION_CLOSE packet
## S SSL Client/Server Hello
## ====== ====================================================
history: string &log &default="";
# Internal state for the history field.
history_state: vector of string;
# Internal state if this record has already been logged.
logged: bool &default=F;
};
global log_quic: event(rec: Info);
global log_policy: Log::PolicyHook;
global finalize_quic: Conn::RemovalHook;
## The maximum length of the history field.
option max_history_length = 100;
}
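As an illustrative sketch, the history cap can be raised for environments with long-lived QUIC flows:

redef QUIC::max_history_length = 200;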
redef record connection += {
# XXX: We may have multiple QUIC connections with different
# Connection ID over the same UDP connection.
quic: Info &optional;
};
# Faster to modify here than re-compiling .evt files.
const quic_ports = {
443/udp, # HTTP3-over-QUIC
853/udp, # DNS-over-QUIC
784/udp, # DNS-over-QUIC early
};
function add_to_history(c: connection, is_orig: bool, what: string)
{
if ( |c$quic$history_state| == max_history_length )
return;
c$quic$history_state += is_orig ? to_upper(what[0]) : to_lower(what[0]);
if ( |c$quic$history_state| == max_history_length )
Reporter::conn_weird("QUIC_max_history_length_reached", c);
}
function log_record(quic: Info)
{
quic$history = join_string_vec(quic$history_state, "");
Log::write(LOG, quic);
quic$logged = T;
}
function set_conn(c: connection, is_orig: bool, version: count, dcid: string, scid: string)
{
if ( ! c?$quic )
{
c$quic = Info(
$ts=network_time(),
$uid=c$uid,
$id=c$id,
$version=version_strings[version],
);
Conn::register_removal_hook(c, finalize_quic);
}
if ( is_orig && |dcid| > 0 && ! c$quic?$client_initial_dcid )
c$quic$client_initial_dcid = bytestring_to_hexstr(dcid);
if ( ! is_orig && |scid| > 0 )
c$quic$server_scid = bytestring_to_hexstr(scid);
}
event QUIC::initial_packet(c: connection, is_orig: bool, version: count, dcid: string, scid: string)
{
set_conn(c, is_orig, version, dcid, scid);
add_to_history(c, is_orig, "INIT");
}
event QUIC::handshake_packet(c: connection, is_orig: bool, version: count, dcid: string, scid: string)
{
set_conn(c, is_orig, version, dcid, scid);
add_to_history(c, is_orig, "HANDSHAKE");
}
event QUIC::zero_rtt_packet(c: connection, is_orig: bool, version: count, dcid: string, scid: string)
{
set_conn(c, is_orig, version, dcid, scid);
add_to_history(c, is_orig, "ZeroRTT");
}
# RETRY packets trigger a log entry and state reset.
event QUIC::retry_packet(c: connection, is_orig: bool, version: count, dcid: string, scid: string, retry_token: string, integrity_tag: string)
{
if ( ! c?$quic )
set_conn(c, is_orig, version, dcid, scid);
add_to_history(c, is_orig, "RETRY");
log_record(c$quic);
delete c$quic;
}
# Upon a connection_close_frame(), if any c$quic state is pending to be logged, do so
# now and prepare for a new entry.
event QUIC::connection_close_frame(c: connection, is_orig: bool, version: count, dcid: string, scid: string, error_code: count, reason_phrase: string)
{
if ( ! c?$quic )
return;
add_to_history(c, is_orig, "CONNECTION_CLOSE");
log_record(c$quic);
delete c$quic;
}
event ssl_extension_server_name(c: connection, is_client: bool, names: string_vec) &priority=5
{
if ( is_client && c?$quic && |names| > 0 )
c$quic$server_name = names[0];
}
event ssl_extension_application_layer_protocol_negotiation(c: connection, is_client: bool, protocols: string_vec)
{
if ( c?$quic && is_client )
{
c$quic$client_protocol = protocols[0];
if ( |protocols| > 1 )
# Probably not overly weird, but quic.log only
# works with the first one, in the hope of avoiding
# a vector or concatenation.
Reporter::conn_weird("QUIC_many_protocols", c, cat(protocols));
}
}
event ssl_client_hello(c: connection, version: count, record_version: count, possible_ts: time, client_random: string, session_id: string, ciphers: index_vec, comp_methods: index_vec)
{
if ( ! c?$quic )
return;
add_to_history(c, T, "SSL");
}
event ssl_server_hello(c: connection, version: count, record_version: count, possible_ts: time, server_random: string, session_id: string, cipher: count, comp_method: count) &priority=-5
{
if ( ! c?$quic )
return;
add_to_history(c, F, "SSL");
}
hook finalize_quic(c: connection)
{
if ( ! c?$quic || c$quic$logged )
return;
log_record(c$quic);
}
event zeek_init()
{
Log::create_stream(LOG, [$columns=Info, $ev=log_quic, $path="quic", $policy=log_policy]);
Analyzer::register_for_ports(Analyzer::ANALYZER_QUIC, quic_ports);
}

View file

@ -0,0 +1,82 @@
##! Events generated by the QUIC analyzer.
##!
##! See `RFC9000 <https://tools.ietf.org/html/rfc9000>`__.
## Generated for a QUIC Initial packet.
##
## c: The connection.
##
## is_orig: True if the packet is from the connection's originator.
##
## version: The Version field.
##
## dcid: The Destination Connection ID field.
##
## scid: The Source Connection ID field.
##
global QUIC::initial_packet: event(c: connection, is_orig: bool, version: count, dcid: string, scid: string);
## Generated for a QUIC Retry packet.
##
## c: The connection.
##
## is_orig: True if the packet is from the connection's originator.
##
## version: The Version field.
##
## dcid: The Destination Connection ID field.
##
## scid: The Source Connection ID field.
##
## retry_token: The Retry Token field.
##
## retry_integrity_tag: The Retry Integrity Tag field.
global QUIC::retry_packet: event(c: connection, is_orig: bool, version: count, dcid: string, scid: string, retry_token: string, retry_integrity_tag: string);
## Generated for a QUIC Handshake packet.
##
## c: The connection.
##
## is_orig: True if the packet is from the connection's originator.
##
## version: The Version field.
##
## dcid: The Destination Connection ID field.
##
## scid: The Source Connection ID field.
global QUIC::handshake_packet: event(c: connection, is_orig: bool, version: count, dcid: string, scid: string);
## Generated for a QUIC 0-RTT packet.
##
## c: The connection.
##
## is_orig: True if the packet is from the connection's originator.
##
## version: The Version field.
##
## dcid: The Destination Connection ID field.
##
## scid: The Source Connection ID field.
global QUIC::zero_rtt_packet: event(c: connection, is_orig: bool, version: count, dcid: string, scid: string);
## Generated for a QUIC CONNECTION_CLOSE frame.
##
## c: The connection.
##
## is_orig: True if the packet is from the connection's originator.
##
## version: The Version field.
##
## dcid: The Destination Connection ID field.
##
## scid: The Source Connection ID field.
##
## error_code: Count indicating the reason for closing this connection.
##
## reason_phrase: Additional diagnostic information for the closure.
##
## .. note:: Packets with CONNECTION_CLOSE frames are usually encrypted after connection establishment and not visible to Zeek.
global QUIC::connection_close_frame: event(c: connection, is_orig: bool, version: count, dcid: string, scid: string, error_code: count, reason_phrase: string);

View file

@ -67,7 +67,7 @@ event mime_one_header(c: connection, h: mime_header_rec) &priority=5
c$smtp$entity$filename = extract_filename_from_content_disposition(h$value);
if ( h$name == "CONTENT-TYPE" &&
/[nN][aA][mM][eE][:blank:]*=/ in h$value )
/[nN][aA][mM][eE][[:blank:]]*=/ in h$value )
c$smtp$entity$filename = extract_filename_from_content_disposition(h$value);
}

View file

@ -143,6 +143,10 @@ export {
## (especially with large file transfers).
option disable_analyzer_after_detection = T;
## Maximum length of the ssl_history field to prevent unbounded
## growth when the parser is running into unexpected situations.
option max_ssl_history_length = 100;
## Delays an SSL record for a specific token: the record will not be
## logged as long as the token exists or until 15 seconds elapses.
global delay_log: function(info: Info, token: string);
@ -208,10 +212,16 @@ function set_session(c: connection)
function add_to_history(c: connection, is_client: bool, char: string)
{
if ( |c$ssl$ssl_history| == max_ssl_history_length )
return;
if ( is_client )
c$ssl$ssl_history = c$ssl$ssl_history+to_upper(char);
else
c$ssl$ssl_history = c$ssl$ssl_history+to_lower(char);
if ( |c$ssl$ssl_history| == max_ssl_history_length )
Reporter::conn_weird("SSL_max_ssl_history_length_reached", c);
}
function delay_log(info: Info, token: string)

View file

@ -1,11 +1,9 @@
# Source this script in addition to protocols/conn/community-id
# to add Community ID to notices.
# Source this script to add Community ID to notices.
# This script will automatically load the main community-id script.
# Only support loading this if the main script is also loaded.
@load base/protocols/conn
@load base/frameworks/notice
@ifdef ( CommunityID::seed )
@load policy/protocols/conn/community-id-logging
module CommunityID::Notice;
@ -23,6 +21,5 @@ export {
hook Notice::notice(n: Notice::Info)
{
if ( CommunityID::Notice::enabled && n?$conn )
n$community_id = community_id_v1(n$conn$id);
n$community_id = community_id_v1(n$conn$id, CommunityID::seed, CommunityID::do_base64);
}
@endif
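A hedged configuration sketch using the identifiers referenced above (whether each of them is settable this way is assumed):

redef CommunityID::do_base64 = T;
redef CommunityID::Notice::enabled = T;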

View file

@ -20,7 +20,7 @@ hook notice(n: Notice::Info) &priority=-1
return;
# This should only be done for notices that are being sent to email.
if ( ! n?$email_dest )
if ( |n$email_dest| == 0 )
return;
# I'm not recovering gracefully from the when statements because I want

View file

@ -9,6 +9,7 @@ public type Val = __library_type("::zeek::ValPtr");
public type BroType = __library_type("::zeek::TypePtr");
public type EventHandlerPtr = __library_type("::zeek::EventHandlerPtr");
public type PortRange = __library_type("::zeek::spicy::rt::PortRange");
public type RecordField = __library_type("::zeek::spicy::rt::RecordField");
declare public PortRange make_port_range(port begin_, port end_) &cxxname="zeek::spicy::rt::make_port_range" &have_prototype;
@ -16,10 +17,12 @@ type ZeekTypeTag = enum {
Addr, Any, Bool, Count, Double, Enum, Error, File, Func, Int, Interval, List, Opaque, Pattern, Port, Record, String, Subnet, Table, Time, Type, Vector, Void
} &cxxname="::zeek::spicy::rt::ZeekTypeTag";
declare public void register_spicy_module_begin(string name, string description, time mtime) &cxxname="zeek::spicy::rt::register_spicy_module_begin";
declare public void register_protocol_analyzer(string name, hilti::Protocol protocol, vector<PortRange> ports, string parser_orig, string parser_resp, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_protocol_analyzer" &have_prototype;
declare public void register_file_analyzer(string name, vector<string> mime_types, string parser, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_file_analyzer" &have_prototype;
declare public void register_packet_analyzer(string name, string parser, string replaces, string linker_scope) &cxxname="zeek::spicy::rt::register_packet_analyzer" &have_prototype;
declare public void register_type(string ns, string id, BroType t) &cxxname="zeek::spicy::rt::register_type" &have_prototype;
declare public void register_spicy_module_end() &cxxname="zeek::spicy::rt::register_spicy_module_end";
declare public bool have_handler(EventHandlerPtr handler) &cxxname="zeek::spicy::rt::have_handler" &have_prototype;
declare public EventHandlerPtr internal_handler(string event) &cxxname="zeek::spicy::rt::internal_handler" &have_prototype;
@ -29,10 +32,10 @@ declare public void raise_event(EventHandlerPtr handler, vector<Val> args) &cxxn
declare public BroType event_arg_type(EventHandlerPtr handler, uint<64> idx) &cxxname="zeek::spicy::rt::event_arg_type" &have_prototype;
declare public Val to_val(any x, BroType target) &cxxname="zeek::spicy::rt::to_val" &have_prototype;
type RecordField = tuple<string, BroType, bool>; # (ID, type, optional)
declare public BroType create_base_type(ZeekTypeTag tag) &cxxname="zeek::spicy::rt::create_base_type" &have_prototype;
declare public BroType create_enum_type(string ns, string id, vector<tuple<string, int<64>>> labels) &cxxname="zeek::spicy::rt::create_enum_type" &have_prototype;
declare public BroType create_record_type(string ns, string id, vector<RecordField> fields) &cxxname="zeek::spicy::rt::create_record_type" &have_prototype;
declare public RecordField create_record_field(string id, BroType type_, bool is_optional, bool is_log) &cxxname="zeek::spicy::rt::create_record_field" &have_prototype;
declare public BroType create_table_type(BroType key, optional<BroType> value = Null) &cxxname="zeek::spicy::rt::create_table_type" &have_prototype;
declare public BroType create_vector_type(BroType elem) &cxxname="zeek::spicy::rt::create_vector_type" &have_prototype;

View file

@ -14,13 +14,21 @@
@load policy/protocols/conn/speculative-service.zeek
@if ( have_spicy() )
@load frameworks/spicy/record-spicy-batch.zeek
# Loading this messes up documentation of some elements defined elsewhere.
# @load frameworks/spicy/record-spicy-batch.zeek
@load frameworks/spicy/resource-usage.zeek
@endif
@load ./example.zeek
event zeek_init()
event zeek_init() &priority=1000
{
# Disable events in modules that use zeek_init() to do stuff and may
# fail when run under zeekygen. For the purpose of zeekygen, we could
# probably disable all modules, too.
disable_module_events("Control");
disable_module_events("Management::Agent::Runtime");
disable_module_events("Management::Controller::Runtime");
disable_module_events("Management::Node");
terminate();
}

View file

@ -173,7 +173,7 @@ export {
# This function isn't exported, so it won't appear anywhere in the generated
# documentation. So using ``##``-style comments is pointless here.
function function_without_proto(tag: string): string
function function_without_proto(tag: string): string &is_used
{
# Zeekygen-style comments only apply to entities at global-scope so
# Zeekygen doesn't associate the following comments with anything.

View file

@ -16,9 +16,12 @@ namespace zeek::detail
const char* attr_name(AttrTag t)
{
// Do not collapse the list.
// clang-format off
static const char* attr_names[int(NUM_ATTRS)] = {
"&optional",
"&default",
"&default_insert",
"&redef",
"&add_func",
"&delete_func",
@ -42,6 +45,7 @@ const char* attr_name(AttrTag t)
"&is_used",
"&ordered",
};
// clang-format on
return attr_names[int(t)];
}
@ -359,8 +363,35 @@ void Attributes::CheckAttr(Attr* a)
}
break;
case ATTR_DEFAULT_INSERT:
{
if ( ! type->IsTable() )
{
Error("&default_insert only applicable to tables");
break;
}
if ( Find(ATTR_DEFAULT) )
{
Error("&default and &default_insert cannot be used together");
break;
}
std::string err_msg;
if ( ! check_default_attr(a, type, global_var, in_record, err_msg) &&
! err_msg.empty() )
Error(err_msg.c_str());
break;
}
case ATTR_DEFAULT:
{
if ( Find(ATTR_DEFAULT_INSERT) )
{
Error("&default and &default_insert cannot be used together");
break;
}
std::string err_msg;
if ( ! check_default_attr(a, type, global_var, in_record, err_msg) &&
! err_msg.empty() )
@ -672,11 +703,13 @@ bool Attributes::operator==(const Attributes& other) const
bool check_default_attr(Attr* a, const TypePtr& type, bool global_var, bool in_record,
std::string& err_msg)
{
ASSERT(a->Tag() == ATTR_DEFAULT || a->Tag() == ATTR_DEFAULT_INSERT);
std::string aname = attr_name(a->Tag());
// &default is allowed for global tables, since it's used in
// initialization of table fields. It's not allowed otherwise.
if ( global_var && ! type->IsTable() )
{
err_msg = "&default is not valid for global variables except for tables";
err_msg = aname + " is not valid for global variables except for tables";
return false;
}
@ -707,7 +740,7 @@ bool check_default_attr(Attr* a, const TypePtr& type, bool global_var, bool in_r
return true;
}
a->GetExpr()->Error("&default value has inconsistent type", type.get());
a->GetExpr()->Error(util::fmt("%s value has inconsistent type", aname.c_str()), type.get());
return false;
}
@ -725,7 +758,7 @@ bool check_default_attr(Attr* a, const TypePtr& type, bool global_var, bool in_r
FuncType* f = atype->AsFuncType();
if ( ! f->CheckArgs(tt->GetIndexTypes()) || ! same_type(f->Yield(), ytype) )
{
err_msg = "&default function type clash";
err_msg = aname + " function type clash";
return false;
}
@ -748,7 +781,7 @@ bool check_default_attr(Attr* a, const TypePtr& type, bool global_var, bool in_r
return true;
}
err_msg = "&default value has inconsistent type";
err_msg = aname + " value has inconsistent type";
return false;
}

View file

@ -30,6 +30,7 @@ enum AttrTag
{
ATTR_OPTIONAL,
ATTR_DEFAULT,
ATTR_DEFAULT_INSERT, // insert default value on failed lookups
ATTR_REDEF,
ATTR_ADD_FUNC,
ATTR_DEL_FUNC,

View file
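From the Zeek script side, the new &default_insert attribute behaves like &default on table lookups, except that the computed default is also inserted into the table on a miss (per the enum comment above). A hedged sketch:

global counters: table[string] of count &default_insert=0;

event zeek_init()
    {
    ++counters["seen"];  # the failed lookup inserts 0, which is then incremented
    print counters["seen"], "seen" in counters;  # prints: 1, T
    }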

@ -256,7 +256,7 @@ cmake_policy(POP)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/DebugCmdConstants.h
${CMAKE_CURRENT_BINARY_DIR}/DebugCmdInfoConstants.cc
COMMAND ${PYTHON_EXECUTABLE} ARGS ${CMAKE_CURRENT_SOURCE_DIR}/make_dbg_constants.py
COMMAND ${Python_EXECUTABLE} ARGS ${CMAKE_CURRENT_SOURCE_DIR}/make_dbg_constants.py
${CMAKE_CURRENT_SOURCE_DIR}/DebugCmdInfoConstants.in
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/make_dbg_constants.py
${CMAKE_CURRENT_SOURCE_DIR}/DebugCmdInfoConstants.in
@ -502,10 +502,14 @@ set(zeek_SRCS
${FLEX_Scanner_INPUT}
${BISON_Parser_INPUT}
${CMAKE_CURRENT_BINARY_DIR}/DebugCmdConstants.h
${CMAKE_CURRENT_BINARY_DIR}/ZAM-MethodDecls.h
${THIRD_PARTY_SRCS}
${HH_SRCS}
${MAIN_SRCS})
${CMAKE_CURRENT_BINARY_DIR}/ZAM-MethodDecls.h)
# Add the above files to the list of clang tidy sources before adding the third party and HH
# sources. Also, MAIN_SRCS will get added to that list separately.
add_clang_tidy_files(${zeek_SRCS})
list(APPEND zeek_SRCS ${THIRD_PARTY_SRCS})
list(APPEND zeek_SRCS ${HH_SRCS})
list(APPEND zeek_SRCS ${MAIN_SRCS})
collect_headers(zeek_HEADERS ${zeek_SRCS})
@ -515,11 +519,11 @@ set_target_properties(zeek_objs PROPERTIES CXX_EXTENSIONS OFF)
target_link_libraries(zeek_objs PRIVATE $<BUILD_INTERFACE:zeek_internal>)
target_compile_definitions(zeek_objs PRIVATE ZEEK_CONFIG_SKIP_VERSION_H)
add_dependencies(zeek_objs zeek_autogen_files)
add_clang_tidy_files(${zeek_SRCS})
zeek_target_link_libraries(zeek_objs)
if (HAVE_SPICY)
target_link_libraries(zeek_objs PRIVATE hilti spicy)
prefer_configured_spicy_include_dirs(zeek_objs)
endif ()
if (TARGET zeek_exe)

Some files were not shown because too many files have changed in this diff Show more