Merge remote-tracking branch 'origin/master' into topic/johanna/tls12-decryption

Johanna Amann 2022-01-05 10:27:55 +00:00
commit d1e7134156
640 changed files with 14727 additions and 14980 deletions


@@ -10,10 +10,11 @@ btest_jobs: &BTEST_JOBS 4
 btest_retries: &BTEST_RETRIES 2
 memory: &MEMORY 4GB
-config: &CONFIG --build-type=release --enable-cpp-tests --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install
+config: &CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install
-static_config: &STATIC_CONFIG --build-type=release --enable-cpp-tests --disable-broker-tests --enable-static-broker --enable-static-binpac --prefix=$CIRRUS_WORKING_DIR/install
+static_config: &STATIC_CONFIG --build-type=release --disable-broker-tests --enable-static-broker --enable-static-binpac --prefix=$CIRRUS_WORKING_DIR/install
-sanitizer_config: &SANITIZER_CONFIG --build-type=debug --enable-cpp-tests --disable-broker-tests --sanitizers=address,undefined --enable-fuzzers --enable-coverage
+sanitizer_config: &SANITIZER_CONFIG --build-type=debug --disable-broker-tests --sanitizers=address,undefined --enable-fuzzers --enable-coverage
-mobile_ipv6_config: &MOBILE_IPV6_CONFIG --build-type=release --enable-cpp-tests --enable-mobile-ipv6 --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install
+mobile_ipv6_config: &MOBILE_IPV6_CONFIG --build-type=release --enable-mobile-ipv6 --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install
+openssl30_config: &OPENSSL30_CONFIG --build-type=release --disable-broker-tests --with-openssl=/opt/openssl --prefix=$CIRRUS_WORKING_DIR/install
 resources_template: &RESOURCES_TEMPLATE
 cpu: *CPUS
@@ -93,6 +94,13 @@ env:
 # Linux EOL timelines: https://linuxlifecycle.com/
 # Fedora (~13 months): https://fedoraproject.org/wiki/Fedora_Release_Life_Cycle
+fedora35_task:
+container:
+# Fedora 35 EOL: Around Dec 2022
+dockerfile: ci/fedora-35/Dockerfile
+<< : *RESOURCES_TEMPLATE
+<< : *CI_TEMPLATE
 fedora34_task:
 container:
 # Fedora 34 EOL: Around May 2022
@@ -212,16 +220,16 @@ alpine_task:
 # Apple doesn't publish official long-term support timelines.
 # We aim to support both the current and previous macOS release.
-macos_big_sur_task:
+macos_monterey_task:
 macos_instance:
-image: big-sur-xcode-12.5
+image: monterey-xcode-13.1
 prepare_script: ./ci/macos/prepare.sh
 << : *CI_TEMPLATE
 << : *MACOS_RESOURCES_TEMPLATE
-macos_catalina_task:
+macos_big_sur_task:
 macos_instance:
-image: catalina-xcode
+image: big-sur-xcode-12.5
 prepare_script: ./ci/macos/prepare.sh
 << : *CI_TEMPLATE
 << : *MACOS_RESOURCES_TEMPLATE
@@ -261,6 +269,17 @@ freebsd12_task:
 prepare_script: ./ci/freebsd/prepare.sh
 << : *CI_TEMPLATE
+# This can be removed as soon as the first distribution that we use ships
+# OpenSSL 3.0
+openssl30_task:
+container:
+# Tweaked Ubuntu 20.04 EOL: April 2025
+dockerfile: ci/openssl-3.0/Dockerfile
+<< : *RESOURCES_TEMPLATE
+<< : *CI_TEMPLATE
+env:
+ZEEK_CI_CONFIGURE_FLAGS: *OPENSSL30_CONFIG
 sanitizer_task:
 container:
 # Just uses a recent/common distro to run memory error/leak checks.


@@ -1,10 +1,5 @@
 # Clang-format configuration for Zeek. This configuration requires
 # at least clang-format 12.0.1 to format correctly.
-#
-# The easiest way to run this from the command-line is using the
-# python script in auxil/run-clang-format:
-#
-# python3 auxil/run-clang-format/run-clang-format.py --clang-format-executable /path/to/clang-format -r src -i
 Language: Cpp
 Standard: c++17
@@ -102,4 +97,4 @@ IncludeCategories:
 - Regex: '^"zeek/'
 Priority: 4
 - Regex: '.*'
 Priority: 5


@@ -1,17 +0,0 @@
# Ignore everything 3rdparty
src/3rdparty/*
# These are files that are technically sourced from other places but aren't in 3rdparty
# and shouldn't be reformatted.
src/ConvertUTF.*
src/bro_inet_ntop.*
src/bsd-getopt-long.*
src/in_cksum.*
src/nb_dns.*
src/modp_numtoa.*
src/patricia.*
src/strsep.c
src/setsignal.c
# These files are generated code
src/DebugCmdInfoConstants.*

.git-blame-ignore-revs Normal file

@@ -0,0 +1,26 @@
# Reformat the world (initial clang-format formatting)
b2f171ec69eae3a833a9db1b16e5234bd3eaf0b6
# clang-format: Force zeek-config.h to be earlier in the config ordering
9cb54f5d449b63006cc9a1f451a47732c92fef2d
# clang-format: A few minor comment-spacing fixes
07e276ab2e351ce71b709139f1933b9ead40d094
# clang-format: Enforce ordering of includes in ZBody
cb99ae2b7c9988656b097ad2789dffd2c0c37939
# clang-format: Other include ordering changes
e97c14add5b04aedc7f3f9dba59f665cbad793af
# clang-format: Other minor formatting changes
02206f3215f977ba7752476ba89ca06abe93375c
# clang-format: Set IndentCaseBlocks to false
4423574d265749da8e707ab0fbcffcbfaed26614
# clang-format: Set penalty for breaking after assignment operator
9af6b2f48d11b4e287d0f18034a486f76f9f2d61
# Remove trailing whitespace from script files
a6378531dbc5c357926d98fe785bb719cc70e1b4


@@ -13,12 +13,17 @@ defaults:
 run:
 shell: bash
+env:
+IMAGE_NAME: zeek-image.tar
+IMAGE_FILE: /tmp/zeek-image.tar
+IMAGE_PATH: /tmp
 jobs:
-build:
+docker-build:
 runs-on: ubuntu-latest
 env:
 TEST_TAG: zeek:latest
-CONFFLAGS: --generator=Ninja --build-type=Release
+CONFFLAGS: --generator=Ninja --build-type=Release --enable-zeek-client
 steps:
 - uses: actions/checkout@v2
 with:
@@ -27,7 +32,8 @@ jobs:
 # Create and boot a loader. This will e.g., provide caching
 # so we avoid rebuilds of the same image after this step.
 - uses: docker/setup-buildx-action@v1
-- name: Build
+- name: Build image
 uses: docker/build-push-action@v2
 with:
 context: ./
@@ -40,9 +46,13 @@ jobs:
 - name: Run btests
 run: make -C docker/btest
+- name: Save image tarball
+run: docker save -o ${{ env.IMAGE_FILE }} ${{ env.TEST_TAG }}
 - name: Get version
 id: version
 run: echo "::set-output name=RELEASE_VERSION::$(cat VERSION)"
 - name: Compute target tag
 id: target
 env:
@@ -59,21 +69,22 @@ jobs:
 echo "::set-output name=tag::zeek:latest"
 elif [ "${GITHUB_REF}" = "refs/heads/master" ]; then
 echo "::set-output name=tag::zeek-dev:latest"
-elif [[ "${GITHUB_REF}" = refs/heads/v* ]] && [[ "${GITHUB_REF}" != refs/heads/v*-dev ]]; then
+elif [[ "${GITHUB_REF}" = refs/tags/v* ]] && [[ "${GITHUB_REF}" != refs/tags/v*-dev ]]; then
 echo "::set-output name=tag::zeek:${RELEASE_VERSION}"
 fi
 - name: Login to DockerHub
 uses: docker/login-action@v1
-# Secrets for the login are not available for pull requests.
-if: github.event_name == 'push'
+# Don't publish on forks. Also note that secrets for the login are not
+# available for pull requests, so trigger on pushes only.
+if: github.repository == 'zeek/zeek' && github.event_name == 'push'
 with:
 username: ${{ secrets.DOCKER_USERNAME }}
 password: ${{ secrets.DOCKER_PASSWORD }}
-- name: Push
+- name: Push image
 # Only publish if we did compute a tag.
-if: github.event_name == 'push' && steps.target.outputs.tag != ''
+if: github.repository == 'zeek/zeek' && github.event_name == 'push' && steps.target.outputs.tag != ''
 uses: docker/build-push-action@v2
 with:
 context: ./
@@ -84,10 +95,65 @@ jobs:
 tags: |
 zeekurity/${{ steps.target.outputs.tag }}
-- name: Preserve artifacts
+- name: Preserve image artifact
+uses: actions/upload-artifact@v2
+with:
+name: ${{ env.IMAGE_NAME }}
+path: ${{ env.IMAGE_FILE }}
+retention-days: 1
+- name: Preserve btest artifacts
 uses: actions/upload-artifact@v2
 if: failure()
 with:
 name: docker-btest
 path: docker/btest/.tmp
 if-no-files-found: ignore
+cluster-testing:
+# We need the Zeek Docker image build job to complete first, since we need
+# the resulting image for our docker-compose setup.
+needs: docker-build
+runs-on: ubuntu-latest
+steps:
+# Grab the sources so we have access to btest. Could also use pip, but it
+# seems appealing to be using the in-tree version of btest. btest is in a
+# submodule; we check it out selectively to save time.
+- uses: actions/checkout@v2
+- name: Check out btest
+run: git submodule update --init ./auxil/btest
+- name: Download Docker image artifact
+uses: actions/download-artifact@v2
+with:
+name: ${{ env.IMAGE_NAME }}
+path: ${{ env.IMAGE_PATH }}
+- name: Load Docker image
+run: |
+docker load --input ${{ env.IMAGE_FILE }}
+docker tag zeek:latest zeektest:latest
+# The testsuite ref to use for this version of Zeek is stored in a file in
+# the Zeek source tree.
+- name: Get testsuite version
+run: |
+echo "TESTSUITE_COMMIT=$(cat ./testing/external/commit-hash.zeek-testing-cluster)" >> $GITHUB_ENV
+- name: Retrieve cluster testsuite
+uses: actions/checkout@v2
+with:
+repository: zeek/zeek-testing-cluster
+path: testing/external/zeek-testing-cluster
+ref: ${{ ENV.TESTSUITE_COMMIT }}
+- name: Run testsuite
+run: make -C testing/external/zeek-testing-cluster
+- name: Preserve btest artifacts
+uses: actions/upload-artifact@v2
+if: failure()
+with:
+name: cluster-btest
+path: testing/external/zeek-testing-cluster/.tmp
+if-no-files-found: ignore

.github/workflows/pre-commit.yml Normal file

@@ -0,0 +1,14 @@
name: pre-commit
on:
pull_request:
push:
branches: [master]
jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- uses: pre-commit/action@v2.0.3

.gitmodules

@@ -49,6 +49,3 @@
 [submodule "auxil/zeek-client"]
 path = auxil/zeek-client
 url = https://github.com/zeek/zeek-client
-[submodule "auxil/run-clang-format"]
-path = auxil/run-clang-format
-url = https://github.com/Sarcasm/run-clang-format

.pre-commit-config.yaml Normal file

@@ -0,0 +1,19 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
#
repos:
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: 'v13.0.0'
hooks:
- id: clang-format
- repo: https://github.com/maxwinterstein/shfmt-py
rev: 3.3.1.8
hooks:
- id: shfmt
args: ["-w", "-i", "4", "-ci"]
- repo: https://github.com/pre-commit/mirrors-yapf
rev: v0.31.0
hooks:
- id: yapf

.style.yapf Normal file

@@ -0,0 +1,2 @@
[style]
column_limit=100

CHANGES

@@ -1,3 +1,480 @@
4.2.0-dev.514 | 2022-01-03 13:56:12 -0700
* deprecation warning on use of out-of-scope local (Vern Paxson, Corelight)
4.2.0-dev.510 | 2022-01-03 13:54:52 -0700
* Switch BitTorrent analyzer to Zeek's regex engine (Avinal Kumar)
- Removes dependency on <regex.h>
- Replaces regex function with Zeek's standard regex functions
- Some replacements are workarounds and may be improved later via an
appropriate API
- Update test baseline, which seems to capture a bug in the
existing code.
Edit pass by Robin Sommer. Note that our test doesn't cover all the code
paths, but it does go through the one with the most substantial change.
* Adding test for BitTorrent tracker. (Robin Sommer, Corelight)
Our test trace is extracted from https://www.cloudshark.org/captures/b9089aac6eee.
There actually seems to be a bug in the existing code: the URI passed to
bt_tracker_request() includes a partial HTTP version. This commit
includes the baseline as the current code produces it; we'll fix that in
a subsequent commit.
4.2.0-dev.506 | 2022-01-03 09:33:43 -0800
* Expansion of the emerging cluster controller framework (Christian Kreibich, Corelight)
- Controller/agent connectivity is now controlled by pushed configurations
- The Request module now supports state timeouts
- Use Result records consistently for responses to the client
- Track successful config deployment in cluster controller
- Add ClusterController::API::notify_agents_ready event
- Make all globals start with a "g_" prefix
- Add missing debug() log function to log module's API
- Add separate utility module for controller and agent
- Additional infrastructure for printing types
- Bump zeek-client to v0.2.0
- Add Github action job for cluster tests
- Tweak Docker image configure invocation to include zeek-client
- Zeekygen documentation pass
4.2.0-dev.477 | 2021-12-14 16:53:57 -0700
* fixes for double-delete and reducing '?' operator with constant alternatives (Vern Paxson, Corelight)
* correct usage info for -u flag; -uu no longer supported (Vern Paxson, Corelight)
4.2.0-dev.468 | 2021-12-14 11:34:47 -0700
* factoring of generating C++ initializations, no semantic changes (Vern Paxson, Corelight)
* restored support for incremental compilation of scripts to C++ (Vern Paxson, Corelight)
* fixes for -O gen-standalone-C++ (Vern Paxson, Corelight)
* new ZEEK_FILE_ONLY and ZEEK_FUNC_ONLY environment variables for debugging script optimization - replaces ZEEK_ONLY (Vern Paxson, Corelight)
* fix for compiling record constructors to C++ (Vern Paxson, Corelight)
* fixes for compiling vector operations to C++ (Vern Paxson, Corelight)
* fix for profiling missing some profile elements (Vern Paxson, Corelight)
* minor efficiency tweak for ZAM record construction (Vern Paxson, Corelight)
4.2.0-dev.456 | 2021-12-14 09:23:47 -0700
* GH-1860: Add double_to_int() bif (Tim Wojtulewicz, Corelight)
4.2.0-dev.454 | 2021-12-13 09:41:32 -0700
* Check for sets before attempting to check for same Yield types (Tim Wojtulewicz)
* Add early bail-outs to same_type() (Tim Wojtulewicz)
* Fix types for Analyzer::register_for_port(s) to be the same (Tim Wojtulewicz)
* Update cmake submodule across all other submodules (Tim Wojtulewicz, Corelight)
4.2.0-dev.448 | 2021-12-10 15:35:34 -0700
* update btest to no longer use (unsupported) %S formatting, no longer needed (Vern Paxson, Corelight)
* replace --optimize-only with --optimize-funcs and --optimize-files (Vern Paxson, Corelight)
4.2.0-dev.444 | 2021-12-10 13:13:13 -0700
* reintroduction of "-O add-C++" option (Vern Paxson, Corelight)
4.2.0-dev.442 | 2021-12-10 13:12:43 -0700
* fixes for vector operations (Vern Paxson, Corelight)
* flag globals initialized to opaque values as non-compilable (Vern Paxson, Corelight)
* skip type signatures for lambdas (Vern Paxson, Corelight)
* fix for translating filenames beginning with numbers to C++ variable names (Vern Paxson, Corelight)
4.2.0-dev.436 | 2021-12-10 13:11:36 -0700
* update script-to-C++ compilation for new record constructor internals (Vern Paxson, Corelight)
4.2.0-dev.434 | 2021-12-10 13:11:10 -0700
* updates to ZAM to track recent changes in script semantics (Vern Paxson, Corelight)
4.2.0-dev.432 | 2021-12-10 09:28:23 -0700
* GH-1741: Print error if calling a non-hook with hook keyword (Tim Wojtulewicz, Corelight)
* GH-1740: Report a better error message if table key is not a list (Tim Wojtulewicz, Corelight)
4.2.0-dev.428 | 2021-12-09 14:58:53 -0700
* GH-1125: Support GRE ARUBA headers (Tim Wojtulewicz, Corelight)
* Fix ethertype for ARP in Geneve forwarding rules (Tim Wojtulewicz, Corelight)
4.2.0-dev.425 | 2021-12-09 13:45:17 -0800
* Add LogAscii::json_include_unset_fields flag to control unset field rendering (Christian Kreibich, Corelight)
4.2.0-dev.423 | 2021-12-09 19:56:43 +0000
* Improve error message for clash between variable and function name (Johanna Amann, Corelight)
Fixes GH-1832
* Restore --disable-zeekctl configure argument (Tim Wojtulewicz, Corelight)
* Update plugin.hooks baseline for recent Geneve change (Tim Wojtulewicz, Corelight)
4.2.0-dev.419 | 2021-12-07 09:34:45 -0700
* GH-1764: Update mappings for Geneve analyzer to IP4/IP6/ARP (Tim Wojtulewicz, Corelight)
4.2.0-dev.417 | 2021-12-06 17:00:16 -0800
* Flip C++ unit tests to being enabled by default (Christian Kreibich, Corelight)
To disable them, configure with --disable-cpp-tests.
* Support for unit tests in plugins (Christian Kreibich, Corelight)
4.2.0-dev.410 | 2021-12-06 11:29:32 -0700
* Remove separate Tag types, note breaking change in NEWS (Tim Wojtulewicz, Corelight)
4.2.0-dev.408 | 2021-12-06 09:15:24 -0700
* GH-1768: Properly clean up existing log stream when recreated with the same ID (Tim Wojtulewicz, Corelight)
4.2.0-dev.406 | 2021-12-01 10:32:34 -0700
* Format Python scripts with yapf. (Benjamin Bannier, Corelight)
We also add a very basic yapf configuration file. Most of the changes in
this patch were performed automatically, but we broke one overly long
string into multiple components in `src/make_dbg_constants.py`.
* Format shell scripts with shfmt. (Benjamin Bannier, Corelight)
All changes in this patch were performed automatically with `shfmt` with
configuration flags specified in `.pre-commit-config.yaml`.
4.2.0-dev.403 | 2021-12-01 10:25:32 -0700
* fix btest comment to more accurately describe the test (Vern Paxson, Corelight)
* btests for erroneous script conditionals (Vern Paxson, Corelight)
* avoid compiling-to-C++ for functions potentially influenced by conditionals (Vern Paxson, Corelight)
* track the use of conditionals in functions and files (Vern Paxson, Corelight)
* AST profiles track the associated function/body/expression (Vern Paxson, Corelight)
4.2.0-dev.396 | 2021-12-01 09:44:03 -0700
* GH-1873: Deprecate the tag types differently to avoid type clashes (Tim Wojtulewicz, Corelight)
4.2.0-dev.394 | 2021-11-30 11:53:35 -0700
* Fix for the recent patch that allows segment offloaded packets. (Johanna Amann, Corelight)
We recently added support for segment offloaded packets. It turns out
that this can lead to problems in UDP/ICMP based parsers, since I missed
updating the payload length there to use the capture
length instead when segment offloading is enabled.
Credit to OSS-Fuzz for discovery
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=41391
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=41394
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=41395
(Link to details becomes public 30 days after patch release)
4.2.0-dev.393 | 2021-11-29 13:46:59 -0700
* Fix a number of Coverity findings (Tim Wojtulewicz, Corelight)
1466460: Uninitialized field in gtp-analyzer.pac
1462465: Null pointer dereference in CompositeHash::SingleValHash
1462463: Copy/paste error in TCPSessionAdapter::build_syn_packet_val
1462067: Uninitialized fields in Zinst
4.2.0-dev.391 | 2021-11-29 13:44:11 -0700
* suppress unneeded initializations (Vern Paxson, Corelight)
4.2.0-dev.387 | 2021-11-24 13:32:33 -0700
* fixes for constructing and assigning records with fields that are empty vectors (Vern Paxson, Corelight)
4.2.0-dev.385 | 2021-11-23 19:43:48 -0700
* Changes to speed up compilation of Compiled-to-C++ Zeek Scripts (Vern Paxson, Corelight)
* removing unused SubNetType class (Vern Paxson, Corelight)
4.2.0-dev.371 | 2021-11-23 19:41:10 -0700
* Add new tunnel packet analyzers, remove old ones (Tim Wojtulewicz, Corelight)
* Add PacketAnalyzer::register_for_port(s) functions (Tim Wojtulewicz, Corelight)
These allow packet analyzers to register ports as identifiers to forward from
parent analyzers, while also adding those ports to the now-global
Analyzer::ports table at the same time.
* Add analyzer_confirmation and analyzer_violation events (Tim Wojtulewicz, Corelight)
* Add utility function for tunnel analyzers to setup encapsulation (Tim Wojtulewicz, Corelight)
* Store some additional information in the packet during processing (Tim Wojtulewicz, Corelight)
- Session related to the packet
- is_orig information if a UDP header was found
* Minor fix in UDP to avoid duplicating tunnels (Tim Wojtulewicz, Corelight)
* Fix error text in IPTunnel analyzer (Tim Wojtulewicz, Corelight)
* Change Packet::ip_hdr to be a shared_ptr so it can be copied into EncapsulatingConn (Tim Wojtulewicz, Corelight)
* Add method for packet analyzers to register for protocol detection (Tim Wojtulewicz, Corelight)
* Add concept of "parent" tag namespaces (Tim Wojtulewicz, Corelight)
This allows us to create an EnumType that groups all of the analyzer
tag values into a single type, while still having the existing types
that split them up. We can then use this for certain events that benefit
from taking all of the tag types at once.
* Unify plugin::Component and plugin::TaggedComponent into a single class (Tim Wojtulewicz, Corelight)
These two are almost always used in conjunction with each other, and
TaggedComponent is never used by itself. Combining them together into
a single class will help simplify some of the code around managing
the mapping between Tags and Components.
* Remove uses of deprecated Tag types (Tim Wojtulewicz, Corelight)
* Unify all of the Tag types into one type (Tim Wojtulewicz, Corelight)
- Remove tag types for each component type (analyzer, etc)
- Add deprecated versions of the old types
- Remove unnecessary tag element from templates for TaggedComponent and ComponentManager
- Enable TaggedComponent to pass an EnumType when initializing Tag objects
- Update some tests that are affected by the tag enum values changing order
4.2.0-dev.350 | 2021-11-23 15:35:06 +0000
* Add testcase for TCP segment offloading (GH-1829). (Johanna Amann, Corelight)
4.2.0-dev.348 | 2021-11-23 13:45:39 +0000
* OpenSSL 3 compatibility (Johanna Amann, Corelight)
Zeek is now compatible with OpenSSL 3.0, our test baselines pass cleanly, and
we have a CI run for OpenSSL 3.0. This has a certain amount of new code for
X.509 certificate parsing. Apart from that, the main change is that we
use an older, legacy API for OpaqueVal hashing, since the newer API
does not allow us to serialize data anymore. For details see ticket 1379.
4.2.0-dev.340 | 2021-11-23 10:10:13 +0000
* Accept packets that use tcp segment offloading. (Johanna Amann, Corelight)
When checksum offloading is enabled, we now forward packets that
have 0 header lengths set - and assume that they have TSO enabled.
If checksum offloading is not enabled, we drop the packets (GH-1829)
* Updates to NEWS to cover recent additions. [nomail] [skip ci] (Christian Kreibich, Corelight)
* Update doc and auxil/zeek-aux submodules [nomail] [skip ci] (Christian Kreibich, Corelight)
* Update cmake and aux/zeek-aux submodules [nomail] [skip ci] (Christian Kreibich, Corelight)
4.2.0-dev.333 | 2021-11-17 11:57:04 -0800
* Clean up fully after successful Docker btests (Christian Kreibich, Corelight)
4.2.0-dev.331 | 2021-11-15 10:10:52 -0800
* Fix ref-naming typo in the Github Docker workflow (Christian Kreibich, Corelight)
4.2.0-dev.328 | 2021-11-12 13:46:32 -0700
* Update libkqueue submodule (Tim Wojtulewicz, Corelight)
4.2.0-dev.326 | 2021-11-12 09:30:54 -0700
* Added plugin.unprocessed_packet_hook btest (Tim Wojtulewicz, Corelight)
* Fix whitespace in help output (Tim Wojtulewicz, Corelight)
* Add command-line option to write unprocessed packets to a file (Tim Wojtulewicz, Corelight)
This commit also changes the PcapDumper to automatically flush after
every call to Dump(). This is because pcap_dump has an internal buffer
of some sort that only writes to the file after a set amount of bytes.
When using the new option on a low-traffic network, it might be a while
before you see any packets written since it has to overcome that buffer
limit first.
* GH-1620: Add event and plugin hook to track packets not processed (Tim Wojtulewicz, Corelight)
4.2.0-dev.319 | 2021-11-10 10:20:01 -0700
* Install include headers from `src/3rdparty/`. (Benjamin Bannier, Corelight)
This is a fixup commit for 72cbc7cd13b7c1bda98658104431c3b530ff68d6
where we moved some header files from `src/` to `src/3rdparty/` but
missed adding install rules for these headers. Since some of these
headers are exposed in installed headers, they need to be installed as
well.
4.2.0-dev.317 | 2021-11-10 11:33:29 +0000
* Add case-insensitive search for find_str and rfind_str (Abdel)
4.2.0-dev.314 | 2021-11-10 11:16:28 +0100
* GH-1757: Add new hook `HookLoadFileExtended` that allows plugins
to supply Zeek script and signature code to parse. (Robin Sommer)
The new hook works similar to the existing `HookLoadFile` but,
additionally, allows the plugin to return a string that contains
the code to be used for the file being loaded. If the plugin does
so, the content of any actual file on disk will be ignored. This
works for both Zeek scripts and signatures.
* Fix an issue where signature files supplied on the command line
wouldn't pass through the file loading hooks. (Robin Sommer,
Corelight)
4.2.0-dev.310 | 2021-11-09 10:29:59 -0700
* Add Github action exercising pre-commit (Benjamin Bannier, Corelight)
This patch adds a Github action which exercises pre-commit linters for
commits to the `master` branch or for pull requests. We add this task
as a Github action since we expect it to finish quickly; running outside
of Cirrus makes it possible to provide feedback quickly.
* Add pre-commit config. (Benjamin Bannier, Corelight)
This patch adds `clang-format` as the only linter for now. This replaces the
previously used script from `auxil/run-clang-format` which we remove.
This requires the Python program `pre-commit`
(https://pypi.org/project/pre-commit/). With that one can then run
`clang-format` on the whole codebase with
$ pre-commit run -a clang-format
or on just the staged files
# Explicitly selecting linter.
$ pre-commit run clang-format
# Run all linters (currently just `clang-format`).
$ pre-commit
`pre-commit` supports managing Git commit hooks so that linters are run
on commit. Linters can be installed with
$ pre-commit install
The documentation at https://pre-commit.com/ covers these topics in
addition to more information.
* Format code with `clang-format` (Benjamin Bannier, Corelight)
This patch formats files not conforming to the C++ formatting with
`clang-format`.
* Remove stale files `src/DebugCmdInfoConstants.*` (Benjamin Bannier, Corelight)
The files generated from `src/DebugCmdInfoConstants.in` are placed in
`build/src/` by the build setup; the generated files in `src/` removed
here were unused and possibly out-of-date.
* Disable formatting for files in `testing/btest/plugins` (Benjamin Bannier, Corelight)
Files in that folder were previously not formatted. With this patch we
now disable formatting in that folder explicitly by adding a dedicated
`clang-format` config which deactivates any formatting changes.
* Move 3rdparty source files to `3rdparty/` (Benjamin Bannier, Corelight)
This patch moves in-tree 3rdparty source files to `3rdparty/`. With that
we can remove special treatment of these files for `run-clang-format`.
4.2.0-dev.303 | 2021-11-09 09:45:57 -0700
* GH-1819: Handle recursive types when describing type in binary mode (Tim Wojtulewicz, Corelight)
4.2.0-dev.301 | 2021-11-09 09:28:18 -0700
* Remove no-op false-teredo test (Tim Wojtulewicz, Corelight)
4.2.0-dev.297 | 2021-11-05 12:49:55 -0700
* Only push CI's Docker images when we're on the main repo (Christian Kreibich, Corelight)
* Add macOS Monterey and drop Catalina in CI (Christian Kreibich, Corelight)
* Add Fedora 35 to CI (Christian Kreibich, Corelight)
4.2.0-dev.292 | 2021-11-04 14:28:35 -0700
* Fix C++ set intersection code (Yacin Nadji, Corelight)
4.2.0-dev.286 | 2021-11-03 09:36:41 -0700
* GH-693: use pcap_dump_open_append where supported (Tim Wojtulewicz, Corelight)
4.2.0-dev.284 | 2021-11-03 09:35:10 -0700
* GH-1781: Add .git-blame-ignore-revs file (Tim Wojtulewicz, Corelight)
4.2.0-dev.280 | 2021-11-01 09:20:16 -0700
* Fix issue with broken libpcaps that return repeat packets (Tim Wojtulewicz, Corelight)
This is apparently a problem with the Myricom version of libpcap, where
instead of returning a null or a zero if no packets are available, it
returns the previous packet. This causes Zeek to improperly parse the
packet and crash. We thought we had fixed this previously with a check
for a null packet but that fix was not enough.
4.2.0-dev.277 | 2021-10-21 17:23:46 -0700
* Apply some missing clang-format changes (Tim Wojtulewicz, Corelight)
4.2.0-dev.274 | 2021-10-20 11:13:16 -0700
* Remove trailing whitespace from script files (Tim Wojtulewicz, Corelight)
4.2.0-dev.271 | 2021-10-19 14:54:56 +0200
* Add parsing of DNS SVCB/HTTPS records (FlyingWithJerome)
4.2.0-dev.260 | 2021-10-15 09:45:45 +0100
* logging/writers/ascii: shadow files: Add fsync() before rename(). This
prevents potential problems with leftover files after unclean shutdowns.
(Arne Welzel, Corelight)
* Fix typo in typedef changes that broke tests on 32-bit Debian 9 (Tim Wojtulewicz, Corelight)
4.2.0-dev.255 | 2021-10-12 09:22:37 -0700
* Replace most uses of typedef with using for type aliasing (Tim Wojtulewicz, Corelight)


@@ -490,6 +490,9 @@ include(FindKqueue)
 if ( (OPENSSL_VERSION VERSION_EQUAL "1.1.0") OR (OPENSSL_VERSION VERSION_GREATER "1.1.0") )
 set(ZEEK_HAVE_OPENSSL_1_1 true CACHE INTERNAL "" FORCE)
 endif()
+if ( (OPENSSL_VERSION VERSION_EQUAL "3.0.0") OR (OPENSSL_VERSION VERSION_GREATER "3.0.0") )
+set(ZEEK_HAVE_OPENSSL_3_0 true CACHE INTERNAL "" FORCE)
+endif()
 # Tell the plugin code that we're building as part of the main tree.
 set(ZEEK_PLUGIN_INTERNAL_BUILD true CACHE INTERNAL "" FORCE)


@@ -250,7 +250,7 @@ PROJECT (https://github.com/zeek) UNDER BSD LICENCE.
 ==============================================================================
-%%% in_cksum.cc
+%%% 3rdparty/in_cksum.cc
 ==============================================================================
@@ -283,7 +283,7 @@ SUCH DAMAGE.
 ==============================================================================
-%%% Patricia.c
+%%% 3rdparty/patricia.c
 ==============================================================================
@@ -328,7 +328,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ==============================================================================
-%%% strsep.c
+%%% 3rdparty/strsep.c
 ==============================================================================
@@ -365,7 +365,7 @@ SUCH DAMAGE.
 ==============================================================================
-%%% ConvertUTF.c
+%%% 3rdparty/ConvertUTF.c
 ==============================================================================
@@ -479,7 +479,7 @@ SUCH DAMAGE.
 ==============================================================================
-%%% bsd-getopt-long.c
+%%% 3rdparty/bsd-getopt-long.c
 ==============================================================================
@@ -555,7 +555,7 @@ limitations under the License.
 ==============================================================================
-%%% bro_inet_ntop.c
+%%% 3rdparty/bro_inet_ntop.c
 ==============================================================================
@@ -578,7 +578,7 @@ OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 ==============================================================================
-%%% modp_numtoa.h
+%%% 3rdparty/modp_numtoa.h
 ==============================================================================

NEWS

@@ -6,6 +6,17 @@ release. For an exhaustive list of changes, see the ``CHANGES`` file
Zeek 4.2.0
==========
Breaking Changes
----------------
- The existing ``Tag`` types in C++ (``zeek::Analyzer::Tag``, etc) have been
merged into a single type called ``zeek::Tag``. This is a breaking change, and
may result in plugins failing to build where they were relying on those types
being different for function overloading and such. We attempted to include
deprecated versions of the old types, but were unable to do so because of
changes to return types from a number of methods. With this change, any uses
of the ``zeek::*::Tag`` types will need to be replaced by ``zeek::Tag``.
New Functionality
-----------------
@@ -22,19 +33,89 @@ New Functionality
example to build a Zeek plugin. You can add any required system packages in a
derived image, or install them directly in the running container.
-- Zeek now supports formatting the C++ code using clang-format. It requires at
-least clang-format 12.0.1 due to some additions that were made in that version
-to better support the Whitesmiths style. Zeek also includes a set of python
-scripts to more easily reformat in the auxil/run-clang-format directory. An
-example command to reformat the code:
-`python3 auxil/run-clang-format/run-clang-format.py --clang-format-executable `which clang-format-12` -r src -i`
- Zeek now supports formatting the C++ code using clang-format. Also provided is
a configuration for ``pre-commit`` to run clang-format when adding new commits
via ``git``. More details can be found at
https://github.com/zeek/zeek/wiki/Coding-Style-and-Conventions#clang-format.
- Experimental support for speeding up Zeek script execution by compiling
scripts to a low-level form called "ZAM". You activate this feature by
specifying ``-O ZAM`` on the command line. See
``src/script_opt/ZAM/README.md`` for more information.
- Improvements for compiling scripts to C++ (an experimental optimization
feature introduced in 4.1). The generated C++ now compiles much faster than
previously, though it can still take quite a while when using C++ optimization
on large sets of scripts. You can incrementally compile additional scripts
using ``-O add-C++``. See ``src/script_opt/CPP/README.md`` for details.
- The new flags --optimize-files=/pat/ and --optimize-funcs=/pat/ apply
to both ZAM and compile-to-C++ script optimization. The first instructs
Zeek to optimize any functions/hooks/event handlers residing in files
matching the given pattern (unanchored). The second does the same but
based on the function name, and with the pattern anchored (so for example
--optimize-funcs=foo will optimize any functions named "foo" but not
those named "foobar", or "MYSCOPE::foo"). The flags can be combined
and can also be used multiple times to specify a set of patterns.
If neither flag is used then optimization is applied to all loaded
scripts; if used, then only to those that match.
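For instance, the two flags can be combined in a single invocation. This is an illustrative command line based only on the descriptions above; the script, file pattern, and function name are hypothetical:

    zeek -O ZAM --optimize-files=http --optimize-funcs=foo local.zeek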
- The ``-uu`` flag for analyzing potentially unused record fields has been
removed because, due to other changes in script optimization, keeping it
would now require about 1,800 lines of code not otherwise needed.
- The DNS analyzer has initial support for the SVCB and HTTPS types. The new
events are ``dns_SVCB`` and ``dns_HTTPS``.
- The ``find_str`` and ``rfind_str`` bifs now support case-insensitive searches.
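As a quick illustration of the entry above, a case-insensitive lookup might read as follows. This is a hedged sketch: it assumes the case-sensitivity switch is a trailing boolean parameter of ``find_str``, so verify the exact signature against the generated bif documentation:

    event zeek_init()
        {
        # Sketch only: assumed signature find_str(str, substr, start, end,
        # case_sensitive); F is assumed to request a case-insensitive search.
        local pos = find_str("Zeek Network Security Monitor", "network", 0, -1, F);
        print pos; # expected: the position of "Network" despite the case mismatch
        }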
- Added ``Plugin::HookUnprocessedPacket``, a new plugin hook for capturing
packets that made it through analysis without being processed. Currently
ARP packets or packets with a valid IP-based transport header are marked as
processed. This also adds an event called ``packet_not_processed`` that
reports the same packets.
- A new command-line option ``-c`` or ``--capture-unprocessed`` will dump any
packets not marked as being processed, similar to the new hook and event
above.
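An invocation sketch for the new option, assuming it takes the output file as its argument (both file names here are placeholders):

    zeek -r trace.pcap -c unprocessed.pcap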
- In Zeek plugins, the new cmake function ``zeek_plugin_scripts()`` should be
used alongside ``zeek_plugin_cc()`` and related functions to establish
dependency tracking between Zeek scripts shipped with the plugin and plugin
rebuilds. Previously, updates to included Zeek scripts didn't reliably
trigger a rebuild.
- Added PacketAnalyzer::register_for_port(s) functions to the packet analyzer
framework in script-land. This allows a packet analyzer to register a port
mapping with a parent analyzer just like any other numeric identifier, while
also adding that port to the now-global Analyzer::ports table used by BPF
filtering.
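As a sketch of what such a registration looks like in script-land (modeled on the in-tree tunnel analyzers; the VXLAN tag and port number are merely illustrative):

    event zeek_init() &priority=20
        {
        # Forward UDP port 4789 to the VXLAN packet analyzer and record the
        # port in Analyzer::ports; the (parent, child, port) argument order
        # is assumed from the in-tree usage.
        PacketAnalyzer::register_for_port(PacketAnalyzer::ANALYZER_UDP,
                                          PacketAnalyzer::ANALYZER_VXLAN,
                                          4789/udp);
        }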
- Added AllAnalyzers::Tag enum type that combines the existing Analyzer::Tag,
PacketAnalyzer::Tag, and Files::Tag into a single enum. The existing types
still exist, but the new type can be used as an argument for
functions/hooks/events that need to handle any of the analyzer types.
- Added protocol detection functionality to the packet analyzer framework.
Packet analyzers can register for protocol detection using the
``PacketAnalyzer::register_protocol_detection`` script function and implement
the ``PacketAnalyzer::DetectProtocol`` method in C++. This allows packet
analyzer plugins to detect a protocol via byte matching or other heuristics
instead of relying solely on a numeric identifier for forwarding.
- The JSON logger's new LogAscii::json_include_unset_fields flag provides
control over how to handle unset "&optional" fields. By default it continues
to skip such fields entirely. When redef'ing the flag to T it includes such
fields, with a "null" value. This simplifies data import use cases that
require fields to be present at all times, regardless of their value.
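For example, enabling the new behavior is a one-line redef of the flag named in the entry above:

    # Render unset &optional fields as JSON null instead of omitting them.
    redef LogAscii::json_include_unset_fields = T;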
- A new external testsuite, https://github.com/zeek/zeek-testing-cluster,
focuses on testing the emerging controller framework. It leverages the new
official Zeek Docker image for building docker-compose test setups, driven via
btest. The Github CI setup now includes a workflow that deploys and runs this
testsuite.
- The GRE analyzer now supports the Aruba WLAN protocol type.
Changed Functionality
---------------------
@@ -43,19 +124,75 @@ Changed Functionality
to serialize, meaning that you can now also index with sets, vectors,
patterns, and even tables.
- The traditional TSV Zeek logs are now valid UTF8 by default. It's possible to
revert to the previous behavior by setting ``LogAscii::enable_utf_8`` to
false.
- The ``SYN_packet`` record now records TCP timestamps (TSval/TSecr) when
available.
-Removed Functionality
----------------------
- The ``init-plugin`` script now focuses purely on dynamic Zeek plugins. It no
longer generates Zeek packages. To instantiate new Zeek packages, use the
``zkg create`` command instead.
- The ``ignore_checksums`` option and the ``-C`` command-line option now
additionally cause Zeek to accept IPv4 packets that provide a length of zero
in the total-length IPv4 header field. When the length is set to zero, the
capture length of the packet is used instead. This can be used to replay
traces, or analyze traffic when TCP sequence offloading is enabled on the
local NIC - which typically causes the total-length of affected packets to be
set to zero.
- The existing tunnel analyzers for AYIYA, Geneve, GTPv1, Teredo, and VXLAN are
now packet analyzers.
- C++ unit tests are now compiled in by default and can be disabled by
configuring the build with --disable-cpp-tests. We removed the former
--enable-cpp-tests configure flag. Unit tests now also work in (static and
dynamic) Zeek plugins.
- This release expands the emerging cluster controller framework. Most changes
concern internals of the framework. Agent/controller connectivity management
has become more flexible: configuration updates pushed by the client can now
convey the agent topology, removing the need to hardwire/redef settings
in the controller. The new ClusterController::API::notify_agents_ready event
declares the management infrastructure ready for use. zeek-client's CLI has
expanded to support the new functionality.
The framework is still experimental and provides only a small subset of
ZeekControl's functionality. ZeekControl remains the recommended tool for
maintaining your cluster.
Deprecated Functionality
------------------------
- The ``protocol_confirmation`` and ``protocol_violation`` events along with the
corresponding ``Analyzer::ProtocolConfirmation`` and
``Analyzer::ProtocolViolation`` C++ methods are marked as deprecated. They are
replaced by ``analyzer_confirmation`` and ``analyzer_violation`` which can
also now be implemented in packet analyzers.
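As a migration sketch for script-land handlers, assuming the new event keeps the (connection, tag, analyzer id) parameter shape of the event it replaces (verify against the event documentation):

    event analyzer_confirmation(c: connection, atype: AllAnalyzers::Tag, aid: count)
        {
        # Serves the same role as a deprecated protocol_confirmation handler.
        print fmt("analyzer %s confirmed on %s", atype, c$uid);
        }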
- Declaring a local variable in an inner scope and then accessing it in an
outer scope is now deprecated. For example,

    if ( foo() )
        {
        local a = 5;
        ...
        }

    print a;

is deprecated. You can address the issue by hoisting the declaration
to the outer scope, such as:

    local a: count;

    if ( foo() )
        {
        a = 5;
        ...
        }

    print a;
Zeek 4.1.0
==========


@@ -1 +1 @@
-4.2.0-dev.255
+5.0.0-dev.2

@@ -1 +1 @@
-Subproject commit 6bd264351813eedb92753d2d4ed76ac6ddc076b3
+Subproject commit a7d9233b37daac558314625566bb8c8a993f2904

@@ -1 +1 @@
-Subproject commit 8169f1630993b34189b2c221d0e5ab8ba9777967
+Subproject commit 8b1322d3060a1fecdc586693e6215ad7ef8ab0e9

@@ -1 +1 @@
-Subproject commit 47cac80cbe1e1bde8e3b425903e50d62715972a2
+Subproject commit d9e84400534b968e33ab01cfadfb569c0d7b2929

@@ -1 +1 @@
-Subproject commit 0a37819d484358999a47e76ac473da74799ab08d
+Subproject commit 5f954ec65cb78b17f7156455c8c3c905a816ae96

@@ -1 +1 @@
-Subproject commit 6c1717dea2dc34a91d32e07d2cae34b1afa0a84e
+Subproject commit aeaeed21198d6f41d0cf70bda63fe0f424922ac5

@@ -1 +1 @@
-Subproject commit dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1
+Subproject commit fd3dc29a5c2852df569e1ea81dbde2c412ac5051

@@ -1 +0,0 @@
-Subproject commit 39081c9c42768ab5e8321127a7494ad1647c6a2f

@@ -1 +1 @@
-Subproject commit f3a1e8fe464c0425688eff67e30f35c678914ad2
+Subproject commit 479e8a85fd58936c16d361dbf3de4e7268d751f8

@@ -1 +1 @@
-Subproject commit 296383d577a3f089c4f491061a985293cf6736e6
+Subproject commit 12be5e3e51a4a97ab3aa0fa4a02da194a83c7f24

@@ -1 +1 @@
-Subproject commit afe253c77591e87b2a6cf6d5682cd02caa78e9d2
+Subproject commit 553d897734b6d9abbc2e4467fae89f68a2c7315d

@@ -1 +1 @@
-Subproject commit d31885671d74932d951778c029fa74d44cf3e542
+Subproject commit 95b048298a77bb14d2c54dcca8bb549c86eb96b9


@@ -1,6 +1,6 @@
 #! /usr/bin/env bash
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd)"
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
 . ${SCRIPT_DIR}/common.sh
 set -e
@@ -10,6 +10,11 @@ set -x
 # some problems with Catalina specifically, but it doesn't break anything on Big Sur either.
 if [[ "${CIRRUS_OS}" == "darwin" ]]; then
 export ZEEK_CI_CONFIGURE_FLAGS="${ZEEK_CI_CONFIGURE_FLAGS} --osx-sysroot=$(xcrun --show-sdk-path)"
+# Starting with Monterey & Xcode 13.1 we need to help it find OpenSSL
+if [ -d /usr/local/opt/openssl@1.1/lib/pkgconfig ]; then
+export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/opt/openssl@1.1/lib/pkgconfig
+fi
 fi
 if [[ "${ZEEK_CI_CREATE_ARTIFACT}" != "1" ]]; then


@@ -3,10 +3,10 @@
 # On Cirrus, oversubscribe the CPUs when on Linux. This uses Cirrus' "greedy" feature.
 if [[ "${CIRRUS_OS}" == linux ]]; then
 if [[ -n "${ZEEK_CI_CPUS}" ]]; then
-ZEEK_CI_CPUS=$(( 2 * ${ZEEK_CI_CPUS} ))
+ZEEK_CI_CPUS=$((2 * ${ZEEK_CI_CPUS}))
 fi
 if [[ -n "${ZEEK_CI_BTEST_JOBS}" ]]; then
-ZEEK_CI_BTEST_JOBS=$(( 2 * ${ZEEK_CI_BTEST_JOBS} ))
+ZEEK_CI_BTEST_JOBS=$((2 * ${ZEEK_CI_BTEST_JOBS}))
 fi
 fi

ci/fedora-35/Dockerfile Normal file

@@ -0,0 +1,23 @@
FROM fedora:35
RUN dnf -y install \
bison \
cmake \
diffutils \
findutils \
flex \
git \
gcc \
gcc-c++ \
libpcap-devel \
make \
openssl-devel \
python3-devel \
python3-pip\
sqlite \
swig \
which \
zlib-devel \
&& dnf clean all && rm -rf /var/cache/dnf
RUN pip3 install junit2html


@@ -8,6 +8,6 @@ set -x
 env ASSUME_ALWAYS_YES=YES pkg bootstrap
 pkg install -y bash git cmake swig bison python3 base64
 pkg upgrade -y curl
-pyver=`python3 -c 'import sys; print(f"py{sys.version_info[0]}{sys.version_info[1]}")'`
+pyver=$(python3 -c 'import sys; print(f"py{sys.version_info[0]}{sys.version_info[1]}")')
 pkg install -y $pyver-sqlite3 $pyver-pip
 pip install junit2html


@@ -1,13 +1,12 @@
 #! /usr/bin/env bash
-function banner
-{
+function banner {
 local msg="${1}"
 printf "+--------------------------------------------------------------+\n"
 printf "| %-60s |\n" "$(date)"
 printf "| %-60s |\n" "${msg}"
 printf "+--------------------------------------------------------------+\n"
 }
 set -e
@@ -52,8 +51,8 @@ if [[ -n "${CIRRUS_CI}" ]] && [[ "${CIRRUS_REPO_OWNER}" == "zeek" ]] && [[ ! -d
 fi
 banner "Trying to clone zeek-testing-private git repo"
-echo "${ZEEK_TESTING_PRIVATE_SSH_KEY}" > cirrus_key.b64
-base64 -d cirrus_key.b64 > cirrus_key
+echo "${ZEEK_TESTING_PRIVATE_SSH_KEY}" >cirrus_key.b64
+base64 -d cirrus_key.b64 >cirrus_key
 rm cirrus_key.b64
 chmod 600 cirrus_key
 git --version

ci/openssl-3.0/Dockerfile Normal file

@@ -0,0 +1,36 @@
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
RUN apt-get update && apt-get -y install \
git \
cmake \
make \
gcc \
g++ \
flex \
bison \
libpcap-dev \
libssl-dev \
python3 \
python3-dev \
python3-pip\
swig \
zlib1g-dev \
libmaxminddb-dev \
libkrb5-dev \
bsdmainutils \
sqlite3 \
curl \
wget \
unzip \
ruby \
bc \
lcov \
&& rm -rf /var/lib/apt/lists/*
# Note - the symlink is important, otherwise cmake uses the wrong .so files.
RUN wget https://www.openssl.org/source/openssl-3.0.0.tar.gz && tar xvf ./openssl-3.0.0.tar.gz && cd ./openssl-3.0.0 && ./Configure --prefix=/opt/openssl && make install && cd .. && rm -rf openssl-3.0.0 && ln -sf /opt/openssl/lib64 /opt/openssl/lib
RUN pip3 install junit2html
RUN gem install coveralls-lcov


@@ -1,65 +0,0 @@
#! /bin/sh
#
# Copyright (c) 2020 by the Zeek Project. See LICENSE for details.
base=$(git rev-parse --show-toplevel)
fix=0
pre_commit_hook=0
# Directories to run on by default. When changing, adapt .pre-commit-config.yam
# as well.
files="src"
error() {
test "${pre_commit_hook}" = 0 && echo "$@" >&2 && exit 1
exit 0
}
if [ $# != 0 ]; then
case "$1" in
--fixit)
shift
fix=1
;;
--pre-commit-hook)
shift
fix=1
pre_commit_hook=1
;;
-*)
echo "usage: $(basename $0) [--fixit | --pre-commit-hook] [<files>]"
exit 1
esac
fi
test $# != 0 && files="$@"
if [ -z "${CLANG_FORMAT}" ]; then
CLANG_FORMAT=$(which clang-format 2>/dev/null)
fi
if [ -z "${CLANG_FORMAT}" -o ! -x "${CLANG_FORMAT}" ]; then
error "Cannot find clang-format. If not in PATH, set CLANG_FORMAT."
fi
if ! (cd / && ${CLANG_FORMAT} -dump-config | grep -q SpacesInConditionalStatement); then
error "${CLANG_FORMAT} does not support SpacesInConditionalStatement. Install custom version and put it into PATH, or point CLANG_FORMAT to it."
fi
if [ ! -e .clang-format ]; then
error "Must execute in top-level directory."
fi
cmd="${base}/auxil/run-clang-format/run-clang-format.py -r --clang-format-executable ${CLANG_FORMAT} --exclude '*/3rdparty/*' ${files}"
tmp=/tmp/$(basename $0).$$.tmp
trap "rm -f ${tmp}" EXIT
eval "${cmd}" >"${tmp}"
if [ "${fix}" = 1 ]; then
test -s "${tmp}" && cat "${tmp}" | git apply -p0
true
else
cat "${tmp}"
fi


@@ -17,8 +17,8 @@ for fuzzer_path in ${fuzzers}; do
 if [[ -e ${corpus} ]]; then
 echo "Fuzzer: ${fuzzer_exe} ${corpus}"
-( rm -rf corpus && mkdir corpus ) || result=1
-( cd corpus && unzip ../${corpus} >/dev/null ) || result=1
+(rm -rf corpus && mkdir corpus) || result=1
+(cd corpus && unzip ../${corpus} >/dev/null) || result=1
 ${fuzzer_path} corpus/* >${fuzzer_exe}.out 2>${fuzzer_exe}.err
 if [[ $? -eq 0 ]]; then
@@ -36,5 +36,4 @@ for fuzzer_path in ${fuzzers}; do
 echo "-----------------------------------------"
 done
 exit ${result}


@@ -16,47 +16,41 @@ if [[ -z "${CIRRUS_CI}" ]]; then
 ZEEK_CI_BTEST_RETRIES=2
 fi
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd)"
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
 . ${SCRIPT_DIR}/common.sh
-function pushd
-{
-command pushd "$@" > /dev/null || exit 1
-}
+function pushd {
+command pushd "$@" >/dev/null || exit 1
+}
-function popd
-{
-command popd "$@" > /dev/null || exit 1
-}
+function popd {
+command popd "$@" >/dev/null || exit 1
+}
-function banner
-{
+function banner {
 local msg="${1}"
 printf "+--------------------------------------------------------------+\n"
 printf "| %-60s |\n" "$(date)"
 printf "| %-60s |\n" "${msg}"
 printf "+--------------------------------------------------------------+\n"
 }
-function run_unit_tests
-{
+function run_unit_tests {
 banner "Running unit tests"
 pushd build
-( . ./zeek-path-dev.sh && zeek --test ) || result=1
+(. ./zeek-path-dev.sh && zeek --test) || result=1
 popd
 return 0
 }
-function prep_artifacts
-{
+function prep_artifacts {
 banner "Prepare artifacts"
 [[ -d .tmp ]] && rm -rf .tmp/script-coverage && tar -czf tmp.tar.gz .tmp
 junit2html btest-results.xml btest-results.html
 }
-function run_btests
-{
+function run_btests {
 banner "Running baseline tests: zeek"
 pushd testing/btest
@@ -73,10 +67,9 @@ function run_btests
 prep_artifacts
 popd
 return 0
 }
-function run_external_btests
-{
+function run_external_btests {
 # Commenting out this line in btest.cfg causes the script profiling/coverage
 # to be disabled. We do this for the sanitizer build right now because of a
 # fairly significant performance bug when running tests.
@@ -120,7 +113,7 @@ function run_external_btests
 else
 banner "Skipping private tests (not available for PRs)"
 fi
 }
 banner "Start tests: ${ZEEK_CI_CPUS} cpus, ${ZEEK_CI_BTEST_JOBS} btest jobs"


@@ -1,15 +1,15 @@
 #! /usr/bin/env bash
-unset ZEEK_DISABLE_ZEEKYGEN;
+unset ZEEK_DISABLE_ZEEKYGEN
 # If running this from btest, unset any of the environment
 # variables that alter default script values.
-unset ZEEK_DEFAULT_LISTEN_ADDRESS;
-unset ZEEK_DEFAULT_LISTEN_RETRY;
-unset ZEEK_DEFAULT_CONNECT_RETRY;
+unset ZEEK_DEFAULT_LISTEN_ADDRESS
+unset ZEEK_DEFAULT_LISTEN_RETRY
+unset ZEEK_DEFAULT_CONNECT_RETRY
-dir="$( cd "$( dirname "$0" )" && pwd )"
-source_dir="$( cd $dir/.. && pwd )"
+dir="$(cd "$(dirname "$0")" && pwd)"
+source_dir="$(cd $dir/.. && pwd)"
 build_dir=$source_dir/build
 conf_file=$build_dir/zeekygen-test.conf
 output_dir=$source_dir/doc
@@ -21,15 +21,14 @@ fi
 case $output_dir in
 /*) ;;
-*) output_dir=`pwd`/$output_dir ;;
+*) output_dir=$(pwd)/$output_dir ;;
 esac
 cd $build_dir
 . zeek-path-dev.sh
 export ZEEK_SEED_FILE=$source_dir/testing/btest/random.seed
-function run_zeek
-{
+function run_zeek {
 ZEEK_ALLOW_INIT_ERRORS=1 zeek -X $conf_file zeekygen >/dev/null 2>$zeek_error_file
 if [ $? -ne 0 ]; then
@@ -37,23 +36,22 @@ function run_zeek
 echo "See stderr in $zeek_error_file"
 exit 1
 fi
 }
 scripts_output_dir=$output_dir/scripts
 rm -rf $scripts_output_dir
-printf "script\t*\t$scripts_output_dir/" > $conf_file
+printf "script\t*\t$scripts_output_dir/" >$conf_file
 echo "Generating $scripts_output_dir/"
 run_zeek
 script_ref_dir=$output_dir/script-reference
 mkdir -p $script_ref_dir
-function generate_index
-{
+function generate_index {
 echo "Generating $script_ref_dir/$2"
-printf "$1\t*\t$script_ref_dir/$2\n" > $conf_file
+printf "$1\t*\t$script_ref_dir/$2\n" >$conf_file
 run_zeek
 }
 generate_index "script_index" "autogenerated-script-index.rst"
 generate_index "package_index" "autogenerated-package-index.rst"

2
cmake

@ -1 +1 @@
Subproject commit 4d1990f0e4c273cf51ec52278add6ff256f9c889 Subproject commit 12fbc1a3bc206a57b079505e3df938c3a993ba58

==== configure ====
@@ -54,51 +54,51 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
 install --home [PATH/lib/python]
 Optional Features:
---enable-debug compile in debugging mode (like --build-type=Debug)
 --enable-coverage compile with code coverage support (implies debugging mode)
+--enable-debug compile in debugging mode (like --build-type=Debug)
 --enable-fuzzers build fuzzer targets
+--enable-jemalloc link against jemalloc
 --enable-mobile-ipv6 analyze mobile IPv6 features defined by RFC 6275
 --enable-perftools enable use of Google perftools (use tcmalloc)
 --enable-perftools-debug use Google's perftools for debugging
---enable-jemalloc link against jemalloc
---enable-static-broker build Broker statically (ignored if --with-broker is specified)
 --enable-static-binpac build binpac statically (ignored if --with-binpac is specified)
---enable-cpp-tests build Zeek's C++ unit tests
+--enable-static-broker build Broker statically (ignored if --with-broker is specified)
 --enable-zeek-client install the Zeek cluster management client (experimental)
---disable-zeekctl don't install ZeekControl
---disable-auxtools don't build or install auxiliary tools
 --disable-archiver don't build or install zeek-archiver tool
+--disable-auxtools don't build or install auxiliary tools
+--disable-broker-tests don't try to build Broker unit tests
 --disable-btest don't install BTest
 --disable-btest-pcaps don't install Zeek's BTest input pcaps
+--disable-cpp-tests don't build Zeek's C++ unit tests
 --disable-python don't try to build python bindings for Broker
---disable-broker-tests don't try to build Broker unit tests
+--disable-zeekctl don't install ZeekControl
 --disable-zkg don't install zkg
 Required Packages in Non-Standard Locations:
---with-openssl=PATH path to OpenSSL install root
---with-bind=PATH path to BIND install root
---with-pcap=PATH path to libpcap install root
---with-binpac=PATH path to BinPAC executable
-(useful for cross-compiling)
 --with-bifcl=PATH path to Zeek BIF compiler executable
 (useful for cross-compiling)
---with-flex=PATH path to flex executable
+--with-bind=PATH path to BIND install root
+--with-binpac=PATH path to BinPAC executable
+(useful for cross-compiling)
 --with-bison=PATH path to bison executable
---with-python=PATH path to Python executable
 --with-broker=PATH path to Broker install root
 (Zeek uses an embedded version by default)
 --with-caf=PATH path to C++ Actor Framework install root
 (a Broker dependency that is embedded by default)
+--with-flex=PATH path to flex executable
 --with-libkqueue=PATH path to libkqueue install root
 (Zeek uses an embedded version by default)
+--with-openssl=PATH path to OpenSSL install root
+--with-pcap=PATH path to libpcap install root
+--with-python=PATH path to Python executable
 Optional Packages in Non-Standard Locations:
 --with-geoip=PATH path to the libmaxminddb install root
+--with-jemalloc=PATH path to jemalloc install root
 --with-krb5=PATH path to krb5 install root
 --with-perftools=PATH path to Google Perftools install root
---with-jemalloc=PATH path to jemalloc install root
---with-python-lib=PATH path to libpython
 --with-python-inc=PATH path to Python headers
+--with-python-lib=PATH path to libpython
 --with-swig=PATH path to SWIG executable
 Packaging Options (for developers):
@@ -118,7 +118,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
 CXXFLAGS C++ compiler flags
 "
-sourcedir="$( cd "$( dirname "$0" )" && pwd )"
+sourcedir="$(cd "$(dirname "$0")" && pwd)"
 if [ ! -e "$sourcedir/cmake/COPYING" ] && [ -d "$sourcedir/.git" ]; then
 echo "\
@@ -128,8 +128,8 @@ This typically means that you performed a non-recursive git clone of
 Zeek. To check out the required subdirectories, please execute:
 ( cd $sourcedir && git submodule update --recursive --init )
-" >&2;
-exit 1;
+" >&2
+exit 1
 fi
 # Function to append a CMake cache entry definition to the
@@ -137,14 +137,14 @@ fi
 # $1 is the cache entry variable name
 # $2 is the cache entry variable type
 # $3 is the cache entry variable value
-append_cache_entry () {
+append_cache_entry() {
 CMakeCacheEntries="$CMakeCacheEntries -D $1:$2=$3"
 }
 # Function to remove a CMake cache entry definition from the
 # CMakeCacheEntries variable
 # $1 is the cache entry variable name
-remove_cache_entry () {
+remove_cache_entry() {
 CMakeCacheEntries="$CMakeCacheEntries -U $1"
 # Even with -U, cmake still warns by default if
@@ -156,22 +156,23 @@ remove_cache_entry () {
 builddir=build
 prefix=/usr/local/zeek
 CMakeCacheEntries=""
 append_cache_entry CMAKE_INSTALL_PREFIX PATH $prefix
 append_cache_entry ZEEK_ROOT_DIR PATH $prefix
 append_cache_entry ZEEK_SCRIPT_INSTALL_PATH STRING $prefix/share/zeek
 append_cache_entry ZEEK_ETC_INSTALL_DIR PATH $prefix/etc
 append_cache_entry ENABLE_DEBUG BOOL false
 append_cache_entry ENABLE_PERFTOOLS BOOL false
 append_cache_entry ENABLE_JEMALLOC BOOL false
+append_cache_entry ENABLE_ZEEK_UNIT_TESTS BOOL true
 append_cache_entry BUILD_SHARED_LIBS BOOL true
 append_cache_entry INSTALL_AUX_TOOLS BOOL true
 append_cache_entry INSTALL_BTEST BOOL true
 append_cache_entry INSTALL_BTEST_PCAPS BOOL true
 append_cache_entry INSTALL_ZEEK_ARCHIVER BOOL true
 append_cache_entry INSTALL_ZEEKCTL BOOL true
 append_cache_entry INSTALL_ZKG BOOL true
 append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING
 append_cache_entry ZEEK_SANITIZERS STRING ""
 append_cache_entry ZEEK_INCLUDE_PLUGINS STRING ""
 has_enable_mobile_ipv6=0
@@ -179,12 +180,12 @@ has_enable_mobile_ipv6=0
 # parse arguments
 while [ $# -ne 0 ]; do
 case "$1" in
--*=*) optarg=`echo "$1" | sed 's/[-_a-zA-Z0-9]*=//'` ;;
+-*=*) optarg=$(echo "$1" | sed 's/[-_a-zA-Z0-9]*=//') ;;
 *) optarg= ;;
 esac
 case "$1" in
---help|-h)
+--help | -h)
 echo "${usage}" 1>&2
 exit 1
 ;;
@@ -198,110 +199,105 @@ while [ $# -ne 0 ]; do
 builddir=$optarg
 ;;
 --build-type=*)
 append_cache_entry CMAKE_BUILD_TYPE STRING $optarg
 if [ $(echo "$optarg" | tr [:upper:] [:lower:]) = "debug" ]; then
 append_cache_entry ENABLE_DEBUG BOOL true
 fi
 ;;
 --generator=*)
 CMakeGenerator="$optarg"
 ;;
 --ccache)
 append_cache_entry ENABLE_CCACHE BOOL true
 ;;
 --toolchain=*)
 append_cache_entry CMAKE_TOOLCHAIN_FILE PATH $optarg
 ;;
 --include-plugins=*)
 append_cache_entry ZEEK_INCLUDE_PLUGINS STRING $optarg
 ;;
 --prefix=*)
 prefix=$optarg
 append_cache_entry CMAKE_INSTALL_PREFIX PATH $optarg
 append_cache_entry ZEEK_ROOT_DIR PATH $optarg
 ;;
 --libdir=*)
 append_cache_entry CMAKE_INSTALL_LIBDIR PATH $optarg
 ;;
 --plugindir=*)
 append_cache_entry ZEEK_PLUGIN_DIR PATH $optarg
 ;;
 --python-dir=*)
 append_cache_entry ZEEK_PYTHON_DIR PATH $optarg
 ;;
 --python-prefix=*)
 append_cache_entry ZEEK_PYTHON_PREFIX PATH $optarg
 ;;
 --python-home=*)
 append_cache_entry ZEEK_PYTHON_HOME PATH $optarg
 ;;
 --scriptdir=*)
 append_cache_entry ZEEK_SCRIPT_INSTALL_PATH STRING $optarg
 user_set_scriptdir="true"
 ;;
 --conf-files-dir=*)
 append_cache_entry ZEEK_ETC_INSTALL_DIR PATH $optarg
 user_set_conffilesdir="true"
 ;;
 --localstatedir=*)
 append_cache_entry ZEEK_LOCAL_STATE_DIR PATH $optarg
 ;;
 --spooldir=*)
 append_cache_entry ZEEK_SPOOL_DIR PATH $optarg
 ;;
 --logdir=*)
 append_cache_entry ZEEK_LOG_DIR PATH $optarg
 ;;
 --mandir=*)
 append_cache_entry ZEEK_MAN_INSTALL_PATH PATH $optarg
 ;;
 --enable-coverage)
 append_cache_entry ENABLE_COVERAGE BOOL true
 append_cache_entry ENABLE_DEBUG BOOL true
+;;
+--enable-debug)
+append_cache_entry ENABLE_DEBUG BOOL true
 ;;
 --enable-fuzzers)
 append_cache_entry ZEEK_ENABLE_FUZZERS BOOL true
 ;;
---enable-debug)
-append_cache_entry ENABLE_DEBUG BOOL true
+--enable-jemalloc)
+append_cache_entry ENABLE_JEMALLOC BOOL true
 ;;
 --enable-mobile-ipv6)
 has_enable_mobile_ipv6=1
 ;;
 --enable-perftools)
 append_cache_entry ENABLE_PERFTOOLS BOOL true
 ;;
 --enable-perftools-debug)
 append_cache_entry ENABLE_PERFTOOLS BOOL true
 append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true
-;;
---sanitizers=*)
-append_cache_entry ZEEK_SANITIZERS STRING $optarg
-;;
---enable-jemalloc)
-append_cache_entry ENABLE_JEMALLOC BOOL true
-;;
---enable-static-broker)
-append_cache_entry BUILD_STATIC_BROKER BOOL true
 ;;
 --enable-static-binpac)
 append_cache_entry BUILD_STATIC_BINPAC BOOL true
 ;;
---enable-cpp-tests)
-append_cache_entry ENABLE_ZEEK_UNIT_TESTS BOOL true
+--enable-static-broker)
+append_cache_entry BUILD_STATIC_BROKER BOOL true
 ;;
 --enable-zeek-client)
 append_cache_entry INSTALL_ZEEK_CLIENT BOOL true
-;;
---disable-zeekctl)
-append_cache_entry INSTALL_ZEEKCTL BOOL false
-;;
---disable-auxtools)
-append_cache_entry INSTALL_AUX_TOOLS BOOL false
 ;;
 --disable-archiver)
 append_cache_entry INSTALL_ZEEK_ARCHIVER BOOL false
+;;
+--disable-auxtools)
+append_cache_entry INSTALL_AUX_TOOLS BOOL false
+;;
+--disable-broker-tests)
+append_cache_entry BROKER_DISABLE_TESTS BOOL true
+append_cache_entry BROKER_DISABLE_DOC_EXAMPLES BOOL true
 ;;
 --disable-btest)
 append_cache_entry INSTALL_BTEST BOOL false
@@ -309,71 +305,76 @@ while [ $# -ne 0 ]; do
 --disable-btest-pcaps)
 append_cache_entry INSTALL_BTEST_PCAPS BOOL false
 ;;
---disable-python)
-append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
+--disable-cpp-tests)
+append_cache_entry ENABLE_ZEEK_UNIT_TESTS BOOL false
 ;;
---disable-broker-tests)
-append_cache_entry BROKER_DISABLE_TESTS BOOL true
-append_cache_entry BROKER_DISABLE_DOC_EXAMPLES BOOL true
+--disable-python)
+append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
+;;
+--disable-zeekctl)
+append_cache_entry INSTALL_ZEEKCTL BOOL false
 ;;
 --disable-zkg)
 append_cache_entry INSTALL_ZKG BOOL false
 ;;
---with-openssl=*)
-append_cache_entry OPENSSL_ROOT_DIR PATH $optarg
+--with-bifcl=*)
+append_cache_entry BIFCL_EXE_PATH PATH $optarg
 ;;
 --with-bind=*)
 append_cache_entry BIND_ROOT_DIR PATH $optarg
 ;;
---with-pcap=*)
-append_cache_entry PCAP_ROOT_DIR PATH $optarg
-;;
 --with-binpac=*)
 append_cache_entry BINPAC_EXE_PATH PATH $optarg
-;;
---with-bifcl=*)
-append_cache_entry BIFCL_EXE_PATH PATH $optarg
-;;
---with-flex=*)
-append_cache_entry FLEX_EXECUTABLE PATH $optarg
 ;;
 --with-bison=*)
 append_cache_entry BISON_EXECUTABLE PATH $optarg
 ;;
+--with-broker=*)
+append_cache_entry BROKER_ROOT_DIR PATH $optarg
+;;
+--with-caf=*)
+append_cache_entry CAF_ROOT PATH $optarg
+;;
+--with-flex=*)
+append_cache_entry FLEX_EXECUTABLE PATH $optarg
+;;
 --with-geoip=*)
 append_cache_entry LibMMDB_ROOT_DIR PATH $optarg
 ;;
+--with-jemalloc=*)
+append_cache_entry JEMALLOC_ROOT_DIR PATH $optarg
+append_cache_entry ENABLE_JEMALLOC BOOL true
+;;
 --with-krb5=*)
 append_cache_entry LibKrb5_ROOT_DIR PATH $optarg
 ;;
+--with-libkqueue=*)
+append_cache_entry LIBKQUEUE_ROOT_DIR PATH $optarg
+;;
+--with-pcap=*)
+append_cache_entry PCAP_ROOT_DIR PATH $optarg
+;;
 --with-perftools=*)
 append_cache_entry GooglePerftools_ROOT_DIR PATH $optarg
 ;;
---with-jemalloc=*)
-append_cache_entry JEMALLOC_ROOT_DIR PATH $optarg
-append_cache_entry ENABLE_JEMALLOC BOOL true
+--with-openssl=*)
+append_cache_entry OPENSSL_ROOT_DIR PATH $optarg
 ;;
 --with-python=*)
 append_cache_entry PYTHON_EXECUTABLE PATH $optarg
-;;
---with-python-lib=*)
-append_cache_entry PYTHON_LIBRARY PATH $optarg
 ;;
 --with-python-inc=*)
 append_cache_entry PYTHON_INCLUDE_DIR PATH $optarg
 append_cache_entry PYTHON_INCLUDE_PATH PATH $optarg
+;;
+--with-python-lib=*)
+append_cache_entry PYTHON_LIBRARY PATH $optarg
 ;;
 --with-swig=*)
 append_cache_entry SWIG_EXECUTABLE PATH $optarg
 ;;
---with-broker=*)
-append_cache_entry BROKER_ROOT_DIR PATH $optarg
-;;
---with-caf=*)
-append_cache_entry CAF_ROOT PATH $optarg
-;;
---with-libkqueue=*)
-append_cache_entry LIBKQUEUE_ROOT_DIR PATH $optarg
+--sanitizers=*)
+append_cache_entry ZEEK_SANITIZERS STRING $optarg
 ;;
 --binary-package)
 append_cache_entry BINARY_PACKAGING_MODE BOOL true
@@ -400,15 +401,15 @@ done
 if [ -z "$CMakeCommand" ]; then
 # prefer cmake3 over "regular" cmake (cmake == cmake2 on RHEL)
-if command -v cmake3 >/dev/null 2>&1 ; then
+if command -v cmake3 >/dev/null 2>&1; then
 CMakeCommand="cmake3"
-elif command -v cmake >/dev/null 2>&1 ; then
+elif command -v cmake >/dev/null 2>&1; then
 CMakeCommand="cmake"
 else
 echo "This package requires CMake, please install it first."
 echo "Then you may use this script to configure the CMake build."
 echo "Note: pass --cmake=PATH to use cmake in non-standard locations."
-exit 1;
+exit 1
 fi
 fi
@@ -442,8 +443,8 @@ else
 "$CMakeCommand" $CMakeCacheEntries $sourcedir
 fi
-echo "# This is the command used to configure this build" > config.status
-echo $command >> config.status
+echo "# This is the command used to configure this build" >config.status
+echo $command >>config.status
 chmod u+x config.status
 if [ $has_enable_mobile_ipv6 -eq 1 ]; then

==== doc (submodule) ====
@@ -1 +1 @@
-Subproject commit fefd7e6ceb67dd011c268c658171967f1281b970
+Subproject commit b8ae1f336272371d6c46fda133e472a075f69e3d
==== (next file) ====
@@ -1,11 +1,11 @@
 DIAG=diag.log
 BTEST=../../auxil/btest/btest
-all: cleanup btest-verbose
+all: btest-verbose clean
 # Showing all tests.
 btest-verbose:
 @$(BTEST) -d -j -f $(DIAG)
-cleanup:
-@rm -f $(DIAG)
+clean:
+@rm -rf $(DIAG) .tmp .btest.failed.dat

==== (next file) ====
@@ -66,7 +66,7 @@ print version and exit
 print contents of state file
 .TP
 \fB\-C\fR,\ \-\-no\-checksums
-ignore checksums
+When this option is set, Zeek ignores invalid packet checksums and still processes the packets. Furthermore, if this option is set, Zeek also processes IP packets with a zero total length field, which is typically caused by TSO (TCP Segmentation Offloading) on the NIC.
 .TP
 \fB\-F\fR,\ \-\-force\-dns
 force DNS

==== (next file) ====
@@ -9,6 +9,13 @@
 ##! These tags are defined internally by
 ##! the analyzers themselves, and documented in their analyzer-specific
 ##! description along with the events that they generate.
+##!
+##! Analyzer tags are also inserted into a global :zeek:type:`AllAnalyzers::Tag` enum
+##! type. This type contains duplicates of all of the :zeek:type:`Analyzer::Tag`,
+##! :zeek:type:`PacketAnalyzer::Tag` and :zeek:type:`Files::Tag` enum values
+##! and can be used for arguments to function/hook/event definitions where they
+##! need to handle any analyzer type. See :zeek:id:`Analyzer::register_for_ports`
+##! for an example.
 @load base/frameworks/packet-filter/utils
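# Illustrative sketch, not part of this change: because AllAnalyzers::Tag
# subsumes Analyzer::Tag, PacketAnalyzer::Tag and Files::Tag, one helper can
# accept any kind of analyzer tag (the helper name is hypothetical):
function describe_analyzer(atag: AllAnalyzers::Tag): string
	{
	return Analyzer::name(atag);
	}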
@@ -66,13 +73,13 @@ export {
 ## tag: The tag of the analyzer.
 ##
 ## Returns: The set of ports.
-global registered_ports: function(tag: Analyzer::Tag) : set[port];
+global registered_ports: function(tag: AllAnalyzers::Tag) : set[port];
 ## Returns a table of all ports-to-analyzer mappings currently registered.
 ##
 ## Returns: A table mapping each analyzer to the set of ports
 ## registered for it.
-global all_registered_ports: function() : table[Analyzer::Tag] of set[port];
+global all_registered_ports: function() : table[AllAnalyzers::Tag] of set[port];
 ## Translates an analyzer type to a string with the analyzer's name.
 ##
@@ -126,12 +133,16 @@ export {
 global disabled_analyzers: set[Analyzer::Tag] = {
 ANALYZER_TCPSTATS,
 } &redef;
+## A table of ports mapped to analyzers that handle those ports. This is
+## used by BPF filtering and DPD. Session analyzers can add to this using
+## Analyzer::register_for_port(s) and packet analyzers can add to this
+## using PacketAnalyzer::register_for_port(s).
+global ports: table[AllAnalyzers::Tag] of set[port];
 }
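# Illustrative sketch, not part of this change: registering a port is what
# populates the ports table above (8080/tcp for HTTP is an arbitrary example):
event zeek_init()
	{
	Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 8080/tcp);
	}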
 @load base/bif/analyzer.bif
-global ports: table[Analyzer::Tag] of set[port];
 event zeek_init() &priority=5
 {
 if ( disable_all )
@@ -176,22 +187,22 @@ function register_for_port(tag: Analyzer::Tag, p: port) : bool
 return T;
 }
-function registered_ports(tag: Analyzer::Tag) : set[port]
+function registered_ports(tag: AllAnalyzers::Tag) : set[port]
 {
 return tag in ports ? ports[tag] : set();
 }
-function all_registered_ports(): table[Analyzer::Tag] of set[port]
+function all_registered_ports(): table[AllAnalyzers::Tag] of set[port]
 {
 return ports;
 }
-function name(atype: Analyzer::Tag) : string
+function name(atype: AllAnalyzers::Tag) : string
 {
 return __name(atype);
 }
-function get_tag(name: string): Analyzer::Tag
+function get_tag(name: string): AllAnalyzers::Tag
 {
 return __tag(name);
 }
@@ -223,4 +234,3 @@ function get_bpf(): string
 }
 return output;
 }

==== (next file) ====
@@ -2,7 +2,7 @@
 ##!
 ##! The manager is passive (the workers connect to us), and once connected
 ##! the manager registers for the events on the workers that are needed
 ##! to get the desired data from the workers. This script will be
 ##! automatically loaded if necessary based on the type of node being started.
 ##! This is where the cluster manager sets its specific settings for other

==== (next file) ====
@@ -364,7 +364,7 @@ event zeek_init() &priority=-5
 if ( manager_is_logger )
 {
 local mgr = nodes_with_type(Cluster::MANAGER);
 if ( |mgr| > 0 )
 {
 local eln = pool_eligibility[Cluster::LOGGER]$eligible_nodes;
@@ -438,7 +438,7 @@ event zeek_init() &priority=-5
 pet = pool_eligibility[pool$spec$node_type];
 local nodes_to_init = |pet$eligible_nodes|;
 if ( pool$spec?$max_nodes &&
 pool$spec$max_nodes < |pet$eligible_nodes| )
 nodes_to_init = pool$spec$max_nodes;

==== (next file) ====
@@ -35,7 +35,7 @@ export {
 ## Number of protocol violations to tolerate before disabling an analyzer.
 option max_violations: table[Analyzer::Tag] of count = table() &default = 5;
 ## Analyzers which you don't want to throw
 option ignore_violations: set[Analyzer::Tag] = set();
 ## Ignore violations which go this many bytes into the connection.
@@ -53,7 +53,7 @@ event zeek_init() &priority=5
 Log::create_stream(DPD::LOG, [$columns=Info, $path="dpd", $policy=log_policy]);
 }
-event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=10
+event analyzer_confirmation(c: connection, atype: AllAnalyzers::Tag, aid: count) &priority=10
 {
 local analyzer = Analyzer::name(atype);
@@ -63,7 +63,7 @@ event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=10
 add c$service[analyzer];
 }
-event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count,
+event analyzer_violation(c: connection, atype: AllAnalyzers::Tag, aid: count,
 reason: string) &priority=10
 {
 local analyzer = Analyzer::name(atype);
@@ -85,7 +85,7 @@ event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count,
 c$dpd = info;
 }
-event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason: string) &priority=5
+event analyzer_violation(c: connection, atype: AllAnalyzers::Tag, aid: count, reason: string) &priority=5
 {
 if ( atype in ignore_violations )
 return;
@@ -114,8 +114,8 @@ event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason: string) &priority=5
 }
 }
-event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count,
+event analyzer_violation(c: connection, atype: AllAnalyzers::Tag, aid: count,
 reason: string) &priority=-5
 {
 if ( c?$dpd )
 {
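# Illustrative sketch, not part of this change: user scripts handle the
# renamed events the same way they handled the old protocol_* ones:
event analyzer_confirmation(c: connection, atype: AllAnalyzers::Tag, aid: count)
	{
	print fmt("%s confirmed on %s", Analyzer::name(atype), c$uid);
	}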

==== (next file) ====
@@ -252,7 +252,7 @@ signature file-mpqgame {
 file-magic /^MPQ\x1a/
 }
 # Blizzard CASC Format game file
 signature file-blizgame {
 file-mime "application/x-blizgame", 100
 file-magic /^BLTE/
@@ -302,4 +302,3 @@ signature file-iso9660 {
 file-mime "application/x-iso9660-image", 99
 file-magic /CD001/
 }

==== (next file) ====
@@ -1,7 +1,6 @@
 # This signature is non-specific and terrible but after
 # searching for a long time there doesn't seem to be a
 # better option.
 signature file-msword {
 file-magic /^\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1/
 file-mime "application/msword", 50

==== (next file) ====
@@ -104,7 +104,7 @@ export {
 missing_bytes: count &log &default=0;
 ## The number of bytes in the file stream that were not delivered to
 ## stream file analyzers. This could be overlapping bytes or
 ## bytes that couldn't be reassembled.
 overflow_bytes: count &log &default=0;
@@ -150,7 +150,7 @@ export {
 ## f: the file.
 global enable_reassembly: function(f: fa_file);
 ## Disables the file reassembler on this file. If the file is not
 ## transferred out of order this will have no effect.
 ##
 ## f: the file.
@@ -266,7 +266,7 @@ export {
 };
 ## Register callbacks for protocols that work with the Files framework.
 ## The callbacks must uniquely identify a file and each protocol can
 ## only have a single callback registered for it.
 ##
 ## tag: Tag for the protocol analyzer having a callback being registered.
@@ -280,7 +280,7 @@ export {
 ## manipulation when they are being added to a file before the core code
 ## takes over. This is unlikely to be interesting for users and should
 ## only be called by file analyzer authors but is *not required*.
 ##
 ## tag: Tag for the file analyzer.
 ##
 ## callback: Function to execute when the given file analyzer is being added.

==== (next file) ====
@@ -49,7 +49,7 @@ export {
 ## A URL for more information about the data.
 url: string &optional;
 };
 ## Represents a piece of intelligence.
 type Item: record {
 ## The intelligence indicator.
@@ -57,12 +57,12 @@ export {
 ## The type of data that the indicator field represents.
 indicator_type: Type;
 ## Metadata for the item. Typically represents more deeply
 ## descriptive data for a piece of intelligence.
 meta: MetaData;
 };
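# Illustrative sketch, not part of this change: constructing and inserting
# such an item from a script (the indicator and source values are made up):
event zeek_init()
	{
	Intel::insert([$indicator="198.51.100.7", $indicator_type=Intel::ADDR,
	               $meta=[$source="example-feed"]]);
	}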
 ## Enum to represent where data came from when it was discovered.
 ## The convention is to prefix the name with ``IN_``.
 type Where: enum {
@@ -158,8 +158,8 @@ export {
 global extend_match: hook(info: Info, s: Seen, items: set[Item]);
 ## The expiration timeout for intelligence items. Once an item expires, the
 ## :zeek:id:`Intel::item_expired` hook is called. Reinsertion of an item
 ## resets the timeout. A negative value disables expiration of intelligence
 ## items.
 const item_expiration = -1 min &redef;

==== (next file) ====
@@ -66,6 +66,11 @@ export {
 ## This option is also available as a per-filter ``$config`` option.
 const json_timestamps: JSON::TimestampFormat = JSON::TS_EPOCH &redef;
+## Handling of optional fields when writing out JSON. By default the
+## JSON formatter skips key and val when the field is absent. Setting
+## the following field to T includes the key, with a null value.
+const json_include_unset_fields = F &redef;
 ## If true, include lines with log meta information such as column names
 ## with types, the values of ASCII logging options that are in use, and
 ## the time when the file was opened and closed (the latter at the end).
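# Illustrative sketch, not part of this change: enabling the new behavior so
# absent &optional columns are emitted as JSON null instead of being skipped
# (assuming this writer's options live in the LogAscii module):
redef LogAscii::json_include_unset_fields = T;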

==== (next file) ====
@@ -41,7 +41,7 @@ export {
 name: function(state: PluginState) : string;
 ## If true, plugin can expire rules itself. If false, the NetControl
 ## framework will manage rule expiration.
 can_expire: bool;
 ## One-time initialization function called when plugin gets registered, and

==== (next file) ====
@@ -46,7 +46,7 @@ function debug_add_rule(p: PluginState, r: Rule) : bool
 local s = fmt("add_rule: %s", r);
 debug_log(p, s);
 if ( do_something(p) )
 {
 event NetControl::rule_added(r, p);
 return T;
@@ -76,12 +76,10 @@ global debug_plugin = Plugin(
 function create_debug(do_something: bool) : PluginState
 {
 local p: PluginState = [$plugin=debug_plugin];
 # FIXME: Why's the default not working?
 p$config = table();
 p$config["all"] = (do_something ? "1" : "0");
 return p;
 }

==== (next file) ====
@@ -1,7 +1,7 @@
 ##! NetControl plugin for the process-level PacketFilter that comes with
 ##! Zeek. Since the PacketFilter in Zeek is quite limited in scope
 ##! and can only add/remove filters for addresses, this is quite
 ##! limited in scope at the moment.
 @load ../plugin
@@ -110,4 +110,3 @@ function create_packetfilter() : PluginState
 return p;
 }

==== (next file) ====
@@ -1,7 +1,7 @@
 ##! This file defines the types that are used by the NetControl framework.
 ##!
 ##! The most important type defined in this file is :zeek:see:`NetControl::Rule`,
 ##! which is used to describe all rules that can be expressed by the NetControl framework.
 module NetControl;

==== (next file) ====
@@ -1,6 +1,6 @@
 ##! This script adds geographic location data to notices for the "remote"
 ##! host in a connection. It does make the assumption that one of the
 ##! addresses in a connection is "local" and one is "remote" which is
 ##! probably a safe assumption to make in most cases. If both addresses
 ##! are remote, it will use the $src address.
@@ -17,13 +17,13 @@ export {
 ## in order for this to work.
 ACTION_ADD_GEODATA
 };
 redef record Info += {
 ## If GeoIP support is built in, notices can have geographic
 ## information attached to them.
 remote_location: geo_location &log &optional;
 };
 ## Notice types which should have the "remote" location looked up.
 ## If GeoIP support is not built in, this does nothing.
 option lookup_location_types: set[Notice::Type] = {};
@@ -35,7 +35,7 @@ hook policy(n: Notice::Info) &priority=10
 add n$actions[ACTION_ADD_GEODATA];
 }
 # This is handled at a high priority in case other notice handlers
 # want to use the data.
 hook notice(n: Notice::Info) &priority=10
 {

==== (next file) ====
@@ -10,9 +10,9 @@ module Notice;
 export {
 redef enum Action += {
 ## Indicate that the generated email should be addressed to the
 ## appropriate email addresses as found by the
 ## :zeek:id:`Site::get_emails` function based on the relevant
 ## address or addresses indicated in the notice.
 ACTION_EMAIL_ADMIN
 };
@@ -23,7 +23,6 @@ hook notice(n: Notice::Info)
 if ( |Site::local_admins| > 0 &&
 ACTION_EMAIL_ADMIN in n$actions )
 {
-local email = "";
 if ( n?$src && |Site::get_emails(n$src)| > 0 )
 add n$email_dest[Site::get_emails(n$src)];
 if ( n?$dst && |Site::get_emails(n$dst)| > 0 )

==== (next file) ====
@@ -112,12 +112,12 @@ function lookup_controller(name: string): vector of Controller
 if ( Cluster::local_node_type() != Cluster::MANAGER )
 return vector();
 # I am not quite sure if we can actually get away with this - in the
 # current state, this means that the individual nodes cannot lookup
 # a controller by name.
 #
 # This means that there can be no reactions to things on the actual
 # worker nodes - because they cannot look up a name. On the other hand -
 # currently we also do not even send the events to the worker nodes (at least
 # not if we are using broker). Because of that I am not really feeling that
 # badly about it...

==== (next file) ====
@@ -60,7 +60,7 @@ export {
 SIG_ALARM_PER_ORIG,
 ## Alarm once and then never again.
 SIG_ALARM_ONCE,
 ## Count signatures per responder host and alarm with the
 ## :zeek:enum:`Signatures::Count_Signature` notice if a threshold
 ## defined by :zeek:id:`Signatures::count_thresholds` is reached.
 SIG_COUNT_PER_RESP,
@@ -100,15 +100,15 @@ export {
 ## Number of hosts, from a summary count.
 host_count: count &log &optional;
 };
 ## Actions for a signature.
 const actions: table[string] of Action = {
 ["unspecified"] = SIG_IGNORE, # place-holder
 } &redef &default = SIG_ALARM;
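# Illustrative sketch, not part of this change: mapping a specific signature
# ID onto a non-default action (the signature name is made up):
redef Signatures::actions += {
	["my-custom-sig"] = Signatures::SIG_ALARM_ONCE,
};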
 ## Signature IDs that should always be ignored.
 option ignored_ids = /NO_DEFAULT_MATCHES/;
 ## Generate a notice if, for a pair [orig, signature], the number of
 ## different responders has reached one of the thresholds.
 const horiz_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef;
@@ -120,7 +120,7 @@ export {
 ## Generate a notice if a :zeek:enum:`Signatures::SIG_COUNT_PER_RESP`
 ## signature is triggered as often as given by one of these thresholds.
 const count_thresholds = { 5, 10, 50, 100, 500, 1000, 10000, 1000000, } &redef;
 ## The interval between when :zeek:enum:`Signatures::Signature_Summary`
 ## notices are generated.
 option summary_interval = 1 day;
@@ -147,7 +147,7 @@ event zeek_init() &priority=5
 {
 Log::create_stream(Signatures::LOG, [$columns=Info, $ev=log_signature, $path="signatures", $policy=log_policy]);
 }
 # Returns true if the given signature has already been triggered for the given
 # [orig, resp] pair.
 function has_signature_matched(id: string, orig: addr, resp: addr): bool
@@ -173,7 +173,7 @@ event signature_match(state: signature_state, msg: string, data: string)
 # Trim the matched data down to something reasonable
 if ( |data| > 140 )
 data = fmt("%s...", sub_bytes(data, 0, 140));
 local src_addr: addr;
 local src_port: port;
 local dst_addr: addr;
@@ -212,7 +212,7 @@ event signature_match(state: signature_state, msg: string, data: string)
 local notice = F;
 if ( action == SIG_ALARM )
 notice = T;
 if ( action == SIG_COUNT_PER_RESP )
 {
 local dst = state$conn$id$resp_h;
@@ -252,7 +252,7 @@ event signature_match(state: signature_state, msg: string, data: string)
 $conn=state$conn, $src=src_addr,
 $dst=dst_addr, $msg=fmt("%s: %s", src_addr, msg),
 $sub=data]);
 if ( action == SIG_FILE_BUT_NO_SCAN || action == SIG_SUMMARY )
 return;
@@ -279,7 +279,7 @@ event signature_match(state: signature_state, msg: string, data: string)
 fmt("%s has triggered signature %s on %d hosts",
 orig, sig_id, hcount);
 Log::write(Signatures::LOG,
 [$ts=network_time(), $note=Multiple_Sig_Responders,
 $src_addr=orig, $sig_id=sig_id, $event_msg=msg,
 $host_count=hcount, $sub_msg=horz_scan_msg]);
@@ -296,9 +296,9 @@ event signature_match(state: signature_state, msg: string, data: string)
 fmt("%s has triggered %d different signatures on host %s",
 orig, vcount, resp);
 Log::write(Signatures::LOG,
 [$ts=network_time(),
 $note=Multiple_Signatures,
 $src_addr=orig,
 $dst_addr=resp, $sig_id=sig_id, $sig_count=vcount,
 $event_msg=fmt("%s different signatures triggered", vcount),
@@ -311,4 +311,3 @@ event signature_match(state: signature_state, msg: string, data: string)
 last_vthresh[orig] = vcount;
 }
 }

==== (next file) ====
@@ -13,18 +13,18 @@ module Software;
 export {
 ## The software logging stream identifier.
 redef enum Log::ID += { LOG };
 ## A default logging policy hook for the stream.
 global log_policy: Log::PolicyHook;
 ## Scripts detecting new types of software need to redef this enum to add
 ## their own specific software types which would then be used when they
 ## create :zeek:type:`Software::Info` records.
 type Type: enum {
 ## A placeholder type for when the type of software is not known.
 UNKNOWN,
 };
 ## A structure to represent the numeric version of software.
 type Version: record {
 ## Major version number.
@@ -38,7 +38,7 @@ export {
 ## Additional version string (e.g. "beta42").
 addl: string &optional;
 } &log;
 ## The record type that is used for representing and logging software.
 type Info: record {
 ## The time at which the software was detected.
@@ -58,9 +58,9 @@ export {
 ## parsing doesn't always work reliably in all cases and this
 ## acts as a fallback in the logs.
 unparsed_version: string &log &optional;
 ## This can indicate that this software being detected should
 ## definitely be sent onward to the logging framework. By
 ## default, only software that is "interesting" due to a change
 ## in version or it being currently unknown is sent to the
 ## logging framework. This can be set to T to force the record
@@ -68,7 +68,7 @@ export {
 ## tracking needs to happen in a specific way to the software.
 force_log: bool &default=F;
 };
 ## Hosts whose software should be detected and tracked.
 ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS.
 option asset_tracking = LOCAL_HOSTS;
@@ -78,21 +78,21 @@ export {
 ## id: The connection id where the software was discovered.
 ##
 ## info: A record representing the software discovered.
 ##
 ## Returns: T if the software was logged, F otherwise.
 global found: function(id: conn_id, info: Info): bool;
 ## Compare two version records.
 ##
 ## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2.
 ## If the numerical version numbers match, the *addl* string
 ## is compared lexicographically.
 global cmp_versions: function(v1: Version, v2: Version): int;
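# Illustrative sketch, not part of this change: comparing two versions
# (4.2 is newer than 4.1, so this prints 1):
event zeek_init()
	{
	print Software::cmp_versions([$major=4, $minor=2], [$major=4, $minor=1]);
	}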
 ## Sometimes software will expose itself on the network with
 ## slight naming variations. This table provides a mechanism
 ## for a piece of software to be renamed to a single name
 ## even if it exposes itself with an alternate name. The
 ## yielded string is the name that will be logged and generally
 ## used for everything.
 global alternate_names: table[string] of string {
@@ -100,17 +100,17 @@ export {
 } &default=function(a: string): string { return a; };
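# Illustrative sketch, not part of this change: adding another mapping so a
# variant product string gets logged under one canonical name (names made up):
event zeek_init()
	{
	Software::alternate_names["Example Browser"] = "ExampleBrowser";
	}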
 ## Type to represent a collection of :zeek:type:`Software::Info` records.
 ## It's indexed with the name of a piece of software such as "Firefox"
 ## and it yields a :zeek:type:`Software::Info` record with more
 ## information about the software.
 type SoftwareSet: table[string] of Info;
 ## The set of software associated with an address. Data expires from
 ## this table after one day by default so that a detected piece of
 ## software will be logged once each day. In a cluster, this table is
 ## uniformly distributed among proxy nodes.
 global tracked: table[addr] of SoftwareSet &create_expire=1day;
 ## This event can be handled to access the :zeek:type:`Software::Info`
 ## record as it is sent on to the logging framework.
 global log_software: event(rec: Info);
@@ -128,7 +128,7 @@ event zeek_init() &priority=5
 {
 Log::create_stream(Software::LOG, [$columns=Info, $ev=log_software, $path="software", $policy=log_policy]);
 }
 type Description: record {
 name: string;
 version: Version;
@@ -138,13 +138,13 @@ type Description: record {
 # Defining this here because of a circular dependency between two functions.
 global parse_mozilla: function(unparsed_version: string): Description;
 # Don't even try to understand this now, just make sure the tests are
 # working.
 function parse(unparsed_version: string): Description
 {
 local software_name = "<parse error>";
 local v: Version;
 # Parse browser-alike versions separately
 if ( /^(Mozilla|Opera)\/[0-9]+\./ in unparsed_version )
 {
@@ -220,10 +220,10 @@ function parse(unparsed_version: string): Description
 {
 v$addl = strip(version_parts[2]);
 }
 }
 }
 if ( 3 in version_numbers && version_numbers[3] != "" )
 v$minor3 = extract_count(version_numbers[3]);
 if ( 2 in version_numbers && version_numbers[2] != "" )
@@ -234,7 +234,7 @@ function parse(unparsed_version: string): Description
 v$major = extract_count(version_numbers[0]);
 }
 }
 return [$version=v, $unparsed_version=unparsed_version, $name=alternate_names[software_name]];
 }
@@ -245,7 +245,7 @@ function parse_with_cache(unparsed_version: string): Description
 {
 if (unparsed_version in parse_cache)
 return parse_cache[unparsed_version];
 local res = parse(unparsed_version);
 parse_cache[unparsed_version] = res;
 return res;
@@ -256,7 +256,7 @@ function parse_mozilla(unparsed_version: string): Description
 local software_name = "<unknown browser>";
 local v: Version;
 local parts: string_vec;
 if ( /Opera [0-9\.]*$/ in unparsed_version )
 {
 software_name = "Opera";
@@ -349,7 +349,7 @@ function parse_mozilla(unparsed_version: string): Description
 if ( 2 in parts )
 v = parse(parts[2])$version;
 }
 else if ( /AdobeAIR\/[0-9\.]*/ in unparsed_version )
 {
 software_name = "AdobeAIR";
@@ -392,7 +392,7 @@ function cmp_versions(v1: Version, v2: Version): int
 else
 return v1?$major ? 1 : -1;
 }
 if ( v1?$minor && v2?$minor )
 {
 if ( v1$minor < v2$minor )
@@ -407,7 +407,7 @@ function cmp_versions(v1: Version, v2: Version): int
 else
 return v1?$minor ? 1 : -1;
 }
 if ( v1?$minor2 && v2?$minor2 )
 {
 if ( v1$minor2 < v2$minor2 )
@@ -462,7 +462,7 @@ function software_endpoint_name(id: conn_id, host: addr): string
 # Convert a version into a string "a.b.c-x".
 function software_fmt_version(v: Version): string
 {
 return fmt("%s%s%s%s%s",
 v?$major ? fmt("%d", v$major) : "0",
 v?$minor ? fmt(".%d", v$minor) : "",
 v?$minor2 ? fmt(".%d", v$minor2) : "",
@@ -510,10 +510,10 @@ event Software::register(info: Info)
 local changed = cmp_versions(old$version, info$version) != 0;
 if ( changed )
 event Software::version_change(old, info);
 else if ( ! info$force_log )
 # If the version hasn't changed, then we're just redetecting the
 # same thing, so we don't care.
 return;
 }
@@ -526,7 +526,7 @@ function found(id: conn_id, info: Info): bool
 if ( ! info$force_log && ! addr_matches_host(info$host, asset_tracking) )
 return F;
 if ( ! info?$ts )
 info$ts = network_time();
 if ( info?$version )
@@ -220,7 +220,7 @@ event zeek_init() &priority=100
# This variable is maintained by manager nodes as they collect and aggregate
# results.
# Index on a uid.
global stats_keys: table[string] of set[Key] &read_expire=1min
&expire_func=function(s: table[string] of set[Key], idx: string): interval
{
Reporter::warning(fmt("SumStat key request for the %s SumStat uid took longer than 1 minute and was automatically cancelled.", idx));
@@ -510,7 +510,7 @@ function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: double)
return F;
# Add in the extra ResultVals to make threshold_vals easier to write.
# This length comparison should work because we just need to make
# sure that we have the same number of reducers and results.
if ( |ss$reducers| != |result| )
{
@@ -568,4 +568,3 @@ function threshold_crossed(ss: SumStat, key: Key, result: Result)
ss$threshold_crossed(key, result);
}
@@ -95,7 +95,7 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
local other_vector: vector of Observation;
local othercount: count;
if ( rv1$sample_elements > rv2$sample_elements )
{
result$samples = copy(rv1$samples);
@@ -46,7 +46,7 @@ hook register_observe_plugins()
if ( ! r?$unique_max || |rv$unique_vals| <= r$unique_max )
add rv$unique_vals[obs];
rv$unique = |rv$unique_vals|;
});
}
@@ -90,20 +90,9 @@ export {
global finalize_tunnel: Conn::RemovalHook;
}
-const ayiya_ports = { 5072/udp };
-const teredo_ports = { 3544/udp };
-const gtpv1_ports = { 2152/udp, 2123/udp };
-redef likely_server_ports += { ayiya_ports, teredo_ports, gtpv1_ports, vxlan_ports, geneve_ports };
event zeek_init() &priority=5
{
Log::create_stream(Tunnel::LOG, [$columns=Info, $path="tunnel", $policy=log_policy]);
-Analyzer::register_for_ports(Analyzer::ANALYZER_AYIYA, ayiya_ports);
-Analyzer::register_for_ports(Analyzer::ANALYZER_TEREDO, teredo_ports);
-Analyzer::register_for_ports(Analyzer::ANALYZER_GTPV1, gtpv1_ports);
-Analyzer::register_for_ports(Analyzer::ANALYZER_VXLAN, vxlan_ports);
-Analyzer::register_for_ports(Analyzer::ANALYZER_GENEVE, geneve_ports);
}
function register_all(ecv: EncapsulatingConnVector)
@@ -1016,9 +1016,16 @@ const TCP_RESET = 6; ##< Endpoint has sent RST.
const UDP_INACTIVE = 0; ##< Endpoint is still inactive.
const UDP_ACTIVE = 1; ##< Endpoint has sent something.

-## If true, don't verify checksums. Useful for running on altered trace
-## files, and for saving a few cycles, but at the risk of analyzing invalid
-## data. Note that the ``-C`` command-line option overrides the setting of this
+## If true, don't verify checksums, and accept packets that give a length of
+## zero in the IPv4 header. This is useful when running against traces of local
+## traffic and the NIC checksum offloading feature is enabled. It can also
+## be useful for running on altered trace files, and for saving a few cycles
+## at the risk of analyzing invalid data.
+## With this option, packets that have a value of zero in the total-length field
+## of the IPv4 header are also accepted, and the capture-length is used instead.
+## The total-length field is commonly set to zero when the NIC sequence offloading
+## feature is enabled.
+## Note that the ``-C`` command-line option overrides the setting of this
## variable.
const ignore_checksums = F &redef;
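A minimal usage sketch for the knob documented above, e.g. from a site's local.zeek (the placement is an assumption, not something this change mandates):

    # Accept packets with invalid or zeroed checksums, for instance when
    # reading traces captured while NIC checksum offloading was enabled.
    # Equivalent to passing -C on the command line.
    redef ignore_checksums = T;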
@@ -3884,6 +3891,14 @@ type dns_loc_rr: record {
is_query: count; ##< The RR is a query/Response.
};

+## DNS SVCB and HTTPS RRs
+##
+## .. zeek:see:: dns_SVCB dns_HTTPS
+type dns_svcb_rr: record {
+svc_priority: count; ##< Service priority for the current record; 0 indicates that this record is in AliasMode and cannot carry svc_params, otherwise it is in ServiceMode and may include svc_params.
+target_name: string; ##< Target name, the hostname of the service endpoint.
+};
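As a sketch of how the new record surfaces in script-land, a handler along the following lines could print HTTPS RR answers. The dns_HTTPS event signature shown is an assumption inferred from the zeek:see reference above:

    event dns_HTTPS(c: connection, msg: dns_msg, ans: dns_answer, https: dns_svcb_rr)
        {
        # svc_priority == 0 means AliasMode, anything else ServiceMode.
        print fmt("HTTPS RR: %s -> %s (priority %d)",
                  ans$query, https$target_name, https$svc_priority);
        }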
# DNS answer types.
#
# .. zeek:see:: dns_answer
@@ -5021,14 +5036,14 @@ export {
## With this set, the Teredo analyzer waits until it sees both sides
## of a connection using a valid Teredo encapsulation before issuing
-## a :zeek:see:`protocol_confirmation`. If it's false, the first
+## a :zeek:see:`analyzer_confirmation`. If it's false, the first
## occurrence of a packet with valid Teredo encapsulation causes a
## confirmation.
const delay_teredo_confirmation = T &redef;
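A one-line sketch for sites that prefer the quicker, less conservative behavior:

    # Confirm Teredo on the first validly encapsulated packet rather than
    # waiting to see both directions of the connection.
    redef delay_teredo_confirmation = F;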
## With this set, the GTP analyzer waits until the most-recent upflow
## and downflow packets are a valid GTPv1 encapsulation before
-## issuing :zeek:see:`protocol_confirmation`. If it's false, the
+## issuing :zeek:see:`analyzer_confirmation`. If it's false, the
## first occurrence of a packet with valid GTPv1 encapsulation causes
## confirmation. Since the same inner connection can be carried by
## differing outer upflow/downflow connections, setting to false
@@ -5045,17 +5060,6 @@ export {
## may choose whether to perform the validation.
const validate_vxlan_checksums = T &redef;
-## The set of UDP ports used for VXLAN traffic. Traffic using this
-## UDP destination port will attempt to be decapsulated. Note that if
-## you customize this, you may still want to manually ensure that
-## :zeek:see:`likely_server_ports` also gets populated accordingly.
-const vxlan_ports: set[port] = { 4789/udp } &redef;
-## The set of UDP ports used for Geneve traffic. Traffic using this
-## UDP destination port will attempt to be decapsulated. Note that if
-## you customize this, you may still want to manually ensure that
-## :zeek:see:`likely_server_ports` also gets populated accordingly.
-const geneve_ports: set[port] = { 6081/udp } &redef;
} # end export
module Reporter;
@@ -1,3 +1,5 @@
+@load ./main.zeek

@load base/packet-protocols/root
@load base/packet-protocols/ip
@load base/packet-protocols/skip
@@ -12,9 +14,15 @@
@load base/packet-protocols/pppoe
@load base/packet-protocols/vlan
@load base/packet-protocols/mpls
-@load base/packet-protocols/gre
-@load base/packet-protocols/iptunnel
@load base/packet-protocols/vntag
@load base/packet-protocols/udp
@load base/packet-protocols/tcp
@load base/packet-protocols/icmp
+@load base/packet-protocols/gre
+@load base/packet-protocols/iptunnel
+@load base/packet-protocols/ayiya
+@load base/packet-protocols/geneve
+@load base/packet-protocols/vxlan
+@load base/packet-protocols/teredo
+@load base/packet-protocols/gtpv1
@@ -0,0 +1 @@
@load ./main
@@ -0,0 +1,19 @@
module PacketAnalyzer::AYIYA;

# Needed for port registration for BPF
@load base/frameworks/analyzer/main

const IPPROTO_IPV4 : count = 4;
const IPPROTO_IPV6 : count = 41;

const ayiya_ports = { 5072/udp };
redef likely_server_ports += { ayiya_ports };

event zeek_init() &priority=20
{
PacketAnalyzer::register_protocol_detection(PacketAnalyzer::ANALYZER_UDP, PacketAnalyzer::ANALYZER_AYIYA);
PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_AYIYA, IPPROTO_IPV4, PacketAnalyzer::ANALYZER_IP);
PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_AYIYA, IPPROTO_IPV6, PacketAnalyzer::ANALYZER_IP);
PacketAnalyzer::register_for_ports(PacketAnalyzer::ANALYZER_UDP, PacketAnalyzer::ANALYZER_AYIYA, ayiya_ports);
}
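Since ayiya_ports is not declared &redef above, attaching the analyzer to an extra, non-standard port would go through the registration helper instead. A minimal sketch, where 5073/udp is a purely hypothetical port:

    event zeek_init() &priority=10
        {
        # Hypothetical extra AYIYA carrier port; 5072/udp stays registered.
        PacketAnalyzer::register_for_port(PacketAnalyzer::ANALYZER_UDP,
                                          PacketAnalyzer::ANALYZER_AYIYA, 5073/udp);
        }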
@@ -0,0 +1 @@
@load ./main
@@ -0,0 +1,27 @@
module PacketAnalyzer::Geneve;

export {
## The set of UDP ports used for Geneve traffic. Traffic using this
## UDP destination port will attempt to be decapsulated. Note that if
## you customize this, you may still want to manually ensure that
## :zeek:see:`likely_server_ports` also gets populated accordingly.
const geneve_ports: set[port] = { 6081/udp } &redef;
}

redef likely_server_ports += { geneve_ports };

event zeek_init() &priority=20
{
PacketAnalyzer::register_for_ports(PacketAnalyzer::ANALYZER_UDP, PacketAnalyzer::ANALYZER_GENEVE, geneve_ports);

# This is defined by IANA as being "Trans Ether Bridging" but the Geneve RFC
# says to use it for Ethernet. See
# https://datatracker.ietf.org/doc/html/draft-gross-geneve-00#section-3.4
# for details.
PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_GENEVE, 0x6558, PacketAnalyzer::ANALYZER_ETHERNET);

# Some additional mappings for protocols that we already handle natively.
PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_GENEVE, 0x0800, PacketAnalyzer::ANALYZER_IP);
PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_GENEVE, 0x86DD, PacketAnalyzer::ANALYZER_IP);
PacketAnalyzer::register_packet_analyzer(PacketAnalyzer::ANALYZER_GENEVE, 0x0806, PacketAnalyzer::ANALYZER_ARP);
}
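Because geneve_ports is &redef, adding a decapsulation port is a one-liner; per the doc comment above, likely_server_ports may want the same addition. The port below is a made-up example:

    # Decapsulate Geneve on a second, hypothetical port as well.
    redef PacketAnalyzer::Geneve::geneve_ports += { 6082/udp };
    redef likely_server_ports += { 6082/udp };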
@@ -0,0 +1 @@
@load ./main
@@ -0,0 +1,28 @@
module PacketAnalyzer::GTPV1;

# This needs to be loaded here so the function is available. Function BIFs normally aren't
# loaded until after the packet analysis init scripts are run, and then zeek complains it
# can't find the function.
@load base/bif/plugins/Zeek_GTPv1.functions.bif

# Needed for port registration for BPF
@load base/frameworks/analyzer/main

export {
## Default analyzer
const default_analyzer: PacketAnalyzer::Tag = PacketAnalyzer::ANALYZER_IP &redef;
}

const gtpv1_ports = { 2152/udp, 2123/udp };
redef likely_server_ports += { gtpv1_ports };

event zeek_init() &priority=20
{
PacketAnalyzer::register_protocol_detection(PacketAnalyzer::ANALYZER_UDP, PacketAnalyzer::ANALYZER_GTPV1);
PacketAnalyzer::register_for_ports(PacketAnalyzer::ANALYZER_UDP, PacketAnalyzer::ANALYZER_GTPV1, gtpv1_ports);
}

event connection_state_remove(c: connection)
{
remove_gtpv1_connection(c$id);
}
@@ -0,0 +1,61 @@
module PacketAnalyzer;

@load base/frameworks/analyzer/main.zeek

export {
## Registers a set of well-known ports for an analyzer. If a future
## connection on one of these ports is seen, the analyzer will be
## automatically assigned to parsing it. The function *adds* to all ports
## already registered, it doesn't replace them.
##
## parent: The tag of the parent packet analyzer.
##
## child: The tag of the analyzer to assign to the ports.
##
## ports: The set of well-known ports to associate with the analyzer.
##
## Returns: True if the ports were successfully registered.
global register_for_ports: function(parent: PacketAnalyzer::Tag,
child: PacketAnalyzer::Tag,
ports: set[port]) : bool;

## Registers an individual well-known port for an analyzer. If a future
## connection on this port is seen, the analyzer will be automatically
## assigned to parsing it. The function *adds* to all ports already
## registered, it doesn't replace them.
##
## parent: The tag of the parent packet analyzer.
##
## child: The tag of the analyzer to assign to the port.
##
## p: The well-known port to associate with the analyzer.
##
## Returns: True if the port was successfully registered.
global register_for_port: function(parent: PacketAnalyzer::Tag,
child: PacketAnalyzer::Tag,
p: port) : bool;
}

function register_for_ports(parent: PacketAnalyzer::Tag,
child: PacketAnalyzer::Tag,
ports: set[port]) : bool
{
local rc = T;

for ( p in ports )
{
if ( ! register_for_port(parent, child, p) )
rc = F;
}

return rc;
}

function register_for_port(parent: PacketAnalyzer::Tag,
child: PacketAnalyzer::Tag,
p: port) : bool
{
register_packet_analyzer(parent, port_to_count(p), child);

if ( child !in Analyzer::ports )
Analyzer::ports[child] = set();

add Analyzer::ports[child][p];
return T;
}
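A usage sketch for the helpers above, mapping one more UDP port onto an existing packet analyzer (the extra 4790/udp port is arbitrary, chosen only for illustration):

    event zeek_init()
        {
        # Bulk form; returns F if any individual registration fails.
        PacketAnalyzer::register_for_ports(PacketAnalyzer::ANALYZER_UDP,
                                           PacketAnalyzer::ANALYZER_VXLAN,
                                           set(4790/udp));
        }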
@@ -0,0 +1 @@
@load ./main
@@ -0,0 +1,28 @@
module PacketAnalyzer::TEREDO;

# This needs to be loaded here so the functions are available. Function BIFs normally aren't
# loaded until after the packet analysis init scripts are run, and then zeek complains it
# can't find the function.
@load base/bif/plugins/Zeek_Teredo.functions.bif

# Needed for port registration for BPF
@load base/frameworks/analyzer/main

export {
## Default analyzer
const default_analyzer: PacketAnalyzer::Tag = PacketAnalyzer::ANALYZER_IP &redef;
}

const teredo_ports = { 3544/udp };
redef likely_server_ports += { teredo_ports };

event zeek_init() &priority=20
{
PacketAnalyzer::register_protocol_detection(PacketAnalyzer::ANALYZER_UDP, PacketAnalyzer::ANALYZER_TEREDO);
PacketAnalyzer::register_for_ports(PacketAnalyzer::ANALYZER_UDP, PacketAnalyzer::ANALYZER_TEREDO, teredo_ports);
}

event connection_state_remove(c: connection)
{
remove_teredo_connection(c$id);
}
@@ -0,0 +1 @@
@load ./main
@@ -0,0 +1,20 @@
module PacketAnalyzer::VXLAN;

export {
# There's no indicator in the VXLAN packet header format about what the next protocol
# in the chain is. All of the documentation just lists Ethernet, so default to that.
const default_analyzer: PacketAnalyzer::Tag = PacketAnalyzer::ANALYZER_ETHERNET &redef;

## The set of UDP ports used for VXLAN traffic. Traffic using this
## UDP destination port will attempt to be decapsulated. Note that if
## you customize this, you may still want to manually ensure that
## :zeek:see:`likely_server_ports` also gets populated accordingly.
const vxlan_ports: set[port] = { 4789/udp } &redef;
}

redef likely_server_ports += { vxlan_ports };

event zeek_init() &priority=20
{
PacketAnalyzer::register_for_ports(PacketAnalyzer::ANALYZER_UDP, PacketAnalyzer::ANALYZER_VXLAN, vxlan_ports);
}
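As with Geneve, extending VXLAN decapsulation to another port is a redef; 4790/udp here is only illustrative:

    # Decapsulate VXLAN on an additional, hypothetical port.
    redef PacketAnalyzer::VXLAN::vxlan_ports += { 4790/udp };
    redef likely_server_ports += { 4790/udp };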
@@ -1,5 +1,5 @@
##! This script can be used to extract either the originator's data or the
##! responder's data or both. By default nothing is extracted, and in order
##! to actually extract data the ``c$extract_orig`` and/or the
##! ``c$extract_resp`` variable must be set to ``T``. One way to achieve this
##! would be to handle the :zeek:id:`connection_established` event elsewhere
@@ -19,7 +19,7 @@ export {
## The prefix given to files containing extracted connections as they
## are opened on disk.
option extraction_prefix = "contents";
## If this variable is set to ``T``, then all contents of all
## connections will be extracted.
option default_extract = F;
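A minimal sketch of the approach the header comment describes, switching on extraction of both directions for every established connection (a real policy would normally filter on the connection first):

    event connection_established(c: connection)
        {
        c$extract_orig = T;
        c$extract_resp = T;
        }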
@@ -38,7 +38,7 @@ event connection_established(c: connection) &priority=-5
local orig_f = open(orig_file);
set_contents_file(c$id, CONTENTS_ORIG, orig_f);
}
if ( c$extract_resp )
{
local resp_file = generate_extraction_filename(extraction_prefix, c, "resp.dat");
@@ -6,19 +6,19 @@ module Conn;
export {
## Define inactivity timeouts by the service detected as being used over
## the connection.
-option analyzer_inactivity_timeouts: table[Analyzer::Tag] of interval = {
+option analyzer_inactivity_timeouts: table[AllAnalyzers::Tag] of interval = {
# For interactive services, allow longer periods of inactivity.
[[Analyzer::ANALYZER_SSH, Analyzer::ANALYZER_FTP]] = 1 hrs,
};
## Define inactivity timeouts based on common protocol ports.
option port_inactivity_timeouts: table[port] of interval = {
[[21/tcp, 22/tcp, 23/tcp, 513/tcp]] = 1 hrs,
};
}
-event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count)
+event analyzer_confirmation(c: connection, atype: AllAnalyzers::Tag, aid: count)
{
if ( atype in analyzer_inactivity_timeouts )
set_inactivity_timeout(c$id, analyzer_inactivity_timeouts[atype]);
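A sketch of tuning these timeouts from site policy; the 2 hrs value is arbitrary, and indexing with an Analyzer::Tag still works, just as the table initializer above already does:

    redef Conn::analyzer_inactivity_timeouts += {
        [Analyzer::ANALYZER_SSH] = 2 hrs,
    };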
@@ -17,7 +17,7 @@ export {
## The connection's 4-tuple of endpoint addresses/ports.
id : conn_id &log;
## Round trip time from the request to the response.
## If either the request or response wasn't seen,
## this will be null.
rtt : interval &log &optional;
@@ -78,7 +78,7 @@ export {
## The DHCP message types seen by this DHCP transaction
msg_types: vector of string &log &default=string_vec();
## Duration of the DHCP "session" representing the
## time from the first message to the last.
duration: interval &log &default=0secs;
@@ -172,4 +172,15 @@ export {
[4] = "SHA384",
} &default = function(n: count): string { return fmt("digest-%d", n); };

+## SVCB/HTTPS SvcParam keys, as defined in
+## https://www.ietf.org/archive/id/draft-ietf-dnsop-svcb-https-07.txt, sec 14.3.2
+const svcparam_keys = {
+[0] = "mandatory",
+[1] = "alpn",
+[2] = "no-default-alpn",
+[3] = "port",
+[4] = "ipv4hint",
+[5] = "ech",
+[6] = "ipv6hint",
+} &default = function(n: count): string { return fmt("key-%d", n); };
}
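A small sketch of how the mapping and its &default fallback behave at lookup time:

    print DNS::svcparam_keys[1];   # prints "alpn"
    print DNS::svcparam_keys[42];  # unknown key, falls back to "key-42"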
@@ -375,7 +375,7 @@ hook DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
if ( ! c$dns?$rtt )
{
c$dns$rtt = network_time() - c$dns$ts;
# This could mean that only a reply was seen since
# we assume there must be some passage of time between
# request and response.
if ( c$dns$rtt == 0secs )
@@ -547,9 +547,9 @@ event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer, target: string
#
# }
# event dns_EDNS_ecs(c: connection, msg: dns_msg, opt: dns_edns_ecs)
# {
#
# }
#
#event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional)
# {
@@ -18,14 +18,14 @@ export {
## Describe the file being transferred.
global describe_file: function(f: fa_file): string;
redef record fa_file += {
ftp: FTP::Info &optional;
};
}
function get_file_handle(c: connection, is_orig: bool): string
{
if ( [c$id$resp_h, c$id$resp_p] !in ftp_data_expected )
return "";
return cat(Analyzer::ANALYZER_FTP_DATA, c$start_time, c$id, is_orig);
@@ -54,7 +54,7 @@ event zeek_init() &priority=5
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5
{
if ( [c$id$resp_h, c$id$resp_p] !in ftp_data_expected )
return;
local ftp = ftp_data_expected[c$id$resp_h, c$id$resp_p];
@@ -11,12 +11,12 @@ export {
## Counter to track how many commands have been executed.
seq: count &default=0;
};
## Structure for tracking pending commands in the event that the client
## sends a large number of commands before the server has a chance to
## reply.
type PendingCmds: table[count] of CmdArg;
## Possible response codes for a wide variety of FTP commands.
option cmd_reply_code: set[string, count] = {
# According to RFC 959
@@ -65,7 +65,7 @@ export {
["MDTM", [213, 500, 501, 550]], # RFC3659
["MLST", [150, 226, 250, 500, 501, 550]], # RFC3659
["MLSD", [150, 226, 250, 500, 501, 550]], # RFC3659
["CLNT", [200, 500]], # No RFC (indicate client software)
["MACB", [200, 500, 550]], # No RFC (test for MacBinary support)
@@ -79,11 +79,11 @@ function add_pending_cmd(pc: PendingCmds, cmd: string, arg: string): CmdArg
{
local ca = [$cmd = cmd, $arg = arg, $seq=|pc|+1, $ts=network_time()];
pc[ca$seq] = ca;
return ca;
}
# Determine which is the best command to match with based on the
# response code and message.
function get_pending_cmd(pc: PendingCmds, reply_code: count, reply_msg: string): CmdArg
{
@@ -94,18 +94,18 @@ function get_pending_cmd(pc: PendingCmds, reply_code: count, reply_msg: string): CmdArg
for ( cmd_seq, cmd in pc )
{
local score: int = 0;
# if the command is compatible with the reply code
# code 500 (syntax error) is compatible with all commands
if ( reply_code == 500 || [cmd$cmd, reply_code] in cmd_reply_code )
score = score + 100;
# if the command or the command arg appears in the reply message
if ( strstr(reply_msg, cmd$cmd) > 0 )
score = score + 20;
if ( strstr(reply_msg, cmd$arg) > 0 )
score = score + 10;
if ( score > best_score ||
( score == best_score && best_seq > cmd_seq ) ) # break tie with sequence number
{
@@ -132,7 +132,7 @@ function remove_pending_cmd(pc: PendingCmds, ca: CmdArg): bool
else
return F;
}
function pop_pending_cmd(pc: PendingCmds, reply_code: count, reply_msg: string): CmdArg
{
local ca = get_pending_cmd(pc, reply_code, reply_msg);
@@ -97,7 +97,7 @@ event http_header(c: connection, is_orig: bool, name: string, value: string) &pr
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5
{
if ( f$source == "HTTP" && c?$http )
{
f$http = c$http;
@@ -199,6 +199,6 @@ event file_sniff(f: fa_file, meta: fa_metadata) &priority=5
event http_end_entity(c: connection, is_orig: bool) &priority=5
{
if ( c?$http && c$http?$current_entity )
delete c$http$current_entity;
}
@@ -16,7 +16,7 @@ export {
##
## Returns: A vector of strings containing the keys.
global extract_keys: function(data: string, kv_splitter: pattern): string_vec;
## Creates a URL from an :zeek:type:`HTTP::Info` record. This should
## handle edge cases such as proxied requests appropriately.
##
@@ -24,7 +24,7 @@ export {
##
## Returns: A URL, not prefixed by ``"http://"``.
global build_url: function(rec: Info): string;
## Creates a URL from an :zeek:type:`HTTP::Info` record. This should
## handle edge cases such as proxied requests appropriately.
##
@@ -41,7 +41,7 @@ export {
function extract_keys(data: string, kv_splitter: pattern): string_vec
{
local key_vec: vector of string = vector();
local parts = split_string(data, kv_splitter);
for ( part_index in parts )
{
@@ -64,7 +64,7 @@ function build_url(rec: Info): string
host = fmt("%s:%d", host, resp_p);
return fmt("%s%s", host, uri);
}
function build_url_http(rec: Info): string
{
return fmt("http://%s", build_url(rec));
@@ -31,7 +31,7 @@ event zeek_init() &priority=5
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5
{
if ( [c$id$resp_h, c$id$resp_p] !in dcc_expected_transfers )
return;
local irc = dcc_expected_transfers[c$id$resp_h, c$id$resp_p];
@@ -1,11 +1,11 @@
##! Implements the core IRC analysis support. The logging model is to log
##! IRC commands along with the associated response and some additional
##! metadata about the connection if it's available.
module IRC;
export {
redef enum Log::ID += { LOG };
global log_policy: Log::PolicyHook;
@@ -21,7 +21,7 @@ export {
nick: string &log &optional;
## Username given for the connection.
user: string &log &optional;
## Command given by the client.
command: string &log &optional;
## Value for the command given by the client.
@@ -29,8 +29,8 @@ export {
## Any additional data for the command.
addl: string &log &optional;
};
## Event that can be handled to access the IRC record as it is sent on
## to the logging framework.
global irc_log: event(rec: Info);
}
@@ -48,7 +48,7 @@ event zeek_init() &priority=5
Log::create_stream(IRC::LOG, [$columns=Info, $ev=irc_log, $path="irc", $policy=log_policy]);
Analyzer::register_for_ports(Analyzer::ANALYZER_IRC, ports);
}
function new_session(c: connection): Info
{
local info: Info;
@@ -57,12 +57,12 @@ function new_session(c: connection): Info
info$id = c$id;
return info;
}
function set_session(c: connection)
{
if ( ! c?$irc )
c$irc = new_session(c);
c$irc$ts=network_time();
}
@@ -95,7 +95,7 @@ function set_session(c: connection): bool
$id = c$id);
Conn::register_removal_hook(c, finalize_krb);
}
return c$krb$logged;
}
@@ -115,7 +115,7 @@ event krb_error(c: connection, msg: Error_Msg) &priority=5
if ( msg?$error_text && msg$error_text in ignored_errors )
{
if ( c?$krb )
delete c$krb;
return;
@@ -174,7 +174,7 @@ event krb_as_response(c: connection, msg: KDC_Response) &priority=5
if ( ! c$krb?$client && ( msg?$client_name || msg?$client_realm ) )
{
c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "",
msg?$client_realm ? msg$client_realm : "");
}
@@ -202,7 +202,7 @@ event krb_tgs_request(c: connection, msg: KDC_Request) &priority=5
c$krb$request_type = "TGS";
if ( msg?$service_name )
c$krb$service = msg$service_name;
if ( msg?$from )
c$krb$from = msg$from;
if ( msg?$till )
c$krb$till = msg$till;
@@ -221,7 +221,7 @@ event krb_tgs_response(c: connection, msg: KDC_Response) &priority=5
if ( ! c$krb?$client && ( msg?$client_name || msg?$client_realm ) )
{
c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "",
msg?$client_realm ? msg$client_realm : "");
}
@@ -33,7 +33,7 @@ export {
## Indicate whether or not the authentication was successful.
success : bool &log &optional;
## Internally used field to indicate if the login attempt
## has already been logged.
done: bool &default=F;
};
@@ -24,7 +24,7 @@ export {
mac : string &log &optional;
## The address given to the network access server, if
## present. This is only a hint from the RADIUS server
## and the network access server is not required to honor
## the address.
framed_addr : addr &log &optional;
## Address (IPv4, IPv6, or FQDN) of the initiator end of the tunnel,
@@ -33,7 +33,7 @@ export {
tunnel_client: string &log &optional;
## Connect info, if present.
connect_info : string &log &optional;
## Reply message from the server challenge. This is
## frequently shown to the user authenticating.
reply_msg : string &log &optional;
## Successful or failed authentication.
@@ -41,15 +41,15 @@ export {
desktop_width: count &log &optional;
## Desktop height of the client machine.
desktop_height: count &log &optional;
## The color depth requested by the client in
## the high_color_depth field.
requested_color_depth: string &log &optional;
## If the connection is being encrypted with native
## RDP encryption, this is the type of cert
## being used.
cert_type: string &log &optional;
## The number of certs seen. X.509 can transfer an
## entire certificate chain.
cert_count: count &log &default=0;
## Indicates if the provided certificate or certificate
@@ -57,7 +57,7 @@ export {
cert_permanent: bool &log &optional;
## Encryption level of the connection.
encryption_level: string &log &optional;
## Encryption method of the connection.
encryption_method: string &log &optional;
};
@@ -65,7 +65,7 @@ export {
## continuing to process encrypted traffic.
option disable_analyzer_after_detection = F;
## The amount of time to monitor an RDP session from when it is first
## identified. When this interval is reached, the session is logged.
option rdp_check_interval = 10secs;
@@ -113,7 +113,7 @@ function write_log(c: connection)
info$done = T;
# Verify that the RDP session contains
# RDP data before writing it to the log.
if ( info?$cookie || info?$keyboard_layout || info?$result )
Log::write(RDP::LOG, info);
}
@@ -124,16 +124,16 @@ event check_record(c: connection)
if ( c$rdp$done )
return;
# If the value rdp_check_interval has passed since the
# RDP session was started, then log the record.
local diff = network_time() - c$rdp$ts;
if ( diff > rdp_check_interval )
{
write_log(c);
# Remove the analyzer if it is still attached.
if ( disable_analyzer_after_detection &&
connection_exists(c$id) &&
c$rdp?$analyzer_id )
{
disable_analyzer(c$id, c$rdp$analyzer_id);
@@ -240,7 +240,7 @@ event rdp_server_certificate(c: connection, cert_type: count, permanently_issued
# now so we manually count this one.
if ( c$rdp$cert_type == "RSA" )
++c$rdp$cert_count;
c$rdp$cert_permanent = permanently_issued;
}
@@ -265,7 +265,7 @@ event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priori
}
}
-event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=5
+event analyzer_confirmation(c: connection, atype: AllAnalyzers::Tag, aid: count) &priority=5
{
if ( atype == Analyzer::ANALYZER_RDP )
{
@@ -274,7 +274,7 @@ event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &pr
}
}
-event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason: string) &priority=5
+event analyzer_violation(c: connection, atype: AllAnalyzers::Tag, aid: count, reason: string) &priority=5
{
# If a protocol violation occurs, then log the record immediately.
if ( c?$rdp )
@@ -107,13 +107,13 @@ export {
} &redef &default=function(i: count):string { return fmt("unknown-wksta-command-%d", i); };
type rpc_cmd_table: table[count] of string;
## The subcommands for RPC endpoints.
const rpc_sub_cmds: table[string] of rpc_cmd_table = {
["4b324fc8-1670-01d3-1278-5a47bf6ee188"] = srv_cmds,
["6bffd098-a112-3610-9833-46c3f87e345a"] = wksta_cmds,
} &redef &default=function(i: string):rpc_cmd_table { return table() &default=function(j: string):string { return fmt("unknown-uuid-%s", j); }; };
}
module SMB1;
@@ -195,37 +195,37 @@ export {
} &default=function(i: count):string { return fmt("unknown-%d", i); };
const trans2_sub_commands: table[count] of string = {
[0x00] = "OPEN2",
[0x01] = "FIND_FIRST2",
[0x02] = "FIND_NEXT2",
[0x03] = "QUERY_FS_INFORMATION",
[0x04] = "SET_FS_INFORMATION",
[0x05] = "QUERY_PATH_INFORMATION",
[0x06] = "SET_PATH_INFORMATION",
[0x07] = "QUERY_FILE_INFORMATION",
[0x08] = "SET_FILE_INFORMATION",
[0x09] = "FSCTL",
[0x0A] = "IOCTL",
[0x0B] = "FIND_NOTIFY_FIRST",
[0x0C] = "FIND_NOTIFY_NEXT",
[0x0D] = "CREATE_DIRECTORY",
[0x0E] = "SESSION_SETUP",
[0x10] = "GET_DFS_REFERRAL",
[0x11] = "REPORT_DFS_INCONSISTENCY",
} &default=function(i: count):string { return fmt("unknown-trans2-sub-cmd-%d", i); };
const trans_sub_commands: table[count] of string = {
[0x01] = "SET_NMPIPE_STATE",
[0x11] = "RAW_READ_NMPIPE",
[0x21] = "QUERY_NMPIPE_STATE",
[0x22] = "QUERY_NMPIPE_INFO",
[0x23] = "PEEK_NMPIPE",
[0x26] = "TRANSACT_NMPIPE",
[0x31] = "RAW_WRITE_NMPIPE",
[0x36] = "READ_NMPIPE",
[0x37] = "WRITE_NMPIPE",
[0x53] = "WAIT_NMPIPE",
[0x54] = "CALL_NMPIPE",
} &default=function(i: count):string { return fmt("unknown-trans-sub-cmd-%d", i); };
}
@@ -14,7 +14,7 @@ export {
function get_file_handle(c: connection, is_orig: bool): string
{
if ( ! (c$smb_state?$current_file &&
(c$smb_state$current_file?$name ||
c$smb_state$current_file?$path)) )
{
# TODO - figure out what are the cases where this happens.
@@ -5,7 +5,7 @@
module SMB;
export {
redef enum Log::ID += {
AUTH_LOG,
MAPPING_LOG,
FILES_LOG
@@ -13,7 +13,7 @@ export {
global log_policy_files: Log::PolicyHook;
global log_policy_mapping: Log::PolicyHook;
## Abstracted actions for SMB file actions.
type Action: enum {
FILE_READ,
@@ -55,7 +55,7 @@ export {
id : conn_id &log;
## Unique ID of the file.
fuid : string &log &optional;
## Action this log record represents.
action : Action &log &optional;
## Path pulled from the tree this file was transferred to or from.
@@ -99,14 +99,14 @@ export {
uid : string &log;
## ID of the connection the request was sent over.
id : conn_id &log;
## The command sent by the client.
command : string &log;
## The subcommand sent by the client, if present.
sub_command : string &log &optional;
## Command argument sent by the client, if any.
argument : string &log &optional;
## Server reply to the client's command.
status : string &log &optional;
## Round trip time from the request to the response.
@@ -116,13 +116,13 @@ export {
## Authenticated username, if available.
username : string &log &optional;
## If this is related to a tree, this is the tree
## that was used for the current command.
tree : string &log &optional;
## The type of tree (disk share, printer share, named pipe, etc.).
tree_service : string &log &optional;
## If the command referenced a file, store it here.
referenced_file : FileInfo &log &optional;
## If the command referenced a tree, store it here.
@@ -138,7 +138,7 @@ export {
current_file : FileInfo &optional;
## A reference to the current tree.
current_tree : TreeInfo &optional;
## Indexed on MID to map responses to requests.
pending_cmds : table[count] of CmdInfo &optional;
## File map to retrieve file information based on the file ID.
@@ -161,7 +161,7 @@ export {
redef record connection += {
smb_state : State &optional;
};
## This is an internally used function.
const set_current_file: function(smb_state: State, file_id: count) &redef;
@@ -195,7 +195,7 @@ function set_current_file(smb_state: State, file_id: count)
smb_state$fid_map[file_id] = smb_state$current_cmd$referenced_file;
smb_state$fid_map[file_id]$fid = file_id;
}
smb_state$current_cmd$referenced_file = smb_state$fid_map[file_id];
smb_state$current_file = smb_state$current_cmd$referenced_file;
}
@@ -203,7 +203,7 @@ function set_current_file(smb_state: State, file_id: count)
function write_file_log(state: State)
{
local f = state$current_file;
if ( f?$name &&
f$action in logged_file_actions )
{
# Everything in this if statement is to avoid overlogging
@@ -225,7 +225,7 @@ function write_file_log(state: State)
else
add state$recent_files[file_ident];
}
Log::write(FILES_LOG, f);
}
}
@@ -240,7 +240,7 @@ event file_state_remove(f: fa_file) &priority=-5
{
if ( f$source != "SMB" )
return;
for ( id, c in f$conns )
{
if ( c?$smb_state && c$smb_state?$current_file)
@ -39,12 +39,12 @@ event smb1_message(c: connection, hdr: SMB1::Header, is_orig: bool) &priority=5
{ {
smb_state$current_cmd$tree = smb_state$current_tree$path; smb_state$current_cmd$tree = smb_state$current_tree$path;
} }
if ( smb_state$current_tree?$service ) if ( smb_state$current_tree?$service )
{ {
smb_state$current_cmd$tree_service = smb_state$current_tree$service; smb_state$current_cmd$tree_service = smb_state$current_tree$service;
} }
if ( mid !in smb_state$pending_cmds ) if ( mid !in smb_state$pending_cmds )
{ {
local tmp_cmd = SMB::CmdInfo($uid=c$uid, $id=c$id, $version="SMB1", $command = SMB1::commands[hdr$command]); local tmp_cmd = SMB::CmdInfo($uid=c$uid, $id=c$id, $version="SMB1", $command = SMB1::commands[hdr$command]);
@ -52,10 +52,10 @@ event smb1_message(c: connection, hdr: SMB1::Header, is_orig: bool) &priority=5
local tmp_file = SMB::FileInfo($uid=c$uid, $id=c$id); local tmp_file = SMB::FileInfo($uid=c$uid, $id=c$id);
tmp_cmd$referenced_file = tmp_file; tmp_cmd$referenced_file = tmp_file;
tmp_cmd$referenced_tree = smb_state$current_tree; tmp_cmd$referenced_tree = smb_state$current_tree;
smb_state$pending_cmds[mid] = tmp_cmd; smb_state$pending_cmds[mid] = tmp_cmd;
} }
smb_state$current_cmd = smb_state$pending_cmds[mid]; smb_state$current_cmd = smb_state$pending_cmds[mid];
if ( !is_orig ) if ( !is_orig )
@ -97,11 +97,11 @@ event smb1_negotiate_response(c: connection, hdr: SMB1::Header, response: SMB1::
delete c$smb_state$current_cmd$smb1_offered_dialects; delete c$smb_state$current_cmd$smb1_offered_dialects;
} }
} }
event smb1_negotiate_response(c: connection, hdr: SMB1::Header, response: SMB1::NegotiateResponse) &priority=-5 event smb1_negotiate_response(c: connection, hdr: SMB1::Header, response: SMB1::NegotiateResponse) &priority=-5
{ {
} }
event smb1_tree_connect_andx_request(c: connection, hdr: SMB1::Header, path: string, service: string) &priority=5 event smb1_tree_connect_andx_request(c: connection, hdr: SMB1::Header, path: string, service: string) &priority=5
{ {
local tmp_tree = SMB::TreeInfo($uid=c$uid, $id=c$id, $path=path, $service=service); local tmp_tree = SMB::TreeInfo($uid=c$uid, $id=c$id, $path=path, $service=service);
@ -117,7 +117,7 @@ event smb1_tree_connect_andx_response(c: connection, hdr: SMB1::Header, service:
c$smb_state$current_cmd$referenced_tree$share_type = "PIPE"; c$smb_state$current_cmd$referenced_tree$share_type = "PIPE";
c$smb_state$current_cmd$tree_service = service; c$smb_state$current_cmd$tree_service = service;
if ( native_file_system != "" ) if ( native_file_system != "" )
c$smb_state$current_cmd$referenced_tree$native_file_system = native_file_system; c$smb_state$current_cmd$referenced_tree$native_file_system = native_file_system;
@ -150,13 +150,13 @@ event smb1_nt_create_andx_response(c: connection, hdr: SMB1::Header, file_id: co
# I'm seeing negative data from IPC tree transfers # I'm seeing negative data from IPC tree transfers
if ( time_to_double(times$modified) > 0.0 ) if ( time_to_double(times$modified) > 0.0 )
c$smb_state$current_cmd$referenced_file$times = times; c$smb_state$current_cmd$referenced_file$times = times;
# We can identify the file by its file id now so let's stick it # We can identify the file by its file id now so let's stick it
# in the file map. # in the file map.
c$smb_state$fid_map[file_id] = c$smb_state$current_cmd$referenced_file; c$smb_state$fid_map[file_id] = c$smb_state$current_cmd$referenced_file;
		c$smb_state$current_file = c$smb_state$fid_map[file_id];
		SMB::write_file_log(c$smb_state);
		}
@@ -167,7 +167,7 @@ event smb1_read_andx_request(c: connection, hdr: SMB1::Header, file_id: count, o
	if ( c$smb_state$current_file?$name )
		c$smb_state$current_cmd$argument = c$smb_state$current_file$name;
	}

event smb1_read_andx_request(c: connection, hdr: SMB1::Header, file_id: count, offset: count, length: count) &priority=-5
	{
	if ( c$smb_state$current_tree?$path && !c$smb_state$current_file?$path )
@@ -180,12 +180,12 @@ event smb1_write_andx_request(c: connection, hdr: SMB1::Header, file_id: count,
	{
	SMB::set_current_file(c$smb_state, file_id);
	c$smb_state$current_file$action = SMB::FILE_WRITE;

	if ( !c$smb_state$current_cmd?$argument &&
	     # TODO: figure out why name isn't getting set sometimes.
	     c$smb_state$current_file?$name )
		c$smb_state$current_cmd$argument = c$smb_state$current_file$name;
	}

event smb1_write_andx_request(c: connection, hdr: SMB1::Header, file_id: count, offset: count, data_len: count) &priority=-5
	{
	if ( c$smb_state$current_tree?$path && !c$smb_state$current_file?$path )
@@ -217,7 +217,7 @@ event smb1_close_request(c: connection, hdr: SMB1::Header, file_id: count) &prio
		if ( fl?$name )
			c$smb_state$current_cmd$argument = fl$name;
		delete c$smb_state$fid_map[file_id];

		SMB::write_file_log(c$smb_state);
@@ -254,7 +254,7 @@ event smb1_session_setup_andx_response(c: connection, hdr: SMB1::Header, respons
	{
	# No behavior yet.
	}

event smb1_transaction_request(c: connection, hdr: SMB1::Header, name: string, sub_cmd: count, parameters: string, data: string)
	{
	c$smb_state$current_cmd$sub_command = SMB1::trans_sub_commands[sub_cmd];
@@ -267,7 +267,7 @@ event smb1_write_andx_request(c: connection, hdr: SMB1::Header, file_id: count,
		# TODO: figure out why the uuid isn't getting set sometimes.
		return;
		}

	c$smb_state$pipe_map[file_id] = c$smb_state$current_file$uuid;
	}
@@ -278,11 +278,11 @@ event smb_pipe_bind_ack_response(c: connection, hdr: SMB1::Header)
		# TODO: figure out why the uuid isn't getting set sometimes.
		return;
		}

	c$smb_state$current_cmd$sub_command = "RPC_BIND_ACK";
	c$smb_state$current_cmd$argument = SMB::rpc_uuids[c$smb_state$current_file$uuid];
	}

event smb_pipe_bind_request(c: connection, hdr: SMB1::Header, uuid: string, version: string)
	{
	if ( ! c$smb_state?$current_file || ! c$smb_state$current_file?$uuid )
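The hunks above all follow one pattern: per-connection SMB state lives in tables keyed by the SMB1 file id (fid_map for files, pipe_map for RPC pipes), and set_current_file/write_file_log operate on whichever entry is current. A minimal sketch of that pattern, using hypothetical record types rather than the real SMB scripts:

type FileInfo: record {
	name: string &optional;
	uuid: string &optional;
};

type State: record {
	fid_map: table[count] of FileInfo;   # file id -> file metadata
	pipe_map: table[count] of string;    # file id -> RPC endpoint uuid
	current_file: FileInfo &optional;
};

function set_current_file(state: State, file_id: count)
	{
	# Create the entry on first sight of this id, then make it current.
	if ( file_id !in state$fid_map )
		state$fid_map[file_id] = FileInfo();

	state$current_file = state$fid_map[file_id];
	}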
View file
@@ -19,7 +19,7 @@ event smb2_message(c: connection, hdr: SMB2::Header, is_orig: bool) &priority=5
		state$pipe_map = table();
		c$smb_state = state;
		}

	local smb_state = c$smb_state;
	local tid = hdr$tree_id;
	local mid = hdr$message_id;
@@ -159,10 +159,10 @@ event smb2_create_response(c: connection, hdr: SMB2::Header, response: SMB2::Cre
	if ( time_to_double(response$times$modified) > 0.0 )
		c$smb_state$current_file$times = response$times;

	# We can identify the file by its file id now so let's stick it
	# in the file map.
	c$smb_state$fid_map[response$file_id$persistent+response$file_id$volatile] = c$smb_state$current_file;
	c$smb_state$current_file = c$smb_state$fid_map[response$file_id$persistent+response$file_id$volatile];
	}
@@ -193,7 +193,7 @@ event smb2_read_request(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, o
	}

event smb2_read_request(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, offset: count, length: count) &priority=-5
	{
	SMB::write_file_log(c$smb_state);
	}
@@ -249,7 +249,7 @@ event smb2_file_rename(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, ds
	if ( c$smb_state$current_file?$name )
		c$smb_state$current_file$prev_name = c$smb_state$current_file$name;

	c$smb_state$current_file$name = dst_filename;

	switch ( c$smb_state$current_tree$share_type )
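Unlike SMB1's single numeric file id, an SMB2::GUID carries a persistent half and a volatile half; the create-response hunk above folds the two counts into one table key. A sketch of that keying, with fid_key as a hypothetical helper name:

function fid_key(file_id: SMB2::GUID): count
	{
	# Combine the two halves into the single count used to index
	# fid_map, mirroring persistent+volatile in the hunk above.
	return file_id$persistent + file_id$volatile;
	}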
View file
@@ -355,7 +355,7 @@ event ssh_server_host_key(c: connection, hash: string) &priority=5
	c$ssh$host_key = hash;
	}

-event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=20
+event analyzer_confirmation(c: connection, atype: AllAnalyzers::Tag, aid: count) &priority=20
	{
	if ( atype == Analyzer::ANALYZER_SSH )
		{
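This hunk is part of the repo-wide rename of protocol_confirmation to analyzer_confirmation, which also widens the tag parameter from Analyzer::Tag to AllAnalyzers::Tag. A handler in the new form looks like this (the print body is illustrative only, not what the SSH script does):

event analyzer_confirmation(c: connection, atype: AllAnalyzers::Tag, aid: count) &priority=20
	{
	if ( atype == Analyzer::ANALYZER_SSH )
		print fmt("SSH confirmed on connection %s", c$uid);
	}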
View file
@@ -474,7 +474,7 @@ hook finalize_ssl(c: connection)
	finish(c, F);
	}

-event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=5
+event analyzer_confirmation(c: connection, atype: AllAnalyzers::Tag, aid: count) &priority=5
	{
	if ( atype == Analyzer::ANALYZER_SSL || atype == Analyzer::ANALYZER_DTLS )
		{
@@ -494,7 +494,7 @@ event ssl_plaintext_data(c: connection, is_orig: bool, record_version: count, co
		Weird::weird(wi);
	}

-event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count,
+event analyzer_violation(c: connection, atype: AllAnalyzers::Tag, aid: count,
		reason: string) &priority=5
	{
	if ( c?$ssl && ( atype == Analyzer::ANALYZER_SSL || atype == Analyzer::ANALYZER_DTLS ) )
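The companion rename swaps protocol_violation for analyzer_violation with the same Tag-type change. A sketch of a violation handler that reacts by tearing down the offending analyzer via the standard disable_analyzer BiF (the reaction shown is illustrative, not necessarily what the SSL script does here):

event analyzer_violation(c: connection, atype: AllAnalyzers::Tag, aid: count,
		reason: string) &priority=5
	{
	if ( atype == Analyzer::ANALYZER_SSL || atype == Analyzer::ANALYZER_DTLS )
		disable_analyzer(c$id, aid);
	}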
View file
@@ -31,7 +31,7 @@ export {
		[23] = "LOCAL7",
		[999] = "UNSPECIFIED",
	} &default=function(c: count): string { return fmt("?-%d", c); };

	## Mapping between the constants and string values for syslog severities.
	const severity_codes: table[count] of string = {
		[0] = "EMERG",
Some files were not shown because too many files have changed in this diff