Mirror of https://github.com/zeek/zeek.git, synced 2025-10-05 16:18:19 +00:00

Merge remote-tracking branch 'origin/master' into topic/johanna/spicy-tls

* origin/master: (139 commits)
  Given the -C flag, set script-layer ignore_checksums to true.
  Add btest for "-C" flag vs the script-layer ignore_checksums global.
  Update doc submodule [nomail] [skip ci]
  Remove references to bro_broker in broker/Manager.h
  cmake: Fixup BRO_PLUGIN_INSTALL_PATH references
  testing/external: Bump hashes for community_id addition
  NEWS: Add entry for Community ID
  policy: Import zeek-community-id scripts into protocols/conn frameworks/notice
  Add irc_dcc_send_ack event and fix missing fields
  Fix install directory for plugins
  Update doc submodule [nomail] [skip ci]
  Add community_id_v1() based on corelight/zeek-community-id
  Update NEWS to cover cluster framework changes.
  Add cluster_started restart tests.
  Add basic cluster_started tests.
  Add cluster_started and node_fully_connected events.
  Add hook into cluster connection setup.
  Add broadcast_topics set.
  Generalize Cluster::worker_count.
  Edit pass over the current 6.0 NEWS entries. [nomail] [skip ci]
  ...

Commit 63a4cc824a

462 changed files with 10072 additions and 4434 deletions
.cirrus.yml (16 changed lines)

@@ -8,7 +8,7 @@
 cpus: &CPUS 4
 btest_jobs: &BTEST_JOBS 4
 btest_retries: &BTEST_RETRIES 2
-memory: &MEMORY 12GB
+memory: &MEMORY 16GB
 
 config: &CONFIG --build-type=release --disable-broker-tests --prefix=$CIRRUS_WORKING_DIR/install --ccache
 static_config: &STATIC_CONFIG --build-type=release --disable-broker-tests --enable-static-broker --enable-static-binpac --prefix=$CIRRUS_WORKING_DIR/install --ccache

@@ -61,7 +61,7 @@ builds_only_if_template: &BUILDS_ONLY_IF_TEMPLATE
   ( $CIRRUS_CRON == '' ) &&
   ( $CIRRUS_REPO_NAME != 'zeek-security' || $CIRRUS_OS != "darwin" ) &&
   ( ( $CIRRUS_PR != '' && $CIRRUS_BRANCH !=~ 'dependabot/.*' ) ||
-    ( $CIRRUS_REPO_NAME == 'zeek' &&
+    ( ( $CIRRUS_REPO_NAME == 'zeek' || $CIRRUS_REPO_NAME == 'zeek-security' ) &&
       (
         $CIRRUS_BRANCH == 'master' ||
         $CIRRUS_BRANCH =~ 'release/.*'

@@ -201,6 +201,14 @@ centos7_task:
   << : *CI_TEMPLATE
   << : *SKIP_TASK_ON_PR
 
+debian12_task:
+  container:
+    # Debian 12 (bookworm) EOL: (not yet released)
+    dockerfile: ci/debian-12/Dockerfile
+    << : *RESOURCES_TEMPLATE
+  << : *CI_TEMPLATE
+  << : *SKIP_TASK_ON_PR
+
 debian11_task:
   container:
     # Debian 11 EOL: June 2026

@@ -254,7 +262,6 @@ ubuntu2210_task:
     dockerfile: ci/ubuntu-22.10/Dockerfile
     << : *RESOURCES_TEMPLATE
   << : *CI_TEMPLATE
-  << : *SKIP_TASK_ON_PR
 
 ubuntu22_task:
   container:

@@ -491,11 +498,13 @@ arm64_container_image_docker_builder:
   env:
     CIRRUS_ARCH: arm64
   << : *DOCKER_BUILD_TEMPLATE
+  << : *SKIP_TASK_ON_PR
 
 amd64_container_image_docker_builder:
   env:
     CIRRUS_ARCH: amd64
   << : *DOCKER_BUILD_TEMPLATE
+  << : *SKIP_TASK_ON_PR
 
 container_image_manifest_docker_builder:
   cpu: 1

@@ -632,6 +641,7 @@ cluster_testing_docker_builder:
       path: "testing/external/zeek-testing-cluster/.tmp/**"
   depends_on:
     - amd64_container_image
+  << : *SKIP_TASK_ON_PR
 
 
 # Test zeekctl upon master and release pushes and also when
.gitmodules (3 changed lines, vendored)

@@ -73,3 +73,6 @@
 [submodule "auxil/libunistd"]
 	path = auxil/libunistd
 	url = https://github.com/zeek/libunistd
+[submodule "auxil/zeekjs"]
+	path = auxil/zeekjs
+	url = https://github.com/corelight/zeekjs.git
CHANGES (732 changed lines)

@@ -1,3 +1,735 @@
6.0.0-dev.421 | 2023-04-25 12:39:01 +0200

  * Given the -C flag, set script-layer ignore_checksums to true. (Christian Kreibich, Corelight)

  * Remove references to bro_broker in broker/Manager.h (Tim Wojtulewicz, Corelight)
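    The -C flag now simply flips the existing script-layer tunable; a minimal
    sketch of observing that from a script (only the `ignore_checksums` name is
    taken from the entry above, the handler itself is illustrative):

        # Illustrative only: after "zeek -C -r trace.pcap local.zeek" this prints
        # that checksum validation is off, same as with "redef ignore_checksums = T;".
        event zeek_init()
            {
            if ( ignore_checksums )
                print "checksum validation is disabled";
            }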
6.0.0-dev.416 | 2023-04-24 18:22:27 +0200

  * Add irc_dcc_send_ack event and fix missing fields (Fupeng Zhao)

6.0.0-dev.414 | 2023-04-24 14:36:32 +0200

  * cmake: Fixup BRO_PLUGIN_INSTALL_PATH references (Arne Welzel, Corelight)

    Commit 19cbb328a9b4598b71e0aa092bf8a03732da8d63 removed setting of
    BRO_PLUGIN_INSTALL_PATH in favor of ZEEK_PLUGIN_DIR. Replace left-over
    references to the former.

6.0.0-dev.412 | 2023-04-24 10:33:29 +0200

  * Fix install directory for plugins (Dominik Charousset, Corelight)

6.0.0-dev.410 | 2023-04-24 09:46:59 +0200

  * policy: Import zeek-community-id scripts into protocols/conn frameworks/notice (Christian Kreibich, Corelight)

  * Add community_id_v1() based on corelight/zeek-community-id (Christian Kreibich, Corelight)

    "Community ID" has become an established flow hash for connection correlation
    across different monitoring and storage systems. Other NSMs have had native
    and built-in support for Community ID since late 2018. And even though the
    roots of "Community ID" are very close to Zeek, Zeek itself has never provided
    out-of-the-box support and instead required users to install an external plugin.

    While we try to make that installation as easy as possible, an external plugin
    always sets the bar higher for an initial setup and can be intimidating.
    It also requires a rebuild operation of the plugin during upgrades. Nothing
    overly complicated, but somewhat unnecessary for such popular functionality.

    This isn't a 1:1 import. The options are parameters and the "verbose"
    functionality has been removed. Further, instead of a `connection`
    record, the new bif works with `conn_id`, allowing computation of the
    hash with little effort on the command line:

        $ zeek -e 'print community_id_v1([$orig_h=1.2.3.4, $orig_p=1024/tcp, $resp_h=5.6.7.8, $resp_p=80/tcp])'
        1:RcCrCS5fwYUeIzgDDx64EN3+okU

    Reference: https://github.com/corelight/zeek-community-id/
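    A minimal sketch of enabling the imported scripts from a site config; the
    exact script names are assumptions based on the protocols/conn and
    frameworks/notice locations mentioned above:

        # Assumed script names; both hook community_id_v1() into existing logs.
        @load protocols/conn/community-id-logging    # adds a community_id field to conn.log
        @load frameworks/notice/community-id         # adds community_id to notice.log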
6.0.0-dev.405 | 2023-04-24 09:23:01 +0200

  * file_analysis/Manager: Remove RunState.h include (Arne Welzel, Corelight)

    The file_analysis/Manager.h header only needs run_state::terminating, so
    declare just that, similar to what Val.h does. This breaks ZBody compilation
    due to OP_READING_LIVE_TRAFFIC_V and OP_READING_TRACES_V accessing
    run_state::reading_live and run_state::reading_traces. Add the include to
    ZBody.cc as a fix.

    This may break external plugins, too, but then they didn't have the
    right includes set up to begin with.

6.0.0-dev.402 | 2023-04-21 14:04:52 -0700

  * Update NEWS to cover cluster framework changes. (Jan Grashoefer, Corelight)

  * Add cluster_started restart tests. (Jan Grashoefer, Corelight)

  * Add basic cluster_started tests. (Jan Grashoefer, Corelight)

  * Add cluster_started and node_fully_connected events. (Jan Grashoefer, Corelight)

  * Add hook into cluster connection setup. (Jan Grashoefer, Corelight)

  * Add broadcast_topics set. (Jan Grashoefer, Corelight)

  * Generalize Cluster::worker_count. (Jan Grashoefer, Corelight)
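    A minimal sketch of reacting to the new cluster_started event; only the
    event name comes from the entries above, the module namespace and the
    (empty) argument list are assumptions:

        # Assumes cluster_started is raised once every node in the cluster
        # layout has connected.
        event cluster_started()
            {
            print "all cluster nodes are connected, starting analysis tasks";
            }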
6.0.0-dev.394 | 2023-04-20 16:03:45 -0700

  * Edit pass over the current 6.0 NEWS entries. (Christian Kreibich, Corelight)

  * Update doc submodule [nomail] [skip ci] (zeek-bot)

6.0.0-dev.391 | 2023-04-19 19:48:50 +0200

  * zeek.bif: Remove cat_sep() fully var_arg changes (Arne Welzel, Corelight)

    These were introduced to better catch type violations at runtime. With
    bifcl doing these checks, revert to a better documented version.

  * GH-2935: broker/messaging: Runtime type checks for pool (Arne Welzel, Corelight)

    publish_hrw() and publish_rr() are excluded from type checking due to their
    variadic nature. Passing a wrong type for the pool argument previously triggered
    an abort; now the result is runtime errors. This isn't great, but it's
    better than crashing Zeek.

    Closes #2935

  * bifcl: Bump for runtime-type checks in var_arg bifs (Arne Welzel, Corelight)
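    For context, a minimal sketch of the pool-based publish calls these checks
    now guard; `Cluster::publish_hrw()` and `Cluster::proxy_pool` are the
    pre-existing cluster framework API, the event itself is made up:

        global work_item: event(key: string);

        event zeek_init()
            {
            # Passing anything other than a Cluster::Pool as the first argument
            # now yields a runtime error instead of aborting the Zeek process.
            Cluster::publish_hrw(Cluster::proxy_pool, "key-1", work_item, "key-1");
            }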
6.0.0-dev.387 | 2023-04-19 09:01:32 -0700

  * Add call stacks to script profiler output (Tim Wojtulewicz, Corelight)

6.0.0-dev.385 | 2023-04-19 11:58:09 +0200

  * different fix for MSVC compiler issues (Vern Paxson, Corelight)

  * more general approach for addressing MSVC compiler issues with IntrusivePtr (Vern Paxson, Corelight)

  * restored RecordType::Create, now marked as deprecated (Vern Paxson, Corelight)

    tidying of namespaces and private class members

    simplification of flagging record field initializations that should be skipped

    address peculiar MSVC compilation complaint for IntrusivePtr's

  * clarifications and tidying for record field initializations (Vern Paxson, Corelight)

  * optimize record construction by deferring initializations of aggregates (Vern Paxson, Corelight)

  * compile-scripts-to-C++ speedups by switching to raw record access (Vern Paxson, Corelight)

  * logging speedup by switching to raw record access (Vern Paxson, Corelight)

  * remove redundant record coercions (Vern Paxson, Corelight)

6.0.0-dev.376 | 2023-04-19 10:14:02 +0200

  * Improve CMake variables, update cmake submodule (Dominik Charousset, Corelight)

  * Fix builds with plugins that use zeek-config (Dominik Charousset, Corelight)

    When building plugins externally with `zeek-config` (as opposed to using
    `ZEEK_DIST`), they point into the install prefix. There, we add a new
    file `ZeekPluginBootstrap.cmake` that helps `ZeekPlugin.cmake` to find
    everything else it needs from there.

    Our template for plugins generates a `configure` script that sets
    various variables with values from `zeek-config`. We only need
    `BROKER_ROOT_DIR` with the new bootstrapping logic. Everything else, we
    can get from the new bootstrapping file and from the CMake package file
    for Zeek.

6.0.0-dev.373 | 2023-04-18 11:11:44 -0700

  * log-caching-cluster: Wait for X509::known_log_certs to populate (Arne Welzel, Corelight)

    The known_log_certs table is populated asynchronously via broker after a
    Broker::peer_added. It may take a variable amount of time depending on where
    we run this test and it has been observed flaky specifically for the
    arm_debian11 task. Instead of racing, give worker-2 3 seconds for receiving
    the expected table content before continuing.

    Fixes #2885

6.0.0-dev.371 | 2023-04-14 13:02:35 +0200

  * ci/debian-11: Install libnode-dev, too (Arne Welzel, Corelight)

    Debian 11 doesn't have a new enough libnode version, so JavaScript
    should not be attempted to be built.

  * CMakeLists: Convert string append to list append (Arne Welzel, Corelight)

    Seems the builtin plugins started with string(APPEND ...) and that
    was copied over. Make it list(APPEND ...) instead.

  * Add experimental JavaScript support when libnode is available (Arne Welzel, Corelight)

        zeek.on('zeek_init', () => {
          console.log('Hello, Zeek!');
        });

    For interaction with external systems and HTTP APIs, JavaScript and the
    Node.js ecosystem beat Zeek script. Make it more easily accessible by
    including ZeekJS with Zeek directly.

    When a recent enough libnode version is found on the build system, ZeekJS is
    added as a builtin plugin. This behavior can be disabled via
    ``--disable-javascript``. Linux distributions providing such a package are
    Ubuntu (22.10) and Debian (testing/bookworm) as libnode-dev.
    Fedora provides it as nodejs-devel.

    This plugin takes over loading of .js or .cjs files. When no such files
    are provided to Zeek, Node and the V8 engine are not initialized and
    should not get in the way.

    This should be considered experimental.

6.0.0-dev.367 | 2023-04-14 10:32:17 +0200

  * Revert "Type: Add TypeManager->TypeList() and use for ListVal()" (Arne Welzel, Corelight)

    This reverts commit 24c606b4df92f9871964c5bcb2fe90e43a177b1f.

    This commit introduced a memory leak due to ListVal::Append() modifying
    the cached TYPE_ANY type list.

6.0.0-dev.364 | 2023-04-13 15:54:00 -0700

  * Add NEWS entry for the CMake changes (Dominik Charousset, Corelight)

  * Redesign subdir-libs and plugin scaffolding (Dominik Charousset, Corelight)

6.0.0-dev.361 | 2023-04-13 09:05:31 -0700

  * GH-2920: Don't warn on uninitialized container options (Tim Wojtulewicz, Corelight)

6.0.0-dev.359 | 2023-04-13 08:57:22 -0700

  * Reimplement fixes to handle commas at the end of config parser lines (Tim Wojtulewicz, Corelight)

  * Revert "Convert config framework to use std::regex" (Tim Wojtulewicz, Corelight)

    This reverts commit 65ee2287e9b74f861872d9e16b9c11fb300cfabd.

  * Revert "GH-636: Fix regex to handle commas at the end of config parser lines" (Tim Wojtulewicz, Corelight)

    This reverts commit 05bb50978905a6c3132b20eb2cfd246715f91356.
6.0.0-dev.355 | 2023-04-13 09:24:19 +0200

  * logging/Manager: Fix crash for rotation format function not returning (Arne Welzel, Corelight)

    While working on a rotation format function, ran into Zeek crashing
    when not returning a value from it; fix and recover the same way as
    for scripting errors.
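    A minimal sketch of the failure mode that was fixed, assuming the standard
    `Log::rotation_format_func` hook and its `Log::RotationPath` return type;
    the field names used here are assumptions:

        # A rotation format function that forgets to return a value on some
        # code path. Previously this crashed Zeek; it is now reported and
        # recovered from like other scripting errors.
        function my_rotation_format(ri: Log::RotationFmtInfo): Log::RotationPath
            {
            if ( ri$path == "conn" )
                return [$file_basename=fmt("%s-custom", ri$path)];
            # Falls through without a return value for every other log stream.
            }

        redef Log::rotation_format_func = my_rotation_format;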
6.0.0-dev.353 | 2023-04-12 13:23:29 -0700

  * iosource/Manager: Fix poll_interval updating using not-yet valid IsLive() (Arne Welzel, Corelight)

    Testing io_poll_interval_live tweaks with @dopheide-esnet on a Myricom based
    system to reduce CPU usage showed no visible effect.

    It turns out, the pkt_src->IsLive() call used to update poll_interval is only
    valid *after* calling ->Register() with the source. The conditional updating
    of the poll_interval introduced in 4fa3e4b9b4d78e6f20f42b9dbd85216403be07db
    never worked out how it was intended to.

    The fix ensures that

      * we actually use a poll_interval of 10 in the live case
      * changing io_poll_interval_live does have an effect

    This is a bit of a major change due to lowering the default poll_interval
    by a magnitude, but that seemed to have been the intention always. It's also
    tunable via redef, so worst case it can be adapted via configuration.

    As reference, with the default Pcap::non_fd_timeout of 20usec *and* a
    poll_interval of 100, theoretically we'd be trying to ask a non-selectable
    packet source 500000 times per second for a new packet. This is not a likely
    packet rate that a single worker would currently observe or manage to process.

6.0.0-dev.350 | 2023-04-11 15:41:31 -0700

  * Only use -Wno-changes-meaning if it exists (Tim Wojtulewicz, Corelight)

6.0.0-dev.348 | 2023-04-11 15:30:45 -0700

  * file_analysis/File: Report overflowing chunks as weird and discard/truncate (Arne Welzel, Corelight)

    This is one level above the Reassembler where we still have information
    about the file and source. A weird entry may look as follows:

        1679759398.237353 ... file_offset_overflow FXPLGt4SeMmlMKahJc: offset=fffffffffffffff7 len=10 F zeek HTTP

  * Reassem: Reject blocks overflowing 64bit upper (Arne Welzel, Corelight)

    The reassembler logic isn't wrap-around safe, so just truncate or
    reject such blocks. For files specifically, a byte offset in the
    2**64 bytes represents 16EiB, which is the maximum size supported
    by BTRFS or NTFS (and probably nothing we'd ever see in practice).

  * zeek-setup: Load scripts before running unit tests (Arne Welzel, Corelight)

    It is currently not possible to call a->Conn()->GetVal() or construct a
    zeek/file_analysis/File object from within doctests, as these quickly
    reference the unpopulated zeek::id namespace to construct Val objects
    of various types, making it hard to write basic tests without completely
    re-organizing.

    Move running of the unit tests after parsing the scripts, so it is possible
    for some basic exercising of File objects within tests.

6.0.0-dev.344 | 2023-04-11 15:23:42 -0700

  * RDP: Instantiate SSL analyzer instead of PIA (Tim Wojtulewicz, Corelight)

  * RDP: add some enforcement to required values based on MS-RDPBCGR docs (Tim Wojtulewicz, Corelight)

6.0.0-dev.341 | 2023-04-11 15:16:39 -0700

  * Stop skipping expiration of empty DNS mappings (Tim Wojtulewicz, Corelight)

6.0.0-dev.339 | 2023-04-11 15:16:15 -0700

  * smtp: Validate mail transaction and disable SMTP analyzer if excessive (Arne Welzel, Corelight)

    An invalid mail transaction is determined as

      * RCPT TO command without a preceding MAIL FROM
      * a DATA command without a preceding RCPT TO

    and logged as a weird.

    The testing pcap for invalid mail transactions was produced with a Python
    script against a local exim4 configured to accept more errors and unknown
    commands than the default of 3:

        # exim4.conf.template
        smtp_max_synprot_errors = 100
        smtp_max_unknown_commands = 100

    See also: https://www.rfc-editor.org/rfc/rfc5321#section-3.3

  * generic-analyzer-fuzzer: Detect disable_analyzer() from scripts (Arne Welzel, Corelight)

    Test if the analyzer is removed from the TCPSessionAdapter during
    event processing. If we don't do this, we continue feeding the analyzer
    even if scripts decided to disable the analyzer.

    The analyzer instance isn't flagged as disabled itself, so we need
    to look at the parent's children.

6.0.0-dev.336 | 2023-04-11 14:50:47 -0700

  * ftp/main: Special case for intermediate reply lines (Arne Welzel, Corelight)

    The medium.trace in the private external test suite contains one
    session/server that violates the multi-line reply protocol and
    happened to work out fairly well regardless due to how we looked
    up the pending commands unconditionally before.

    Continue to match up reply lines that "look like they contain status codes"
    even if cont_resp = T. This still improves runtime for the OSS-Fuzz
    generated test case and keeps the external baselines valid.

    The affected session can be extracted as follows:

        zcat Traces/medium.trace.gz | tcpdump -r - 'port 1491 and port 21'

    We could push this into the analyzer, too; minimally, the RFC says:

      > If an intermediary line begins with a 3-digit number, the Server
      > must pad the front to avoid confusion.

  * ftp/main: Skip get_pending_command() for intermediate reply lines (Arne Welzel, Corelight)

    Intermediate lines of multiline replies usually do not contain valid status
    codes (even if servers may opt to include them). Their content may be anything
    and likely unrelated to the original command. There's little reason for us
    trying to match them with a corresponding command.

    OSS-Fuzz generated a large command reply with very many intermediate lines
    which caused long processing times due to matching every line with all
    currently pending commands.
    This is a DoS vector against Zeek. The new ipv6-multiline-reply.trace and
    ipv6-retr-samba.trace files have been extracted from the external ipv6.trace.

6.0.0-dev.333 | 2023-04-11 12:05:29 -0700

  * Add cstdint to WeirdState.h to fix compilation error on gcc13 (Tim Wojtulewicz, Corelight)

  * Ignore -Wchanges-meaning warning in Spicy (Tim Wojtulewicz, Corelight)

    This fixes a build failure with GCC 13. It's meant as a short-term fix to avoid
    the failure in Zeek, but probably should be fixed more correctly upstream in
    the Spicy repo.

  * Update broker submodule to pull in GCC 13 fix (Tim Wojtulewicz, Corelight)

  * CI: Force rebuild of OpenSUSE Tumbleweed VM to pick up GCC 13 (Tim Wojtulewicz, Corelight)
6.0.0-dev.327 | 2023-04-08 11:22:57 -0700

  * Fix a long-standing bug in the math around continue_processing (Tim Wojtulewicz, Corelight)

  * Add is_processing_suspended BIF (Tim Wojtulewicz, Corelight)
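    A minimal sketch of the new BIF alongside the existing suspend/continue
    pair (the surrounding handler is illustrative):

        event zeek_init()
            {
            suspend_processing();

            if ( is_processing_suspended() )
                print "packet processing is currently suspended";

            continue_processing();
            }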
6.0.0-dev.323 | 2023-04-06 13:46:28 -0700

  * Add NEWS entries for filtered packet statistics and telemetry (Tim Wojtulewicz, Corelight)

  * Report packet statistics via the telemetry framework (Tim Wojtulewicz, Corelight)

  * Add optional packet filtered statistics for packet sources (Tim Wojtulewicz, Corelight)

  * ci: Add debian-12 / bookworm (Arne Welzel, Corelight)

    The next version of Debian (bookworm) had a hard-freeze on
    2023-03-16. Seems reasonable to have it in CI now.

6.0.0-dev.317 | 2023-04-05 10:39:22 +0200

  * scan.l: Support @pragma, specifically push/pop ignore-deprecations (Arne Welzel, Corelight)

    This allows ignoring deprecations as follows:

        @pragma push ignore-deprecations
        print r$deprecated;
        @pragma pop

    A bit of infrastructure is put in place for the push and pop, but
    nothing overly generalized for future pragmas.

  * Switch deprecations to reporter->Deprecation() (Arne Welzel, Corelight)

    Removes a bit of reliance around the magic DoLog() rendering at the
    cost of needing to open-code some of it. The new obj_desc_short()
    helper makes that acceptable, though.

  * Reporter: Add dedicated Deprecation() method (Arne Welzel, Corelight)

    Minimally, provide a way to funnel all deprecations through
    reporter->Deprecation() instead of various Warning() invocations.

  * Desc: Add obj_desc_short() (Arne Welzel, Corelight)

    Same as obj_desc() but use the short version and do not include the
    location information by default. New method instead of bool parameters
    for readability.

6.0.0-dev.312 | 2023-04-04 09:05:34 -0700

  * Update gen-zam submodule for std::move warnings (Tim Wojtulewicz, Corelight)

6.0.0-dev.310 | 2023-04-04 10:00:24 +0200

  * test suite update for minor change in "when" error messages (Vern Paxson, Corelight)

  * removed skeletal (non-functioning) "when" support from ZAM (Vern Paxson, Corelight)

  * simplify WhenInfo and Trigger classes given removal of old capture semantics (Vern Paxson, Corelight)

  * introduced notion of light-weight Frame clones (Vern Paxson, Corelight)

  * changed function_ingredients struct to FunctionIngredients class with accessors (Vern Paxson, Corelight)

6.0.0-dev.303 | 2023-04-03 16:37:02 +0200

  * addressed static analysis concern about possible null pointer (Vern Paxson, Corelight)

  * tweaks for "-O C++" of BTest's with conditional code (Vern Paxson, Corelight)

6.0.0-dev.300 | 2023-04-03 13:32:40 +0200

  * minor ZAM BTest baseline updates (Vern Paxson, Corelight)

  * fixed type mismatch for ssl_certificate_request event (Vern Paxson, Corelight)

  * skip ZAM optimization of invalid scripts (Vern Paxson, Corelight)

  * extended script validation to be call-able on a per-function basis (Vern Paxson, Corelight)
6.0.0-dev.295 | 2023-04-03 13:19:20 +0200

  * GH-2296: PktSrc: Introduce Pcap::non_fd_timeout (Arne Welzel, Corelight)

    Increasing this value 10x has lowered CPU usage on a Myricom based
    deployment significantly with reportedly no adverse side-effects.

    After reviewing the Zeek 3 IO loop, my hunch is that previously when
    no packets were available, we'd sleep 20usec every loop iteration after
    calling ->Process() on the packet source. With current master ->Process()
    is called 10 times on a packet source before going to sleep just once
    for 20 usec. Likely this explains the increased CPU usage reported.

    It's probably too risky to increase the current value, so introduce
    a const &redef value for advanced users to tweak it. A middle ground
    might be to lower ``io_poll_interval_live`` to 5 and increase the new
    ``Pcap::non_fd_timeout`` setting to 100usec.

    While this doesn't really fix #2296, we now have enough knobs for tweaking.

    Closes #2296.
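    The middle-ground tuning mentioned above, as a sketch of the two redefs
    involved (the values come from the entry, not a recommendation):

        # For non-selectable (e.g. Myricom) packet sources.
        redef io_poll_interval_live = 5;
        redef Pcap::non_fd_timeout = 100 usec;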
6.0.0-dev.293 | 2023-03-31 10:38:45 +0200

  * Add test cases for Geneve. (Jan Grashoefer, Corelight)

  * Fix forwarding of tunnelled packets. (Jan Grashoefer, Corelight)

    This fixes a bug for AYIYA, Geneve and VXLAN forwarding encapsulated
    content only if it's longer than their header. A new weird is introduced
    to indicate empty tunnels.

6.0.0-dev.290 | 2023-03-31 10:33:53 +0200

  * Type: Add TypeManager->TypeList() and use for ListVal() (Arne Welzel, Corelight)

    It turns out that for every ListVal we construct, we also allocate
    and construct a new TypeList instance, even though they are all the
    same. Pre-create and cache the type instances in a new TypeManager.

    The following script runs ~10% faster for me after this change.

        global tbl: table[string] of string;
        global i = 0;
        while ( ++i < 10000000 )
            tbl["a"] = "a";

6.0.0-dev.288 | 2023-03-30 13:09:18 -0700

  * Add reporter framework to core.packet-analyzer-override btest (Tim Wojtulewicz, Corelight)

6.0.0-dev.286 | 2023-03-30 09:52:26 -0700

  * Consider cap len when forwarding into packet analysis. (Jan Grashoefer, Corelight)

    When forwarding into packet analysis from TCP or UDP, the protocol's
    length fields were trusted. This might be dangerous in case of truncated
    packets.

6.0.0-dev.284 | 2023-03-30 09:50:57 -0700

  * Reintroduce info when overriding packet analyzer mappings. (Jan Grashoefer, Corelight)

    In #2464 the warning when overriding a packet analyzer mapping was
    removed. While a warning seems indeed excessive, some info would still
    be nice to have.

  * Update Cirrus memory limits to avoid timeouts when building (Tim Wojtulewicz)

6.0.0-dev.280 | 2023-03-30 09:35:48 +0200

  * Analyzer: Add GetChildAnalyzer() and IsPreventedChildAnalyzer() (Arne Welzel, Corelight)

    GetChildAnalyzer() has the same semantics as HasChildAnalyzer(), but returns
    the raw pointer to the child analyzer. Main issue is memory management: That
    pointer is not guaranteed to stay valid. It might be disabled from script
    land or otherwise removed from the analyzer tree and subsequently
    deleted in one of the Forward* methods.

    IsPreventedChildAnalyzer() provides minimal introspection for prevented
    child analyzer tags and allows removing some duplicated code.

6.0.0-dev.278 | 2023-03-29 14:34:26 -0700

  * Bump Spicy and spicy-plugin to their latest development snapshots. (Benjamin Bannier, Corelight)

6.0.0-dev.276 | 2023-03-28 09:19:59 -0700

  * Fix a bunch of -Wunqualified-std-cast-call warnings from clang 15 (Tim Wojtulewicz)

6.0.0-dev.274 | 2023-03-27 21:32:53 +0200

  * btest/broker: Add test using Python bindings and zeek -r (Arne Welzel, Corelight)

    This came up on community.zeek.org as a use-case and there
    were a few gaps and holes, so add a test showing minimally
    the current behavior.

  * Broker: Remove network time initialization (Arne Welzel, Corelight)

    Remove the special case of initializing network time if it hasn't
    happened yet. The argument about broker.log containing 0.0 timestamps
    is more a problem of the log, not something that would justify modifying
    network time globally. For broker.log and possibly cluster.log, it might
    be more reasonable to use current time, anyway.

    I was a bit wary about tables backed by broker stores being populated
    with network_time set to 0.0, but there seems to exist logic and assumptions
    that this is okay: It should be the same as if one populates a table with
    expirations set within zeek_init().

    In fact, staring a bit more, *not setting* network time might be more correct,
    as workers that don't see packets would never set zeek_start_network_time,
    which is used within the expiration computation.

6.0.0-dev.271 | 2023-03-24 09:12:04 +0100

  * cirrus: No container images on PRs (Arne Welzel, Corelight)

6.0.0-dev.268 | 2023-03-23 13:40:26 +0100

  * Ensure master/release builds run for zeek-security (Tim Wojtulewicz, Corelight)

6.0.0-dev.266 | 2023-03-23 13:35:28 +0100

  * iosource: Remove non-existing ManagerBase friend (Arne Welzel, Corelight)

  * broker::Manager: use_realtime_false when allow_network_time_forward=F (Arne Welzel, Corelight)

    Run the broker in non-realtime mode when allow_network_time_forward=F.
    This may need an extra option for really advanced use-cases, but for
    now this seems reasonable.

  * A set of tests around network time handling (Arne Welzel, Corelight)

  * broker::Manager: No more network_time forwarding (Arne Welzel, Corelight)

    network_time forwarding will happen in the main loop before draining the
    EventMgr, so timers/events scheduled based on broker messages should
    behave similarly. This also keeps network_time unaffected during
    non pseudo-realtime trace processing.

  * TimerMgr: No network_time updates in Process() (Arne Welzel, Corelight)

    network_time forwarding will now happen centrally in the main loop.
    The TimerMgr returns a valid timeout that can be waited for and will
    trigger network_time advancement, so we don't need to do it.

  * Event: No more network_time updates (Arne Welzel, Corelight)

    The whole docs read like this was only used to update the
    network_time, so there may be a follow-up to ditch EventMgr
    even being an IOSource (which could be argued it's not IO).

  * RunState: Implement forward_network_time_if_applicable() (Arne Welzel, Corelight)

    Add a central place for the decision when it's okay to update network time
    to the current time (wallclock). It checks for pseudo_realtime and packet
    source existence as well as packet source idleness.

    A new const &redef allows completely disabling forwarding of network time.

  * PktSrc: Add HasBeenIdleFor() method (Arne Welzel, Corelight)

    This method will be used by the main loop to determine if an interface
    has become idle. Initially this will be used to determine when it is
    acceptable to update network_time to the current time (wallclock).

  * PktSrc: Move termination pseudo_realtime special case to RunState (Arne Welzel, Corelight)

    This also removes setting pseudo_realtime to 0.0 in the main loop
    when the packet source has been closed. I had tried to understand
    the implications; it actually seems that if we shut down the
    iosource::Manager anyway, it shouldn't matter, and it's just confusing.

  * Bump zeekctl and doc submodule for updates regarding Site::local_nets (Christian Kreibich, Corelight)

  * Update doc submodule [nomail] [skip ci] (zeek-bot)

6.0.0-dev.250 | 2023-03-21 19:17:36 +0100

  * Expr: Factor out type tag extraction (Arne Welzel, Corelight)

  * Var: Add version to deprecated initialization (Arne Welzel, Corelight)

    This has been added for Zeek 5.0, so mark it for removal in Zeek 6.1.

  * Stmt: Error on deprecated when/local usage (Arne Welzel, Corelight)

    This has been around since Zeek v4.1, so it was warned about in Zeek 5.0
    LTS and we could've removed it with 5.1.

    Also removed merge_top_scope() from the zeek::detail namespace, it's
    unused now.

    Updated the when-aggregates test somehow. I'm not quite sure what had
    been tested there :-/

  * Expr: Remove vector scalar operations (Arne Welzel, Corelight)

    These seem to have been deprecated since 2018, so do it now.
    Unfortunately the message didn't contain a version when it'll
    be removed, but it's been long enough.

  * parse.y: Make out-of-scope use errors (Arne Welzel, Corelight)

    This was marked to be done in Zeek 5.1, so do it now. The message
    didn't include a version, unfortunately, but minimally there was
    a comment when it should happen.

  * scan.l: Remove unused deprecated_attr (Arne Welzel, Corelight)
6.0.0-dev.243 | 2023-03-21 16:40:35 +0100

  * Add pcap_file option to supervised nodes. (Jan Grashoefer, Corelight)

    This allows starting supervised nodes with a pcap_file argument
    rather than an interface.
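    A minimal sketch of using the new option; `Supervisor::NodeConfig` and
    `Supervisor::create()` are the pre-existing Supervisor API, only the
    `pcap_file` field comes from this change and the path is a placeholder:

        event zeek_init()
            {
            if ( Supervisor::is_supervisor() )
                {
                # Launch a node that reads a trace instead of sniffing an interface.
                local cfg = Supervisor::NodeConfig($name="worker-1", $pcap_file="/tmp/example.pcap");
                Supervisor::create(cfg);
                }
            }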
6.0.0-dev.241 | 2023-03-21 10:10:50 +0100

  * Add a new plugin test with verbose IO source output (Arne Welzel, Corelight)

    This is mostly for documentation/verification purposes of how the IO loop
    currently does draining and when it picks up FD based (non packet) IO
    sources. For example, it shows that currently FD based sources are processed
    fairly delayed and that we now also process two timeout sources that are ready.

  * iosource: Make poll intervals configurable (Arne Welzel, Corelight)

    This probably should not be changed by users, but it's useful for
    testing and experimentation rather than needing to recompile.

    Processing 100 packets without checking an FD based IO source can
    actually mean that FD based sources are never checked during a read
    of a very small pcap...

  * iomanager/Poll: Add zero-timeout timeout_src also when there are other events ready (Arne Welzel, Corelight)

    This would generally happen the next loop iteration around anyway, but
    it seems nice to ensure a zero timeout source will be processed at the same
    time as sources with ready FDs.

  * iomanager: Collect all sources with zero timeouts as ready (Arne Welzel, Corelight)

    Previously, if two iosources returned 0.0 as their timeout, only
    one of them would be considered ready. An always ready source
    therefore may starve other ready ones due to this, and minimally
    this behavior seems surprising.

    Offline pcap sources are always ready and return 0.0 for
    GetNextTimeout() (unless in pseudo-realtime), so we can
    also remove the offline source special case.

    One subtle side-effect of this change is that if an IO source
    returns a 0.0 timeout *and* its file descriptor is ready in
    the same loop iteration, it may be processed twice.

6.0.0-dev.235 | 2023-03-20 10:04:55 -0700

  * ci/centos-stream-8: Add which command (Arne Welzel, Corelight)

  * Update zeekctl test baseline for command.peerstatus test (Tim Wojtulewicz)

  * Update zeekctl test baseline for changes to Site::local_nets (Tim Wojtulewicz)

6.0.0-dev.231 | 2023-03-18 17:10:33 -0700

  * Update Broker for fixing #2853 (Dominik Charousset, Corelight)

6.0.0-dev.229 | 2023-03-17 14:39:03 -0700

  * Fix integer signedness comparison warning with GCC 12.2 (Christian Kreibich, Corelight)

  * Update doc submodule (zeek-bot)

6.0.0-dev.226 | 2023-03-16 10:18:43 +0100

  * dns: Remove AD and CD flags from log (Arne Welzel, Corelight)
CMakeLists.txt (410 changed lines)

@@ -9,8 +9,11 @@ endif()
 project(Zeek C CXX)
 
-option(ZEEK_STANDALONE "Is Zeek compiled stand-alone or embedded in a parent project." ON)
-option(ENABLE_ZEEK_UNIT_TESTS "Should the doctest unit tests be built?" ON)
+option(ZEEK_STANDALONE "Build Zeek as stand-alone binary?" ON)
+option(ENABLE_ZEEK_UNIT_TESTS "Build the C++ (doctest) unit tests?" ON)
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON
+    CACHE INTERNAL "Write JSON compile commands database")
+set(ZEEK_CXX_STD cxx_std_17 CACHE STRING "The C++ standard to use.")
 
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_BINARY_DIR})
 list(APPEND CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR})
@@ -114,6 +117,254 @@ include(cmake/CommonCMakeConfig.cmake)
 include(cmake/FindClangTidy.cmake)
 include(cmake/CheckCompilerArch.cmake)
 
+########################################################################
+## Main targets and utilities.
+
+# Variable for referring back to Zeek's top-level source dir. Used for plugins
+# to tell them where to find the Zeek headers.
+set(ZEEK_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
+
+# Tell dynamic plugins where to find scripts such as
+# zeek-plugin-create-package.sh. Needed by ZeekPluginConfig.cmake.in.
+set(ZEEK_PLUGIN_SCRIPTS_PATH "${PROJECT_SOURCE_DIR}/cmake")
+
+# Our C++17 base target for propagating compiler and linker flags.
+# Note: for now, we only use it for passing library dependencies around.
+add_library(zeek_internal INTERFACE)
+add_library(Zeek::Internal ALIAS zeek_internal)
+set_target_properties(zeek_internal PROPERTIES EXPORT_NAME Internal)
+install(TARGETS zeek_internal EXPORT ZeekTargets)
+target_compile_features(zeek_internal INTERFACE ${ZEEK_CXX_STD})
+
+# Target for bundling the creation of auto-generated files.
+add_custom_target(zeek_autogen_files)
+
+# Define our main targets and place the output files under src (for historic
+# reasons and backwards compatibility).
+if (ZEEK_STANDALONE)
+    add_executable(zeek_exe)
+    target_link_libraries(zeek_exe PRIVATE $<BUILD_INTERFACE:zeek_internal>)
+    add_dependencies(zeek_exe zeek_autogen_files)
+    set_target_properties(zeek_exe PROPERTIES RUNTIME_OUTPUT_NAME zeek)
+    if (NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY)
+        set_target_properties(zeek_exe PROPERTIES RUNTIME_OUTPUT_DIRECTORY src)
+    endif()
+    install(TARGETS zeek_exe RUNTIME DESTINATION bin)
+    # Export symbols from zeek executable for use by plugins
+    set_target_properties(zeek_exe PROPERTIES ENABLE_EXPORTS ON)
+    if ( MSVC )
+        set(WINDOWS_EXPORT_ALL_SYMBOLS ON)
+    endif ()
+    # Tell zeek_target_link_libraries to add library dependencies as PRIVATE.
+    set(zeek_exe_access PRIVATE)
+    # Also build the static library when asked for via Conan.
+    if (CONAN_EXPORTED)
+        add_library(zeek_lib STATIC)
+    endif ()
+else ()
+    add_library(zeek_lib STATIC)
+endif()
+
+if (TARGET zeek_lib)
+    target_link_libraries(zeek_lib PRIVATE $<BUILD_INTERFACE:zeek_internal>)
+    add_dependencies(zeek_lib zeek_autogen_files)
+    set_target_properties(zeek_lib PROPERTIES RUNTIME_OUTPUT_NAME libzeek)
+    if (NOT CMAKE_LIBRARY_OUTPUT_DIRECTORY)
+        set_target_properties(zeek_lib PROPERTIES LIBRARY_OUTPUT_DIRECTORY src)
+    endif()
+    install(TARGETS zeek_lib LIBRARY DESTINATION lib)
+    # Tell zeek_target_link_libraries to add library dependencies as PRIVATE.
+    set(zeek_lib_access PRIVATE)
+endif()
+
+# When building our fuzzers, we also need one extra top-level target that
+# bundles all of our object libraries and other dependencies.
+if ( ZEEK_ENABLE_FUZZERS )
+    add_library(zeek_fuzzer_shared SHARED)
+    target_link_libraries(zeek_fuzzer_shared PUBLIC $<BUILD_INTERFACE:zeek_internal>)
+    # Tell zeek_target_link_libraries to add library dependencies as PUBLIC.
+    set(zeek_fuzzer_shared_access PUBLIC)
+endif ()
+
+# Convenience function for adding library dependencies to the main target(s).
+function(zeek_target_link_libraries lib_target)
+    foreach (name zeek_exe zeek_lib zeek_fuzzer_shared)
+        if (TARGET ${name})
+            target_link_libraries(${name} ${${name}_access} ${lib_target})
+        endif ()
+    endforeach()
+endfunction()
+
+function(zeek_include_directories)
+    foreach (name zeek_exe zeek_lib zeek_fuzzer_shared)
+        if (TARGET ${name})
+            target_include_directories(${name} ${${name}_access} ${ARGN})
+        endif ()
+    endforeach ()
+endfunction ()
+
+# Convenience function for adding a dependency to the main target(s).
+function(zeek_add_dependencies dep)
+    foreach (name zeek_exe zeek_lib zeek_fuzzer_shared)
+        if (TARGET ${name})
+            add_dependencies(${name} ${dep})
+        endif ()
+    endforeach()
+endfunction()
+
+# Interface library for propagating extra flags and include paths to dynamically
+# loaded plugins. Also propagates include paths and C++17 mode on the install
+# interface.
+add_library(zeek_dynamic_plugin_base INTERFACE)
+target_link_libraries(zeek_dynamic_plugin_base
+    INTERFACE
+        $<BUILD_INTERFACE:zeek_internal>)
+target_include_directories(zeek_dynamic_plugin_base
+    INTERFACE
+        $<INSTALL_INTERFACE:include>
+        $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
+        $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>)
+target_compile_features(zeek_dynamic_plugin_base INTERFACE ${ZEEK_CXX_STD})
+add_library(Zeek::DynamicPluginBase ALIAS zeek_dynamic_plugin_base)
+set_target_properties(
+    zeek_dynamic_plugin_base PROPERTIES
+        EXPORT_NAME DynamicPluginBase)
+install(TARGETS zeek_dynamic_plugin_base EXPORT ZeekTargets)
+
+# On macOS, we need to tell the linker that the modules are allowed to have
+# undefined symbols.
+if (CMAKE_SYSTEM_NAME MATCHES "Darwin")
+    target_link_options(
+        zeek_dynamic_plugin_base
+        INTERFACE
+            -undefined
+            dynamic_lookup
+            -Wl,-bind_at_load)
+endif ()
+
+function(add_zeek_dynamic_plugin_build_interface_include_directories)
+    foreach ( path ${ARGV} )
+        target_include_directories(
+            zeek_dynamic_plugin_base
+            INTERFACE
+                $<BUILD_INTERFACE:${path}>
+        )
+    endforeach ()
+endfunction()
+
+add_zeek_dynamic_plugin_build_interface_include_directories(
+    ${PROJECT_SOURCE_DIR}/src/include
+    ${PROJECT_SOURCE_DIR}/auxil/binpac/lib
+    ${PROJECT_SOURCE_DIR}/auxil/broker/include
+    ${PROJECT_SOURCE_DIR}/auxil/paraglob/include
+    ${PROJECT_SOURCE_DIR}/auxil/rapidjson/include
+    ${CMAKE_BINARY_DIR}/src
+    ${CMAKE_BINARY_DIR}/src/include
+    ${CMAKE_BINARY_DIR}/auxil/binpac/lib
+    ${CMAKE_BINARY_DIR}/auxil/broker/include
+)
+
+# Convenience function for adding an OBJECT library that feeds directly into the
+# main target(s).
+#
+# Usage:
+# zeek_add_subdir_library(
+#   <name>
+#   SOURCES ...
+#   [INCLUDE_DIRS ...]
+#   [DEPENDENCIES ...]
+#   [INTERNAL_DEPENDENCIES ...]
+#   [BIFS ...]
+# )
+function(zeek_add_subdir_library name)
+    # Parse arguments.
+    set(fn_varargs INCLUDE_DIRS DEPENDENCIES INTERNAL_DEPENDENCIES SOURCES BIFS)
+    cmake_parse_arguments(FN_ARGS "" "" "${fn_varargs}" ${ARGN})
+    if (NOT FN_ARGS_SOURCES)
+        message(FATAL_ERROR "zeek_add_subdir_library called without any SOURCES")
+    endif()
+
+    # Create target and add the sources.
+    set(target_name "zeek_${name}_obj")
+    add_library(${target_name} OBJECT ${FN_ARGS_SOURCES})
+    add_dependencies(${target_name} zeek_autogen_files)
+    target_link_libraries(${target_name} PRIVATE $<BUILD_INTERFACE:zeek_internal>)
+    add_clang_tidy_files(${FN_ARGS_SOURCES})
+
+    # Take care of compiling BIFs.
+    if (FN_ARGS_BIFS)
+        foreach ( bif ${FN_ARGS_BIFS} )
+            # Generate the target and add the extra dependency.
+            bif_target(${bif})
+        endforeach ()
+    endif()
+
+    # Optionally add include directories and extra dependencies.
+    if ( FN_ARGS_INCLUDE_DIRS )
+        target_include_directories(
+            ${target_name}
+            BEFORE
+            PRIVATE
+                ${FN_ARGS_INCLUDE_DIRS})
+    endif ()
+    if ( FN_ARGS_DEPENDENCIES )
+        target_link_libraries(${target_name} PRIVATE ${FN_ARGS_DEPENDENCIES})
+    endif ()
+    if ( FN_ARGS_INTERNAL_DEPENDENCIES )
+        add_dependencies(${target_name} ${FN_ARGS_INTERNAL_DEPENDENCIES})
+    endif ()
+
+    # Feed into the main Zeek target(s).
+    zeek_target_link_libraries(${target_name})
+endfunction()
+
+########################################################################
+## Utility function for forcing CMake to re-run if files change on disk.
+
+function(zeek_watch_files)
+    set_property(
+        DIRECTORY
+        APPEND
+        PROPERTY CMAKE_CONFIGURE_DEPENDS ${ARGN}
+    )
+endfunction()
+
+########################################################################
+## Create empty __load__.zeek stubs (override pre-existing ones).
+
+function(zeek_create_load_script_stubs)
+    set(file_comment "# Warning, this is an autogenerated file!\n")
+    foreach ( fpath ${ARGN} )
+        file(WRITE "${CMAKE_BINARY_DIR}/${fpath}" "${file_comment}")
+        zeek_watch_files("${CMAKE_BINARY_DIR}/${fpath}")
+    endforeach ()
+endfunction ()
+
+# Note: these files are filled from BifCl.cmake via `file(APPEND ...)`
+zeek_create_load_script_stubs(
+    scripts/builtin-plugins/__load__.zeek
+    scripts/base/bif/plugins/__load__.zeek
+    scripts/base/bif/__load__.zeek
+)
+
+########################################################################
+## Create empty __all__*.cc stubs (override pre-existing ones).
+
+function(zeek_create_bif_autogen_stubs)
+    set(file_comment "// Warning, this is an autogenerated file!\n")
+    foreach ( fpath ${ARGN} )
+        file(WRITE "${CMAKE_BINARY_DIR}/${fpath}" "${file_comment}")
+        zeek_watch_files("${CMAKE_BINARY_DIR}/${fpath}")
+    endforeach ()
+endfunction ()
+
+# Note: these files are filled from BifCl.cmake via `file(APPEND ...)`.
+zeek_create_bif_autogen_stubs(
+    src/__all__.bif.cc
+    src/__all__.bif.init.cc
+    src/__all__.bif.register.cc
+)
+
 ########################################################################
 ## Project/Build Configuration
@@ -161,22 +412,17 @@ get_filename_component(ZEEK_SCRIPT_INSTALL_PATH ${ZEEK_SCRIPT_INSTALL_PATH}
 # plugins, etc.
 set(ZEEK_LIBDIR_PATH ${CMAKE_INSTALL_FULL_LIBDIR}/zeek)
 
-if (ZEEK_PLUGIN_DIR)
-    set(BRO_PLUGIN_INSTALL_PATH ${ZEEK_PLUGIN_DIR}
-        CACHE STRING "Installation path for plugins" FORCE)
-else ()
-    set(BRO_PLUGIN_INSTALL_PATH ${ZEEK_LIBDIR_PATH}/plugins
+if (NOT ZEEK_PLUGIN_DIR)
+    set(ZEEK_PLUGIN_DIR ${ZEEK_LIBDIR_PATH}/plugins
         CACHE STRING "Installation path for plugins" FORCE)
 endif()
 
-set(bro_plugin_install_path "${BRO_PLUGIN_INSTALL_PATH}")
 set(cmake_binary_dir "${CMAKE_BINARY_DIR}")
 set(cmake_current_binary_dir "${CMAKE_CURRENT_BINARY_DIR}")
 set(cmake_install_prefix "${CMAKE_INSTALL_PREFIX}")
 set(cmake_source_dir "${CMAKE_SOURCE_DIR}")
 set(zeek_script_install_path "${ZEEK_SCRIPT_INSTALL_PATH}")
 if ( MSVC )
-    string(REGEX REPLACE "^([A-Za-z]):/(.*)" "/\\1/\\2" bro_plugin_install_path "${bro_plugin_install_path}")
     string(REGEX REPLACE "^([A-Za-z]):/(.*)" "/\\1/\\2" cmake_binary_dir "${cmake_binary_dir}")
     string(REGEX REPLACE "^([A-Za-z]):/(.*)" "/\\1/\\2" cmake_current_binary_dir "${cmake_current_binary_dir}")
     string(REGEX REPLACE "^([A-Za-z]):/(.*)" "/\\1/\\2" cmake_install_prefix "${cmake_install_prefix}")
@@ -184,6 +430,9 @@ if ( MSVC )
     string(REGEX REPLACE "^([A-Za-z]):/(.*)" "/\\1/\\2" zeek_script_install_path "${zeek_script_install_path}")
 endif ()
 
+# Set the path where we install the ZeekConfig.cmake file and related files.
+set(ZEEK_CMAKE_CONFIG_DIR "${CMAKE_INSTALL_PREFIX}/share/zeek/cmake")
+
 if ( NOT ZEEK_ETC_INSTALL_DIR )
     set(ZEEK_ETC_INSTALL_DIR ${ZEEK_ROOT_DIR}/etc)
 endif ()
@ -247,8 +496,10 @@ set(VERSION_C_IDENT "${VERSION}_plugin_${API_VERSION}")
|
||||||
string(REGEX REPLACE "-[0-9]*$" "_git" VERSION_C_IDENT "${VERSION_C_IDENT}")
|
string(REGEX REPLACE "-[0-9]*$" "_git" VERSION_C_IDENT "${VERSION_C_IDENT}")
|
||||||
string(REGEX REPLACE "[^a-zA-Z0-9_\$]" "_" VERSION_C_IDENT "${VERSION_C_IDENT}")
|
string(REGEX REPLACE "[^a-zA-Z0-9_\$]" "_" VERSION_C_IDENT "${VERSION_C_IDENT}")
|
||||||
|
|
||||||
if(${ENABLE_DEBUG})
|
if(ENABLE_DEBUG)
|
||||||
set(VERSION_C_IDENT "${VERSION_C_IDENT}_debug")
|
set(VERSION_C_IDENT "${VERSION_C_IDENT}_debug")
|
||||||
|
target_compile_definitions(zeek_internal INTERFACE DEBUG)
|
||||||
|
target_compile_definitions(zeek_dynamic_plugin_base INTERFACE DEBUG)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if ( NOT BINARY_PACKAGING_MODE )
|
if ( NOT BINARY_PACKAGING_MODE )
|
||||||
|
@ -425,6 +676,22 @@ if ( NOT MSVC )
|
||||||
endif ()
|
endif ()
|
||||||
FindRequiredPackage(ZLIB)
|
FindRequiredPackage(ZLIB)
|
||||||
|
|
||||||
|
# Forward user-defined hint for OpenSSL to the plugins. Use a cache variable to
|
||||||
|
# make sure this variable survives CMake re-runs.
|
||||||
|
# Note: This variable is picked up in ZeekPluginConfig.cmake.in.
|
||||||
|
if (OPENSSL_ROOT_DIR)
|
||||||
|
set(ZeekOpenSSLHint "${OPENSSL_ROOT_DIR}" CACHE INTERNAL "" FORCE)
|
||||||
|
elseif (DEFINED ENV{OPENSSL_ROOT_DIR})
|
||||||
|
set(ZeekOpenSSLHint "$ENV{OPENSSL_ROOT_DIR}" CACHE INTERNAL "" FORCE)
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
# Forward PKG_CONFIG_PATH to the plugins. Use a cache variable to make sure this
|
||||||
|
# variable survives CMake re-runs.
|
||||||
|
# Note: This variable is picked up in ZeekPluginConfig.cmake.in.
|
||||||
|
if (DEFINED ENV{PKG_CONFIG_PATH})
|
||||||
|
set(ZeekPkgConfigPath "$ENV{PKG_CONFIG_PATH}" CACHE INTERNAL "" FORCE)
|
||||||
|
endif ()
|
||||||
|
|
||||||
# Installation directory for the distribution's Python modules. An
|
# Installation directory for the distribution's Python modules. An
|
||||||
# override via configure's --python-dir wins, specifying a directory
|
# override via configure's --python-dir wins, specifying a directory
|
||||||
# explicitly. Next is --python-prefix, which includes a versioned
|
# explicitly. Next is --python-prefix, which includes a versioned
|
||||||
|
@ -457,11 +724,28 @@ if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/auxil/binpac/CMakeLists.txt)
|
||||||
|
|
||||||
add_subdirectory(auxil/binpac)
|
add_subdirectory(auxil/binpac)
|
||||||
set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED})
|
set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED})
|
||||||
|
|
||||||
|
# FIXME: avoid hard-coding a path for multi-config generator support.
|
||||||
|
# See the TODO in ZeekPluginConfig.cmake.in.
|
||||||
|
set(BINPAC_EXE_PATH "${CMAKE_BINARY_DIR}/auxil/binpac/src/binpac${CMAKE_EXECUTABLE_SUFFIX}")
|
||||||
endif ()
|
endif ()
|
||||||
FindRequiredPackage(BinPAC)
|
FindRequiredPackage(BinPAC)
|
||||||
|
|
||||||
|
# Add an alias (used by our plugin setup).
|
||||||
|
add_executable(Zeek::BinPAC ALIAS binpac)
|
||||||
|
|
||||||
if ( NOT BIFCL_EXE_PATH )
|
if ( NOT BIFCL_EXE_PATH )
|
||||||
add_subdirectory(auxil/bifcl)
|
add_subdirectory(auxil/bifcl)
|
||||||
|
add_executable(Zeek::BifCl ALIAS bifcl)
|
||||||
|
# FIXME: avoid hard-coding a path for multi-config generator support.
|
||||||
|
# See the TODO in ZeekPluginConfig.cmake.in.
|
||||||
|
set(BIFCL_EXE_PATH "${CMAKE_BINARY_DIR}/auxil/bifcl/bifcl${CMAKE_EXECUTABLE_SUFFIX}")
|
||||||
|
set(_bifcl_exe_path "included")
|
||||||
|
else ( )
|
||||||
|
add_executable(Zeek::BifCl IMPORTED)
|
||||||
|
set_property(TARGET Zeek::BifCl PROPERTY
|
||||||
|
IMPORTED_LOCATION "${BIFCL_EXE_PATH}")
|
||||||
|
set(_bifcl_exe_path "BIFCL_EXE_PATH")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if ( NOT GEN_ZAM_EXE_PATH )
|
if ( NOT GEN_ZAM_EXE_PATH )
|
||||||
|
@ -513,14 +797,13 @@ if ( MSVC )
|
||||||
endif ()
|
endif ()
|
||||||
set(zeekdeps ${zeekdeps} paraglob)
|
set(zeekdeps ${zeekdeps} paraglob)
|
||||||
|
|
||||||
|
# Note: Broker gets some special attention in ZeekConfig.cmake.in.
|
||||||
if ( Broker_ROOT )
|
if ( Broker_ROOT )
|
||||||
find_package(Broker REQUIRED PATHS "${Broker_ROOT}")
|
find_package(Broker REQUIRED CONFIG)
|
||||||
set(zeekdeps ${zeekdeps} ${BROKER_LIBRARY})
|
list(APPEND zeekdeps ${BROKER_LIBRARY})
|
||||||
set(broker_includes ${BROKER_INCLUDE_DIR})
|
|
||||||
elseif ( BROKER_ROOT_DIR )
|
|
||||||
find_package(Broker REQUIRED PATHS "${BROKER_ROOT_DIR}")
|
|
||||||
set(zeekdeps ${zeekdeps} ${BROKER_LIBRARY})
|
|
||||||
set(broker_includes ${BROKER_INCLUDE_DIR})
|
set(broker_includes ${BROKER_INCLUDE_DIR})
|
||||||
|
set(ZEEK_HAS_EXTERNAL_BROKER ON)
|
||||||
|
set(ZEEK_HAS_STATIC_BROKER OFF)
|
||||||
else ()
|
else ()
|
||||||
if ( ZEEK_SANITIZERS )
|
if ( ZEEK_SANITIZERS )
|
||||||
set(BROKER_SANITIZERS ${ZEEK_SANITIZERS})
|
set(BROKER_SANITIZERS ${ZEEK_SANITIZERS})
|
||||||
|
@ -539,11 +822,26 @@ else ()
|
||||||
set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED})
|
set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED})
|
||||||
|
|
||||||
if ( BUILD_STATIC_BROKER )
|
if ( BUILD_STATIC_BROKER )
|
||||||
set(zeekdeps ${zeekdeps} broker_static)
|
list(APPEND zeekdeps broker_static)
|
||||||
else()
|
else()
|
||||||
set(zeekdeps ${zeekdeps} broker)
|
list(APPEND zeekdeps broker)
|
||||||
endif()
|
endif()
|
||||||
set(broker_includes ${CMAKE_CURRENT_SOURCE_DIR}/auxil/broker/include ${CMAKE_CURRENT_BINARY_DIR}/auxil/broker/include)
|
|
||||||
|
set(
|
||||||
|
broker_includes
|
||||||
|
${CMAKE_CURRENT_SOURCE_DIR}/auxil/broker/include
|
||||||
|
${CMAKE_CURRENT_BINARY_DIR}/auxil/broker/include)
|
||||||
|
|
||||||
|
if (BUILD_STATIC_BROKER)
|
||||||
|
set(ZEEK_HAS_STATIC_BROKER ON)
|
||||||
|
else ()
|
||||||
|
set(ZEEK_HAS_STATIC_BROKER OFF)
|
||||||
|
endif ()
|
||||||
|
set(ZEEK_HAS_EXTERNAL_BROKER OFF)
|
||||||
|
|
||||||
|
# Tell plugins where to find the Broker CMake package in the source tree.
|
||||||
|
# This variable is picked up in ZeekPluginConfig.cmake.in.
|
||||||
|
set(ZEEK_PLUGIN_BROKER_PATH "${CMAKE_CURRENT_BINARY_DIR}/auxil/broker")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if ( NOT DISABLE_SPICY )
|
if ( NOT DISABLE_SPICY )
|
||||||
|
@ -598,7 +896,7 @@ if ( NOT DISABLE_SPICY )
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
set(SPICY_PLUGIN_BINARY_PATH ${CMAKE_BINARY_DIR}/src/builtin-plugins/spicy-plugin)
|
set(SPICY_PLUGIN_BINARY_PATH ${CMAKE_BINARY_DIR}/src/builtin-plugins/spicy-plugin)
|
||||||
string(APPEND ZEEK_INCLUDE_PLUGINS ";${SPICY_PLUGIN_PATH}")
|
list(APPEND ZEEK_INCLUDE_PLUGINS ${SPICY_PLUGIN_PATH})
|
||||||
else ()
|
else ()
|
||||||
set(HAVE_SPICY no) # evaluated by Spicy plugin build
|
set(HAVE_SPICY no) # evaluated by Spicy plugin build
|
||||||
set(USE_SPICY_ANALYZERS no)
|
set(USE_SPICY_ANALYZERS no)
|
||||||
|
@ -743,10 +1041,30 @@ if ( ${CMAKE_SYSTEM_NAME} MATCHES Linux )
|
||||||
set(AF_PACKET_PLUGIN_PATH ${CMAKE_SOURCE_DIR}/auxil/zeek-af_packet-plugin)
|
set(AF_PACKET_PLUGIN_PATH ${CMAKE_SOURCE_DIR}/auxil/zeek-af_packet-plugin)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
string(APPEND ZEEK_INCLUDE_PLUGINS ";${AF_PACKET_PLUGIN_PATH}")
|
list(APPEND ZEEK_INCLUDE_PLUGINS ${AF_PACKET_PLUGIN_PATH})
|
||||||
endif ()
|
endif ()
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
|
if ( NOT DISABLE_JAVASCRIPT )
|
||||||
|
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/auxil/zeekjs/cmake)
|
||||||
|
find_package(Nodejs)
|
||||||
|
|
||||||
|
if ( NODEJS_FOUND )
|
||||||
|
if ( ${NODEJS_VERSION} VERSION_LESS "16.13.0" )
|
||||||
|
message(STATUS "Node.js version ${NODEJS_VERSION} is too old, need 16.13 or later. Not enabling JavaScript support.")
|
||||||
|
set(ZEEK_HAVE_JAVASCRIPT no)
|
||||||
|
else ()
|
||||||
|
set(ZEEKJS_PLUGIN_PATH ${CMAKE_SOURCE_DIR}/auxil/zeekjs)
|
||||||
|
list(APPEND ZEEK_INCLUDE_PLUGINS ${ZEEKJS_PLUGIN_PATH})
|
||||||
|
set(ZEEK_HAVE_JAVASCRIPT yes)
|
||||||
|
endif ()
|
||||||
|
else ()
|
||||||
|
set(ZEEK_HAVE_JAVASCRIPT no)
|
||||||
|
endif ()
|
||||||
|
endif ()
|
||||||
|
|
||||||
|
set(ZEEK_HAVE_JAVASCRIPT ${ZEEK_HAVE_JAVASCRIPT} CACHE INTERNAL "Zeek has JavaScript support")
|
||||||
|
|
||||||
set(DEFAULT_ZEEKPATH_PATHS . ${ZEEK_SCRIPT_INSTALL_PATH} ${ZEEK_SCRIPT_INSTALL_PATH}/policy ${ZEEK_SCRIPT_INSTALL_PATH}/site ${ZEEK_SCRIPT_INSTALL_PATH}/builtin-plugins)
|
set(DEFAULT_ZEEKPATH_PATHS . ${ZEEK_SCRIPT_INSTALL_PATH} ${ZEEK_SCRIPT_INSTALL_PATH}/policy ${ZEEK_SCRIPT_INSTALL_PATH}/site ${ZEEK_SCRIPT_INSTALL_PATH}/builtin-plugins)
|
||||||
if ( MSVC )
|
if ( MSVC )
|
||||||
list(JOIN DEFAULT_ZEEKPATH_PATHS ";" DEFAULT_ZEEKPATH)
|
list(JOIN DEFAULT_ZEEKPATH_PATHS ";" DEFAULT_ZEEKPATH)
|
||||||
|
@ -909,7 +1227,7 @@ CheckOptionalBuildSources(auxil/zeek-client ZeekClient INSTALL_ZEEK_CLIENT)
|
||||||
if ( NOT DISABLE_SPICY )
|
if ( NOT DISABLE_SPICY )
|
||||||
# The `zeek` binary implicitly depends on the driver object file built
|
# The `zeek` binary implicitly depends on the driver object file built
|
||||||
# as part of `spicy`; make that dependency explicit.
|
# as part of `spicy`; make that dependency explicit.
|
||||||
add_dependencies(zeek spicyz)
|
zeek_add_dependencies(spicyz)
|
||||||
|
|
||||||
if ( NOT SPICY_ROOT_DIR )
|
if ( NOT SPICY_ROOT_DIR )
|
||||||
# Make sure we build targets of spicy-plugin after the `spicy` target.
|
# Make sure we build targets of spicy-plugin after the `spicy` target.
|
||||||
|
@ -957,6 +1275,46 @@ if ("${PROJECT_SOURCE_DIR}" STREQUAL "${CMAKE_SOURCE_DIR}")
|
||||||
ConfigurePackaging(${VERSION})
|
ConfigurePackaging(${VERSION})
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
|
# Refers back to the "distribution prefix". This is the source tree when
|
||||||
|
# referring to Zeek from the build directory and the "share" directory under the
|
||||||
|
# install prefix otherwise.
|
||||||
|
set(ZEEK_DIST_PREFIX
|
||||||
|
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}>
|
||||||
|
$<INSTALL_INTERFACE:${CMAKE_INSTALL_DATAROOTDIR}/foo/bar>)
|
||||||
|
|
||||||
|
# Generate extra config file for the dynamic plugins.
|
||||||
|
configure_file(src/ZeekPluginConfig.cmake.in ZeekPluginConfig.cmake @ONLY)
|
||||||
|
|
||||||
|
# Write the CMake package and version files.
|
||||||
|
configure_package_config_file(
|
||||||
|
"${CMAKE_CURRENT_SOURCE_DIR}/src/ZeekConfig.cmake.in"
|
||||||
|
"${CMAKE_CURRENT_BINARY_DIR}/ZeekConfig.cmake"
|
||||||
|
INSTALL_DESTINATION "${ZEEK_CMAKE_CONFIG_DIR}")
|
||||||
|
write_basic_package_version_file(
|
||||||
|
"${CMAKE_CURRENT_BINARY_DIR}/ZeekConfigVersion.cmake"
|
||||||
|
VERSION ${ZEEK_VERSION_NUMBER}
|
||||||
|
COMPATIBILITY ExactVersion)
|
||||||
|
|
||||||
|
# Write the CMake targets file.
|
||||||
|
export(EXPORT ZeekTargets FILE ZeekTargets.cmake NAMESPACE Zeek::)
|
||||||
|
|
||||||
|
# Write the bootstrap file for dynamic plugins. Needed by ZeekPlugin.cmake.
|
||||||
|
configure_file(src/ZeekPluginBootstrap.cmake.in ZeekPluginBootstrap.cmake @ONLY)
|
||||||
|
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/ZeekPluginBootstrap.cmake"
|
||||||
|
DESTINATION "${ZEEK_CMAKE_CONFIG_DIR}")
|
||||||
|
|
||||||
|
install(
|
||||||
|
FILES
|
||||||
|
"${CMAKE_CURRENT_BINARY_DIR}/ZeekConfig.cmake"
|
||||||
|
"${CMAKE_CURRENT_BINARY_DIR}/ZeekConfigVersion.cmake"
|
||||||
|
DESTINATION
|
||||||
|
"${ZEEK_CMAKE_CONFIG_DIR}")
|
||||||
|
|
||||||
|
install(
|
||||||
|
EXPORT ZeekTargets
|
||||||
|
DESTINATION "${ZEEK_CMAKE_CONFIG_DIR}"
|
||||||
|
NAMESPACE Zeek::)
|
||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
## Build Summary
|
## Build Summary
|
||||||
|
|
||||||
|
@ -970,11 +1328,6 @@ else ()
|
||||||
set(_install_btest_tools_msg "no pcaps")
|
set(_install_btest_tools_msg "no pcaps")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
set(_bifcl_exe_path "included")
|
|
||||||
if ( BIFCL_EXE_PATH )
|
|
||||||
set(_bifcl_exe_path ${BIFCL_EXE_PATH})
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
set(_binpac_exe_path "included")
|
set(_binpac_exe_path "included")
|
||||||
if ( BINPAC_EXE_PATH )
|
if ( BINPAC_EXE_PATH )
|
||||||
set(_binpac_exe_path ${BINPAC_EXE_PATH})
|
set(_binpac_exe_path ${BINPAC_EXE_PATH})
|
||||||
|
@ -1021,7 +1374,7 @@ message(
|
||||||
"\nInstall prefix: ${CMAKE_INSTALL_PREFIX}"
|
"\nInstall prefix: ${CMAKE_INSTALL_PREFIX}"
|
||||||
"\nConfig file dir: ${ZEEK_ETC_INSTALL_DIR}"
|
"\nConfig file dir: ${ZEEK_ETC_INSTALL_DIR}"
|
||||||
"\nLog dir: ${ZEEK_LOG_DIR}"
|
"\nLog dir: ${ZEEK_LOG_DIR}"
|
||||||
"\nPlugin dir: ${BRO_PLUGIN_INSTALL_PATH}"
|
"\nPlugin dir: ${ZEEK_PLUGIN_DIR}"
|
||||||
"\nPython module dir: ${PY_MOD_INSTALL_DIR}"
|
"\nPython module dir: ${PY_MOD_INSTALL_DIR}"
|
||||||
"\nScript dir: ${ZEEK_SCRIPT_INSTALL_PATH}"
|
"\nScript dir: ${ZEEK_SCRIPT_INSTALL_PATH}"
|
||||||
"\nSpool dir: ${ZEEK_SPOOL_DIR}"
|
"\nSpool dir: ${ZEEK_SPOOL_DIR}"
|
||||||
|
@ -1049,6 +1402,7 @@ message(
|
||||||
"\nSpicy: ${_spicy}"
|
"\nSpicy: ${_spicy}"
|
||||||
"\nSpicy plugin: ${_spicy_plugin}"
|
"\nSpicy plugin: ${_spicy_plugin}"
|
||||||
"\nSpicy analyzers: ${USE_SPICY_ANALYZERS}"
|
"\nSpicy analyzers: ${USE_SPICY_ANALYZERS}"
|
||||||
|
"\nJavaScript: ${ZEEK_HAVE_JAVASCRIPT}"
|
||||||
"\n"
|
"\n"
|
||||||
"\nlibmaxminddb: ${USE_GEOIP}"
|
"\nlibmaxminddb: ${USE_GEOIP}"
|
||||||
"\nKerberos: ${USE_KRB5}"
|
"\nKerberos: ${USE_KRB5}"
|
||||||
|
|
190 NEWS

@ -67,9 +67,51 @@ Breaking Changes

  If you see opportunities to extend ``repo-info.json`` with further information,
  please get in touch.

- Plugin authors should raise the minimum required CMake version to 3.15 to
  ensure compatibility with the new CMake scaffolding included in this
  release. Older versions will trigger a warning at configuration time and,
  depending on the functionality included in the plugin, may trigger subsequent
  errors during configuration or build.

- The IRC_Data analyzer declaration has been moved to protocols/irc/IRC.h.

New Functionality
-----------------

- Zeek now features experimental JavaScript support:

    /* hello.js */
    zeek.on('zeek_init', () => {
      console.log('Hello, Zeek!');
    });

    $ zeek ./hello.js
    Hello, Zeek!

  When building Zeek on a system that features a recent (16.13+) version of the
  libnode package with development headers, Zeek automatically includes the
  externally-maintained ZeekJS plugin (https://github.com/corelight/zeekjs) as a
  builtin plugin. This allows Zeek to load and execute JavaScript code located
  in ``.js`` or ``.cjs`` files. When no such files are passed to Zeek, the
  JavaScript engine and Node.js environment aren't initialized and there is no
  runtime impact.

  The Linux distributions Fedora 37 & 38, Ubuntu 22.10, and the upcoming Debian
  12 release provide suitable packages. On other platforms, Node.js can be built
  from source with the ``--shared`` option.

  To disable this functionality, pass ``--disable-javascript`` to configure.

- Zeek now provides native "Community ID" support with a new bif called
  ``community_id_v1()``. Two policy scripts, ``protocols/conn/community-id-logging``
  and ``frameworks/notice/community-id``, extend the respective logs with a
  ``community_id`` field in the same way as the external zeek-community-id plugin
  does. A main difference from the external ``hash_conn()`` bif is that
  ``community_id_v1()`` takes a ``conn_id`` record instead of a ``connection``.

  Loading the new policy scripts and using the external zeek-community-id
  plugin at the same time is unsupported.
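
  A minimal usage sketch (the event choice and output format here are
  illustrative, not taken from the release notes):

    event connection_established(c: connection)
        {
        print fmt("%s community_id=%s", c$uid, community_id_v1(c$id));
        }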

- Introduce a new command-line option ``-V`` / ``--build-info``. It produces
  verbose output in JSON format about the repository state and any included
  plugins.

@ -91,6 +133,64 @@ New Functionality

  ``Z`` field remains unchanged and continues to subsume the two flags, for
  backward compatibility.

- The supervisor framework can now start worker nodes that read from a trace file.
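
  A sketch of how a supervisor-side script might request such a node; the node
  name and trace path are made-up values used only for illustration:

    event zeek_init()
        {
        if ( Supervisor::is_supervisor() )
            {
            local cfg = Supervisor::NodeConfig($name="trace-worker",
                                               $pcap_file="/tmp/example.pcap");
            Supervisor::create(cfg);
            }
        }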

- Zeek can be prevented from updating ``network_time()`` to the current time
  by setting ``allow_network_time_forward=F``. Together with ``set_network_time()``
  or a custom plugin, this allows control of ``network_time()`` without Zeek
  interfering.
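
  A sketch of taking over the clock (the timestamp is an arbitrary example
  value):

    redef allow_network_time_forward = F;

    event zeek_init()
        {
        # Drive network_time() explicitly, e.g. from Broker events or a plugin.
        set_network_time(double_to_time(1681430400.0));
        }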

- The setting ``Pcap::non_fd_timeout`` can be used to configure the timeout
  used by non-selectable packet sources in the idle case (default 20usec).
  This value was previously hard-coded, but increasing it can significantly
  reduce idle CPU usage in low packet rate deployments.
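
  For example (the value is illustrative; the right choice depends on the
  per-worker packet rate and available buffer space):

    redef Pcap::non_fd_timeout = 200usec;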

- Zeek now supports a new ``@pragma`` directive. It currently allows suppressing
  deprecation warnings in Zeek scripts by opening with
  ``@pragma push ignore-deprecations`` and closing with ``@pragma pop``.
  This particularly helps in situations where use of the Zeek base scripts, for
  example to populate a deprecated field for API compatibility, would otherwise
  trigger deprecation warnings.
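
  A minimal sketch of the directive in action (the deprecated identifier is
  just an example):

    @pragma push ignore-deprecations
    event zeek_init()
        {
        # Reading the deprecated Cluster::worker_count no longer warns here.
        print Cluster::worker_count;
        }
    @pragma pop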

- The ``Reporter`` class was extended with a ``Deprecation()`` method to use
  for logging deprecations rather than ad-hoc ``Warning()`` calls.

- The network statistics record type features a new ``pkts_filtered`` field for
  reporting the number of packets that the interface filtered before hand-off to
  Zeek. Packet source implementations are free to fill this field as
  feasible. The default pcap packet source does not provide this information
  because its availability depends on the libpcap version.
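
  A sketch of reading the new field from script-land; since the field is
  optional, check for presence first (the reporting interval is arbitrary):

    global report_filtered: event();

    event report_filtered()
        {
        local ns = get_net_stats();
        if ( ns?$pkts_filtered )
            print fmt("packets filtered by the source: %d", ns$pkts_filtered);
        }

    event zeek_init()
        {
        schedule 10sec { report_filtered() };
        }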

- Packet statistics (packets received, packets dropped, bytes received, packets
  seen on link, and packets filtered) are now reported to the Telemetry
  framework, under the ``zeek_net`` prefix.

- Zeek's cluster framework provides the new ``get_node_count(node_type: NodeType)``
  function to obtain the number of nodes for a given node type as defined in the
  cluster layout. Furthermore, ``broadcast_topics`` was added as a collection of
  broker topics that can be used to reach all nodes in a cluster.
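
  A sketch of both additions (``my_event`` is a hypothetical event used only
  to illustrate broadcasting):

    global my_event: event();

    event zeek_init()
        {
        print fmt("workers in layout: %d", Cluster::get_node_count(Cluster::WORKER));
        print fmt("workers connected: %d", Cluster::get_active_node_count(Cluster::WORKER));

        for ( topic in Cluster::broadcast_topics )
            Broker::publish(topic, my_event);
        }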

- The new ``Cluster::Experimental`` namespace has been introduced to Zeek's cluster
  framework to provide experimental features. Based on practical experience and the
  adoption of an experimental feature, it may become a regular feature or be removed
  in future releases. Experimental features are loaded via
  ``@load policy/frameworks/cluster/experimental``.

- Zeek's cluster framework provides two new experimental events (a usage sketch
  follows below):

  - ``cluster_started``: This event will be broadcast from the manager once all
    cluster-level connections have been established based on the given cluster layout.
    If any node restarts (including the manager itself), the event will neither be
    rebroadcast nor raised locally for the restarted node.

  - ``node_fully_connected``: This event will be sent to the manager and raised
    locally once a cluster node has successfully conducted cluster-level handshakes
    for all its outgoing connections to other cluster nodes based on the given cluster
    layout.

  Note: There is no tracking of cluster node connectivity. Thus, there is no guarantee
  that all peerings still exist at the time these events are raised.
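
  A sketch of reacting to one of these events (assuming the experimental
  scripts are loaded; the event signature is abbreviated here):

    @load policy/frameworks/cluster/experimental

    event Cluster::Experimental::cluster_started()
        {
        print "all cluster-level connections have been established";
        }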

Changed Functionality
---------------------

@ -100,7 +200,7 @@ Changed Functionality

- The ``&on_change`` attribute of sets and tables is propagated through ``copy()``.

- Revert back to the old method of preallocating ``PortVal`` objects for all valid
  port numbers, as it was implemented prior to the Windows port. Not
  preallocating these objects saves a minor amount of memory for short runs of
  Zeek, but comes at a performance cost for having to allocate the objects every

@ -112,6 +212,94 @@ Changed Functionality

  ``--disable-port-prealloc`` will disable the preallocation and enable the map
  lookup version.

- The main-loop has been changed to process all ready IO sources with a
  zero timeout in the same loop iteration. Previously, two zero-timeout
  sources would require two main-loop iterations. Further, when the main-loop
  is polling IO sources with file descriptors, zero-timeout IO sources are
  added to the list of sources to be processed as well.

  The intervals that decide when Zeek checks FD-based IO sources for readiness
  have been made configurable through ``io_poll_interval_default`` and
  ``io_poll_interval_live`` for ease of testing, development and debugging
  of the main-loop.
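
  For example, a development-only sketch (these knobs are not intended for
  production tuning):

    # Check FD-based IO sources more frequently while debugging the main-loop.
    redef io_poll_interval_live = 5;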

- Zeek does not arbitrarily update ``network_time()`` to the current time anymore.
  When a packet source is providing a constant stream of packets, packets
  drive network time. Previously, Zeek updated network time to the current
  time in various situations, disregarding the timestamps of network packets.
  Zeek will now update ``network_time()`` only when a packet source has been
  inactive/idle for an interval of ``packet_source_inactivity_timeout``
  (default 100msec). When a worker process suddenly observes no packets, timer
  expiration may initially be delayed by ``packet_source_inactivity_timeout``.

- Calling ``suspend_processing()`` when reading traces does not update network
  time to the current time anymore. Instead, Zeek keeps ``network_time()``
  according to the trace file. This causes scheduled events to not fire once
  ``suspend_processing()`` is called, which seems more reasonable than
  arbitrarily setting ``network_time()`` to the current time. Processing can still
  be continued from Broker events or input readers.

- Previously, Zeek would process and dispatch events for the very first packet
  in a trace file in order to initialize time, even if ``suspend_processing()``
  was called in a ``zeek_init()`` handler. This has been changed such that the
  first packet will only be processed once ``continue_processing()`` has been
  invoked again. Some background around the previous behavior can be found
  in GH-938. Given that the ``network_time_init()`` event explicitly signals
  initialization of network time, this behavior seems more reasonable.

- If an event is scheduled with a 0.0sec timeout from a ``zeek_init()`` handler
  that also invokes ``suspend_processing()``, the scheduled event will fire
  immediately with ``network_time()`` still yielding ``0.0``. Previously,
  ``network_time()`` was set to the current time. The new behavior provides
  more deterministic operation and aligns with timers stopping during a
  ``suspend_processing()``.
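
  A sketch of the behavior described above when reading a trace (illustrative
  only):

    global delayed_start: event();

    event delayed_start()
        {
        # Fires right after zeek_init(), with network_time() still at 0.0.
        print network_time();
        continue_processing();
        }

    event zeek_init()
        {
        suspend_processing();
        schedule 0.0sec { delayed_start() };
        }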

- Broker no longer initializes network time to the current time when processing
  input. Particularly in combination with pcap processing, this was not desirable
  behavior.

- The IO loop's poll interval is now correctly reduced from 100 to 10 for
  live packet sources. This should lower CPU usage for deployments with
  non-selectable packet sources.

- Zeek's CMake scaffolding has received an overhaul to modernize the build
  system and make it easier to maintain going forward. Plugins can now use a
  declarative interface for adding all sources, BIFs, etc. in one block instead
  of using the previous begin/end functions. While the old plugin functions
  still exist for backward compatibility, the underlying codebase requires newer
  CMake features. Plugin authors should raise their minimum required CMake
  version to 3.15, to match Zeek's.

- The IRC data analyzer does not extract DCC acknowledgements to files anymore.
  Instead, ``irc_dcc_send_ack`` is raised with the bytes acknowledged by the
  recipient.

- The IRC base script now uses ``file_sniff()`` instead of ``file_new()`` for
  DCC file transfers to capture ``fuid`` and the inferred MIME type in irc.log.

- The ``ignore_checksums`` script variable now reflects the correct value
  when using the ``-C`` command-line flag.

Removed Functionality
---------------------

- Mixing vector and scalar operands for binary expressions, like addition,
  multiplication, etc., is now an error.

- Using deprecated ``when`` semantics without capturing variables is now an error.

- Referencing local variables in a more outer scope than where they were declared
  is now an error.

Deprecated Functionality
------------------------

- The cluster framework's ``worker_count`` has been deprecated in favor of the
  new function ``get_active_node_count(node_type: NodeType)``, which can be used
  to obtain the number of nodes of a given type the calling node is currently
  connected to.


Zeek 5.2.0
==========
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
||||||
6.0.0-dev.226
|
6.0.0-dev.421
|
||||||
|
|
|
@ -1 +1 @@
|
||||||
Subproject commit 14f393117f63ef7327a40ade435f9021787b4f45
|
Subproject commit b6f138be79f7d4408302b1297b0c63092b019773
|
|
@ -1 +1 @@
|
||||||
Subproject commit bc0205ce1fc06ddb91abb6744cb79c7eb846c23e
|
Subproject commit 4fc4c31592c4823d675314bc981931de9e246057
|
|
@ -1 +1 @@
|
||||||
Subproject commit 8c9afe4a48a291e714c3a8031bad9867c4b4c665
|
Subproject commit 8534f719a0a384769383bbd4ad71c9eb2084823d
|
|
@ -1 +1 @@
|
||||||
Subproject commit 4a10f01dc937742d107d42ed6cb7d73bebf7c57b
|
Subproject commit a618f2ce0831c311f9bcff5d020b85fc44345221
|
|
@ -26,6 +26,15 @@ endif ()
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-missing-braces")
|
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-missing-braces")
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-vla")
|
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-vla")
|
||||||
|
|
||||||
|
# GCC 13 adds a new flag to check whether a symbol changes meaning. Due to an issue in one
|
||||||
|
# of the dependencies used by Spicy, this causes Zeek to fail to build on that compiler.
|
||||||
|
# Until this is fixed, ignore that warning, but check to make sure the flag exists first.
|
||||||
|
include(CheckCXXCompilerFlag)
|
||||||
|
check_cxx_compiler_flag("-Wno-changes-meaning" _has_no_changes_meaning_flag)
|
||||||
|
if ( _has_no_changes_meaning_flag )
|
||||||
|
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-changes-meaning")
|
||||||
|
endif ()
|
||||||
|
|
||||||
# The script generating precompiled headers for Spicy expects a different build
|
# The script generating precompiled headers for Spicy expects a different build
|
||||||
# system layout than provided for a bundled Spicy, disable it.
|
# system layout than provided for a bundled Spicy, disable it.
|
||||||
set(HILTI_DEV_PRECOMPILE_HEADERS OFF)
|
set(HILTI_DEV_PRECOMPILE_HEADERS OFF)
|
||||||
|
|
|
@ -1 +1 @@
|
||||||
Subproject commit 70e3d6a11224b4de95aa84548162561bcf232079
|
Subproject commit 64de5d0ef323428452827469f07b0a1da8e65e16
|
|
@ -1 +1 @@
|
||||||
Subproject commit 99161f64c269b8554b7c4d4a1fdda7ada86f16a4
|
Subproject commit 633b4b3aafebde91cc1ded20b2841113681aa60a
|
1
auxil/zeekjs
Submodule
1
auxil/zeekjs
Submodule
|
@ -0,0 +1 @@
|
||||||
|
Subproject commit e4ae24051f31620e8bd7a93e8516797d6734b6d9
|
|
@ -2,7 +2,7 @@ FROM quay.io/centos/centos:stream8
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION 20220519
|
ENV DOCKERFILE_VERSION 20230320
|
||||||
|
|
||||||
RUN dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
|
RUN dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
|
||||||
RUN dnf config-manager --set-enabled powertools
|
RUN dnf config-manager --set-enabled powertools
|
||||||
|
@ -26,6 +26,7 @@ RUN dnf -y install \
|
||||||
python3-pip\
|
python3-pip\
|
||||||
sqlite \
|
sqlite \
|
||||||
swig \
|
swig \
|
||||||
|
which \
|
||||||
zlib-devel \
|
zlib-devel \
|
||||||
&& dnf clean all && rm -rf /var/cache/dnf
|
&& dnf clean all && rm -rf /var/cache/dnf
|
||||||
|
|
||||||
|
|
|
@ -17,8 +17,10 @@ RUN apt-get update && apt-get -y install \
|
||||||
gcc \
|
gcc \
|
||||||
git \
|
git \
|
||||||
libkrb5-dev \
|
libkrb5-dev \
|
||||||
|
libnode-dev \
|
||||||
libpcap-dev \
|
libpcap-dev \
|
||||||
libssl-dev \
|
libssl-dev \
|
||||||
|
libuv1-dev \
|
||||||
make \
|
make \
|
||||||
python3 \
|
python3 \
|
||||||
python3-dev \
|
python3-dev \
|
||||||
|
|
39
ci/debian-12/Dockerfile
Normal file
39
ci/debian-12/Dockerfile
Normal file
|
@ -0,0 +1,39 @@
|
||||||
|
FROM debian:bookworm
|
||||||
|
|
||||||
|
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
|
||||||
|
|
||||||
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
|
ENV DOCKERFILE_VERSION 20230413
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get -y install \
|
||||||
|
bison \
|
||||||
|
bsdmainutils \
|
||||||
|
ccache \
|
||||||
|
cmake \
|
||||||
|
curl \
|
||||||
|
flex \
|
||||||
|
g++ \
|
||||||
|
gcc \
|
||||||
|
git \
|
||||||
|
libkrb5-dev \
|
||||||
|
libnode-dev \
|
||||||
|
libpcap-dev \
|
||||||
|
libssl-dev \
|
||||||
|
libuv1-dev \
|
||||||
|
make \
|
||||||
|
python3 \
|
||||||
|
python3-dev \
|
||||||
|
python3-pip\
|
||||||
|
python3-websockets \
|
||||||
|
sqlite3 \
|
||||||
|
swig \
|
||||||
|
wget \
|
||||||
|
xz-utils \
|
||||||
|
zlib1g-dev \
|
||||||
|
&& apt autoclean \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Debian bookworm really doesn't like using pip to install system wide stuff, but
|
||||||
|
# doesn't seem there's a python3-junit2html package, so not sure what we'd break.
|
||||||
|
RUN pip3 install --break-system-packages junit2html
|
|
@ -2,7 +2,7 @@ FROM fedora:37
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION 20221127
|
ENV DOCKERFILE_VERSION 20230413
|
||||||
|
|
||||||
RUN dnf -y install \
|
RUN dnf -y install \
|
||||||
bison \
|
bison \
|
||||||
|
@ -16,6 +16,7 @@ RUN dnf -y install \
|
||||||
git \
|
git \
|
||||||
libpcap-devel \
|
libpcap-devel \
|
||||||
make \
|
make \
|
||||||
|
nodejs-devel \
|
||||||
openssl \
|
openssl \
|
||||||
openssl-devel \
|
openssl-devel \
|
||||||
procps-ng \
|
procps-ng \
|
||||||
|
|
|
@ -2,7 +2,7 @@ FROM opensuse/tumbleweed
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION 20221027
|
ENV DOCKERFILE_VERSION 20230330
|
||||||
|
|
||||||
RUN zypper refresh \
|
RUN zypper refresh \
|
||||||
&& zypper in -y \
|
&& zypper in -y \
|
||||||
|
|
|
@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
|
||||||
|
|
||||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||||
ENV DOCKERFILE_VERSION 20220614
|
ENV DOCKERFILE_VERSION 20230413
|
||||||
|
|
||||||
RUN apt-get update && apt-get -y install \
|
RUN apt-get update && apt-get -y install \
|
||||||
bc \
|
bc \
|
||||||
|
@ -20,8 +20,10 @@ RUN apt-get update && apt-get -y install \
|
||||||
lcov \
|
lcov \
|
||||||
libkrb5-dev \
|
libkrb5-dev \
|
||||||
libmaxminddb-dev \
|
libmaxminddb-dev \
|
||||||
|
libnode-dev \
|
||||||
libpcap-dev \
|
libpcap-dev \
|
||||||
libssl-dev \
|
libssl-dev \
|
||||||
|
libuv1-dev \
|
||||||
make \
|
make \
|
||||||
python3 \
|
python3 \
|
||||||
python3-dev \
|
python3-dev \
|
||||||
|
|
2
cmake
2
cmake
|
@ -1 +1 @@
|
||||||
Subproject commit 23d5b121a1492feb15455d6ecaf3940a237beb26
|
Subproject commit 6f096c3f6dcb9f35664cf25a58989bc5d123b995
|
3
configure
vendored
3
configure
vendored
|
@ -325,6 +325,9 @@ while [ $# -ne 0 ]; do
|
||||||
--disable-cpp-tests)
|
--disable-cpp-tests)
|
||||||
append_cache_entry ENABLE_ZEEK_UNIT_TESTS BOOL false
|
append_cache_entry ENABLE_ZEEK_UNIT_TESTS BOOL false
|
||||||
;;
|
;;
|
||||||
|
--disable-javascript)
|
||||||
|
append_cache_entry DISABLE_JAVASCRIPT BOOL true
|
||||||
|
;;
|
||||||
--disable-port-prealloc)
|
--disable-port-prealloc)
|
||||||
append_cache_entry PREALLOCATE_PORT_ARRAY BOOL false
|
append_cache_entry PREALLOCATE_PORT_ARRAY BOOL false
|
||||||
;;
|
;;
|
||||||
|
|
2
doc
2
doc
|
@ -1 +1 @@
|
||||||
Subproject commit 714a02ced483df50a9f7ab383c4dfd7f7d2889ad
|
Subproject commit 6ccf06f0f6b0c24f120160aeb05307e4c4a44975
|
|
@ -44,6 +44,13 @@ export {
|
||||||
## time machine nodes in a cluster. Used with broker-enabled cluster communication.
|
## time machine nodes in a cluster. Used with broker-enabled cluster communication.
|
||||||
const time_machine_topic = "zeek/cluster/time_machine" &redef;
|
const time_machine_topic = "zeek/cluster/time_machine" &redef;
|
||||||
|
|
||||||
|
## A set of topic names to be used for broadcasting messages that are
|
||||||
|
## relevant to all nodes in a cluster. Currently, there is not a common
|
||||||
|
## topic to broadcast to, because enabling implicit Broker forwarding would
|
||||||
|
## cause a routing loop for this topic.
|
||||||
|
const broadcast_topics = { logger_topic, manager_topic, proxy_topic,
|
||||||
|
worker_topic, time_machine_topic };
|
||||||
|
|
||||||
## The topic prefix used for exchanging messages that are relevant to
|
## The topic prefix used for exchanging messages that are relevant to
|
||||||
## a named node in a cluster. Used with broker-enabled cluster communication.
|
## a named node in a cluster. Used with broker-enabled cluster communication.
|
||||||
const node_topic_prefix = "zeek/cluster/node/" &redef;
|
const node_topic_prefix = "zeek/cluster/node/" &redef;
|
||||||
|
@ -184,6 +191,12 @@ export {
|
||||||
id: string &optional;
|
id: string &optional;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
## Record to represent a cluster node including its name.
|
||||||
|
type NamedNode: record {
|
||||||
|
name: string;
|
||||||
|
node: Node;
|
||||||
|
};
|
||||||
|
|
||||||
## This function can be called at any time to determine if the cluster
|
## This function can be called at any time to determine if the cluster
|
||||||
## framework is being enabled for this run.
|
## framework is being enabled for this run.
|
||||||
##
|
##
|
||||||
|
@ -202,7 +215,7 @@ export {
|
||||||
## and it's maintained internally by the cluster framework. It's
|
## and it's maintained internally by the cluster framework. It's
|
||||||
## primarily intended for use by managers to find out how many workers
|
## primarily intended for use by managers to find out how many workers
|
||||||
## should be responding to requests.
|
## should be responding to requests.
|
||||||
global worker_count: count = 0;
|
global worker_count: count = 0 &deprecated="Remove in v6.1. Active worker count can be obtained via get_active_node_count(Cluster::WORKER)";
|
||||||
|
|
||||||
## The cluster layout definition. This should be placed into a filter
|
## The cluster layout definition. This should be placed into a filter
|
||||||
## named cluster-layout.zeek somewhere in the ZEEKPATH. It will be
|
## named cluster-layout.zeek somewhere in the ZEEKPATH. It will be
|
||||||
|
@ -212,6 +225,15 @@ export {
|
||||||
## or "worker-1").
|
## or "worker-1").
|
||||||
const nodes: table[string] of Node = {} &redef;
|
const nodes: table[string] of Node = {} &redef;
|
||||||
|
|
||||||
|
## Returns the number of nodes defined in the cluster layout for a given
|
||||||
|
## node type.
|
||||||
|
global get_node_count: function(node_type: NodeType): count;
|
||||||
|
|
||||||
|
## Returns the number of nodes per type, the calling node is currently
|
||||||
|
## connected to. This is primarily intended for use by the manager to find
|
||||||
|
## out how many nodes should be responding to requests.
|
||||||
|
global get_active_node_count: function(node_type: NodeType): count;
|
||||||
|
|
||||||
## Indicates whether or not the manager will act as the logger and receive
|
## Indicates whether or not the manager will act as the logger and receive
|
||||||
## logs. This value should be set in the cluster-layout.zeek script (the
|
## logs. This value should be set in the cluster-layout.zeek script (the
|
||||||
## value should be true only if no logger is specified in Cluster::nodes).
|
## value should be true only if no logger is specified in Cluster::nodes).
|
||||||
|
@ -262,35 +284,41 @@ export {
|
||||||
global nodeid_topic: function(id: string): string;
|
global nodeid_topic: function(id: string): string;
|
||||||
}
|
}
|
||||||
|
|
||||||
global active_worker_ids: set[string] = set();
|
# Track active nodes per type.
|
||||||
|
global active_node_ids: table[NodeType] of set[string];
|
||||||
type NamedNode: record {
|
|
||||||
name: string;
|
|
||||||
node: Node;
|
|
||||||
};
|
|
||||||
|
|
||||||
function nodes_with_type(node_type: NodeType): vector of NamedNode
|
function nodes_with_type(node_type: NodeType): vector of NamedNode
|
||||||
{
|
{
|
||||||
local rval: vector of NamedNode = vector();
|
local rval: vector of NamedNode = vector();
|
||||||
local names: vector of string = vector();
|
|
||||||
|
|
||||||
for ( name in Cluster::nodes )
|
for ( name, n in Cluster::nodes )
|
||||||
names += name;
|
|
||||||
|
|
||||||
names = sort(names, strcmp);
|
|
||||||
|
|
||||||
for ( i in names )
|
|
||||||
{
|
{
|
||||||
name = names[i];
|
|
||||||
local n = Cluster::nodes[name];
|
|
||||||
|
|
||||||
if ( n$node_type != node_type )
|
if ( n$node_type != node_type )
|
||||||
next;
|
next;
|
||||||
|
|
||||||
rval += NamedNode($name=name, $node=n);
|
rval += NamedNode($name=name, $node=n);
|
||||||
}
|
}
|
||||||
|
|
||||||
return rval;
|
return sort(rval, function(n1: NamedNode, n2: NamedNode): int
|
||||||
|
{ return strcmp(n1$name, n2$name); });
|
||||||
|
}
|
||||||
|
|
||||||
|
function Cluster::get_node_count(node_type: NodeType): count
|
||||||
|
{
|
||||||
|
local cnt = 0;
|
||||||
|
|
||||||
|
for ( _, n in nodes )
|
||||||
|
{
|
||||||
|
if ( n$node_type == node_type )
|
||||||
|
++cnt;
|
||||||
|
}
|
||||||
|
|
||||||
|
return cnt;
|
||||||
|
}
|
||||||
|
|
||||||
|
function Cluster::get_active_node_count(node_type: NodeType): count
|
||||||
|
{
|
||||||
|
return |active_node_ids[node_type]|;
|
||||||
}
|
}
|
||||||
|
|
||||||
function is_enabled(): bool
|
function is_enabled(): bool
|
||||||
|
@ -319,6 +347,8 @@ function nodeid_topic(id: string): string
|
||||||
return nodeid_topic_prefix + id + "/";
|
return nodeid_topic_prefix + id + "/";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@if ( Cluster::is_enabled() )
|
||||||
|
|
||||||
event Cluster::hello(name: string, id: string) &priority=10
|
event Cluster::hello(name: string, id: string) &priority=10
|
||||||
{
|
{
|
||||||
if ( name !in nodes )
|
if ( name !in nodes )
|
||||||
|
@ -341,11 +371,14 @@ event Cluster::hello(name: string, id: string) &priority=10
|
||||||
n$id = id;
|
n$id = id;
|
||||||
Cluster::log(fmt("got hello from %s (%s)", name, id));
|
Cluster::log(fmt("got hello from %s (%s)", name, id));
|
||||||
|
|
||||||
|
if ( n$node_type !in active_node_ids )
|
||||||
|
active_node_ids[n$node_type] = set();
|
||||||
|
add active_node_ids[n$node_type][id];
|
||||||
|
|
||||||
|
@pragma push ignore-deprecations
|
||||||
if ( n$node_type == WORKER )
|
if ( n$node_type == WORKER )
|
||||||
{
|
worker_count = |active_node_ids[WORKER]|;
|
||||||
add active_worker_ids[id];
|
@pragma pop ignore-deprecations
|
||||||
worker_count = |active_worker_ids|;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10
|
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10
|
||||||
|
@ -365,12 +398,12 @@ event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) &priority=1
|
||||||
{
|
{
|
||||||
Cluster::log(fmt("node down: %s", node_name));
|
Cluster::log(fmt("node down: %s", node_name));
|
||||||
delete n$id;
|
delete n$id;
|
||||||
|
delete active_node_ids[n$node_type][endpoint$id];
|
||||||
|
|
||||||
|
@pragma push ignore-deprecations
|
||||||
if ( n$node_type == WORKER )
|
if ( n$node_type == WORKER )
|
||||||
{
|
worker_count = |active_node_ids[WORKER]|;
|
||||||
delete active_worker_ids[endpoint$id];
|
@pragma pop ignore-deprecations
|
||||||
worker_count = |active_worker_ids|;
|
|
||||||
}
|
|
||||||
|
|
||||||
event Cluster::node_down(node_name, endpoint$id);
|
event Cluster::node_down(node_name, endpoint$id);
|
||||||
break;
|
break;
|
||||||
|
@ -390,6 +423,8 @@ event zeek_init() &priority=5
|
||||||
Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster", $policy=log_policy]);
|
Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster", $policy=log_policy]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@endif
|
||||||
|
|
||||||
function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo
|
function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo
|
||||||
{
|
{
|
||||||
local info = stores[name];
|
local info = stores[name];
|
||||||
|
|
|
@ -7,6 +7,15 @@
|
||||||
|
|
||||||
module Cluster;
|
module Cluster;
|
||||||
|
|
||||||
|
export {
|
||||||
|
## This hook is called when the local node connects to other nodes based on
|
||||||
|
## the given cluster layout. Breaking from the hook will prevent connection
|
||||||
|
## establishment.
|
||||||
|
##
|
||||||
|
## connectee: The node to connect to.
|
||||||
|
global connect_node_hook: hook(connectee: NamedNode);
|
||||||
|
}
|
||||||
|
|
||||||
function connect_peer(node_type: NodeType, node_name: string)
|
function connect_peer(node_type: NodeType, node_name: string)
|
||||||
{
|
{
|
||||||
local nn = nodes_with_type(node_type);
|
local nn = nodes_with_type(node_type);
|
||||||
|
@ -17,12 +26,15 @@ function connect_peer(node_type: NodeType, node_name: string)
|
||||||
|
|
||||||
if ( n$name != node_name )
|
if ( n$name != node_name )
|
||||||
next;
|
next;
|
||||||
|
if ( ! hook connect_node_hook(n) )
|
||||||
|
return;
|
||||||
|
|
||||||
local status = Broker::peer(cat(n$node$ip), n$node$p,
|
local status = Broker::peer(cat(n$node$ip), n$node$p,
|
||||||
Cluster::retry_interval);
|
Cluster::retry_interval);
|
||||||
Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s",
|
Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s",
|
||||||
n$node$ip, n$node$p, Cluster::retry_interval,
|
n$node$ip, n$node$p, Cluster::retry_interval,
|
||||||
status));
|
status));
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -33,6 +45,10 @@ function connect_peers_with_type(node_type: NodeType)
|
||||||
for ( i in nn )
|
for ( i in nn )
|
||||||
{
|
{
|
||||||
local n = nn[i];
|
local n = nn[i];
|
||||||
|
|
||||||
|
if ( ! hook connect_node_hook(n) )
|
||||||
|
next;
|
||||||
|
|
||||||
local status = Broker::peer(cat(n$node$ip), n$node$p,
|
local status = Broker::peer(cat(n$node$ip), n$node$p,
|
||||||
Cluster::retry_interval);
|
Cluster::retry_interval);
|
||||||
Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s",
|
Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s",
|
||||||
|
|
|
@ -59,14 +59,8 @@ global Config::cluster_set_option: event(ID: string, val: any, location: string)
|
||||||
|
|
||||||
function broadcast_option(ID: string, val: any, location: string) &is_used
|
function broadcast_option(ID: string, val: any, location: string) &is_used
|
||||||
{
|
{
|
||||||
# There's not currently a common topic to broadcast to as then enabling
|
for ( topic in Cluster::broadcast_topics )
|
||||||
# implicit Broker forwarding would cause a routing loop.
|
Broker::publish(topic, Config::cluster_set_option, ID, val, location);
|
||||||
Broker::publish(Cluster::worker_topic, Config::cluster_set_option,
|
|
||||||
ID, val, location);
|
|
||||||
Broker::publish(Cluster::proxy_topic, Config::cluster_set_option,
|
|
||||||
ID, val, location);
|
|
||||||
Broker::publish(Cluster::logger_topic, Config::cluster_set_option,
|
|
||||||
ID, val, location);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
event Config::cluster_set_option(ID: string, val: any, location: string)
|
event Config::cluster_set_option(ID: string, val: any, location: string)
|
||||||
|
|
|
@ -163,9 +163,6 @@ export {
|
||||||
};
|
};
|
||||||
|
|
||||||
## A function that one may use to customize log file rotation paths.
|
## A function that one may use to customize log file rotation paths.
|
||||||
## Note that the "fname" field of the *ri* argument is always an
|
|
||||||
## empty string for the purpose of this function call (i.e. the full
|
|
||||||
## file name is not determined yet).
|
|
||||||
const rotation_format_func: function(ri: RotationFmtInfo): RotationPath &redef;
|
const rotation_format_func: function(ri: RotationFmtInfo): RotationPath &redef;
|
||||||
|
|
||||||
## Default naming format for timestamps embedded into filenames.
|
## Default naming format for timestamps embedded into filenames.
|
||||||
|
|
|
@ -205,6 +205,8 @@ export {
|
||||||
["RST_with_data"] = ACTION_LOG,
|
["RST_with_data"] = ACTION_LOG,
|
||||||
["SSL_many_server_names"] = ACTION_LOG,
|
["SSL_many_server_names"] = ACTION_LOG,
|
||||||
["simultaneous_open"] = ACTION_LOG_PER_CONN,
|
["simultaneous_open"] = ACTION_LOG_PER_CONN,
|
||||||
|
["smtp_mail_transaction_invalid"] = ACTION_LOG_PER_CONN,
|
||||||
|
["smtp_excessive_invalid_mail_transactions"] = ACTION_LOG_PER_CONN,
|
||||||
["spontaneous_FIN"] = ACTION_IGNORE,
|
["spontaneous_FIN"] = ACTION_IGNORE,
|
||||||
["spontaneous_RST"] = ACTION_IGNORE,
|
["spontaneous_RST"] = ACTION_IGNORE,
|
||||||
["SMB_parsing_error"] = ACTION_LOG,
|
["SMB_parsing_error"] = ACTION_LOG,
|
||||||
|
|
|
@ -295,7 +295,6 @@ function handle_end_of_result_collection(uid: string, ss_name: string, key: Key,
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
#print fmt("worker_count:%d :: done_with:%d", Cluster::worker_count, done_with[uid]);
|
|
||||||
local ss = stats_store[ss_name];
|
local ss = stats_store[ss_name];
|
||||||
local ir = key_requests[uid];
|
local ir = key_requests[uid];
|
||||||
if ( check_thresholds(ss, key, ir, 1.0) )
|
if ( check_thresholds(ss, key, ir, 1.0) )
|
||||||
|
@ -357,7 +356,7 @@ event SumStats::send_no_key(uid: string, ss_name: string)
|
||||||
done_with[uid] = 0;
|
done_with[uid] = 0;
|
||||||
|
|
||||||
++done_with[uid];
|
++done_with[uid];
|
||||||
if ( Cluster::worker_count == done_with[uid] )
|
if ( Cluster::get_active_node_count(Cluster::WORKER) == done_with[uid] )
|
||||||
{
|
{
|
||||||
delete done_with[uid];
|
delete done_with[uid];
|
||||||
|
|
||||||
|
@ -394,7 +393,7 @@ event SumStats::send_a_key(uid: string, ss_name: string, key: Key)
|
||||||
add stats_keys[uid][key];
|
add stats_keys[uid][key];
|
||||||
|
|
||||||
++done_with[uid];
|
++done_with[uid];
|
||||||
if ( Cluster::worker_count == done_with[uid] )
|
if ( Cluster::get_active_node_count(Cluster::WORKER) == done_with[uid] )
|
||||||
{
|
{
|
||||||
delete done_with[uid];
|
delete done_with[uid];
|
||||||
|
|
||||||
|
@ -437,7 +436,7 @@ event SumStats::cluster_send_result(uid: string, ss_name: string, key: Key, resu
|
||||||
++done_with[uid];
|
++done_with[uid];
|
||||||
|
|
||||||
if ( uid !in dynamic_requests &&
|
if ( uid !in dynamic_requests &&
|
||||||
uid in done_with && Cluster::worker_count == done_with[uid] )
|
uid in done_with && Cluster::get_active_node_count(Cluster::WORKER) == done_with[uid] )
|
||||||
{
|
{
|
||||||
handle_end_of_result_collection(uid, ss_name, key, cleanup);
|
handle_end_of_result_collection(uid, ss_name, key, cleanup);
|
||||||
|
|
||||||
|
@ -481,7 +480,8 @@ function request_key(ss_name: string, key: Key): Result
|
||||||
add dynamic_requests[uid];
|
add dynamic_requests[uid];
|
||||||
|
|
||||||
event SumStats::cluster_get_result(uid, ss_name, key, F);
|
event SumStats::cluster_get_result(uid, ss_name, key, F);
|
||||||
return when [uid, ss_name, key] ( uid in done_with && Cluster::worker_count == done_with[uid] )
|
return when [uid, ss_name, key] ( uid in done_with &&
|
||||||
|
Cluster::get_active_node_count(Cluster::WORKER) == done_with[uid] )
|
||||||
{
|
{
|
||||||
#print "done with request_key";
|
#print "done with request_key";
|
||||||
local result = key_requests[uid];
|
local result = key_requests[uid];
|
||||||
|
|
|
@ -27,6 +27,9 @@ export {
|
||||||
## The interface name from which the node will read/analyze packets.
|
## The interface name from which the node will read/analyze packets.
|
||||||
## Typically used by worker nodes.
|
## Typically used by worker nodes.
|
||||||
interface: string &optional;
|
interface: string &optional;
|
||||||
|
## The PCAP file name from which the node will read/analyze packets.
|
||||||
|
## Typically used by worker nodes.
|
||||||
|
pcap_file: string &optional;
|
||||||
};
|
};
|
||||||
|
|
||||||
## Configuration options that influence behavior of a supervised Zeek node.
|
## Configuration options that influence behavior of a supervised Zeek node.
|
||||||
|
@ -36,6 +39,8 @@ export {
|
||||||
name: string;
|
name: string;
|
||||||
## The interface name from which the node will read/analyze packets.
|
## The interface name from which the node will read/analyze packets.
|
||||||
interface: string &optional;
|
interface: string &optional;
|
||||||
|
## The PCAP file name from which the node will read/analyze packets.
|
||||||
|
pcap_file: string &optional;
|
||||||
## The working directory that the node should use.
|
## The working directory that the node should use.
|
||||||
directory: string &optional;
|
directory: string &optional;
|
||||||
## The filename/path to which the node's stdout will be redirected.
|
## The filename/path to which the node's stdout will be redirected.
|
||||||
|
|
|
@ -161,6 +161,32 @@ type PacketSource: record {
|
||||||
netmask: count;
|
netmask: count;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
## If a packet source does not yield packets for this amount of time,
|
||||||
|
## it is considered idle. When a packet source is found to be idle,
|
||||||
|
## Zeek will update network_time to current time in order for timer expiration
|
||||||
|
## to function. A packet source that queues up packets but does not yield them
## for longer than this interval will provoke not-very-well-defined timer
## behavior.
|
||||||
|
##
|
||||||
|
## On Zeek workers with low packet rates, timer expiration may be delayed
|
||||||
|
## by this many milliseconds after the last packet has been received.
|
||||||
|
const packet_source_inactivity_timeout = 100msec &redef;
|
||||||
|
|
||||||
|
## Whether Zeek will forward network_time to the current time upon
|
||||||
|
## observing an idle packet source (or no configured packet source).
|
||||||
|
##
|
||||||
|
## Only set this to *F* if you really know what you're doing. Setting this to
|
||||||
|
## *F* on non-worker systems causes :zeek:see:`network_time` to be stuck
|
||||||
|
## at 0.0 and timer expiration will be non-functional.
|
||||||
|
##
|
||||||
|
## The main purpose of this option is to yield control over network time
|
||||||
|
## to plugins or scripts via broker or other non-timer events.
|
||||||
|
##
|
||||||
|
## .. zeek:see:: network_time set_network_time packet_source_inactivity_timeout
|
||||||
|
##
|
||||||
|
const allow_network_time_forward = T &redef;
|
||||||
|
|
||||||
## A connection's transport-layer protocol. Note that Zeek uses the term
|
## A connection's transport-layer protocol. Note that Zeek uses the term
|
||||||
## "connection" broadly, using flow semantics for ICMP and UDP.
|
## "connection" broadly, using flow semantics for ICMP and UDP.
|
||||||
type transport_proto: enum {
|
type transport_proto: enum {
|
||||||
|
@ -652,6 +678,7 @@ type NetStats: record {
|
||||||
## be always set to zero.
|
## be always set to zero.
|
||||||
pkts_link: count &default=0;
|
pkts_link: count &default=0;
|
||||||
bytes_recvd: count &default=0; ##< Bytes received by Zeek.
|
bytes_recvd: count &default=0; ##< Bytes received by Zeek.
|
||||||
|
pkts_filtered: count &optional; ##< Packets filtered by the packet source.
|
||||||
};
|
};
|
||||||
|
|
||||||
type ConnStats: record {
|
type ConnStats: record {
|
||||||
|
@ -2007,6 +2034,7 @@ type gtp_delete_pdp_ctx_response_elements: record {
|
||||||
|
|
||||||
# Prototypes of Zeek built-in functions.
|
# Prototypes of Zeek built-in functions.
|
||||||
@load base/bif/zeek.bif
|
@load base/bif/zeek.bif
|
||||||
|
@load base/bif/communityid.bif
|
||||||
@load base/bif/stats.bif
|
@load base/bif/stats.bif
|
||||||
@load base/bif/reporter.bif
|
@load base/bif/reporter.bif
|
||||||
@load base/bif/strings.bif
|
@load base/bif/strings.bif
|
||||||
|
@ -5181,6 +5209,32 @@ export {
     ## interfaces.
     const bufsize = 128 &redef;

+    ## Default timeout for packet sources without file descriptors.
+    ##
+    ## For libpcap based packet sources that do not provide a usable
+    ## file descriptor for select(), the timeout provided to the IO
+    ## loop is either zero if a packet was most recently available
+    ## or else this value.
+    ##
+    ## Depending on the expected packet rate per worker and the amount of
+    ## available packet buffer, raising this value can significantly reduce
+    ## Zeek's CPU usage at the cost of a small delay before processing
+    ## packets. Setting this value too high may cause packet drops due
+    ## to running out of available buffer space.
+    ##
+    ## Increasing this value to 200usec on low-traffic Myricom based systems
+    ## (5 kpps per Zeek worker) has shown a 50% reduction in CPU usage.
+    ##
+    ## This is an advanced setting. Do monitor dropped packets and capture
+    ## loss information when changing it.
+    ##
+    ## .. note:: Packet sources that override the ``GetNextTimeout()`` method
+    ##    may not respect this value.
+    ##
+    ## .. zeek:see:: io_poll_interval_live
+    const non_fd_timeout = 20usec &redef;
+
     ## The definition of a "pcap interface".
     type Interface: record {
         ## The interface/device name.
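A possible redef for a low-rate deployment, assuming this constant sits in the
Pcap module alongside bufsize as the hunk context suggests (the value mirrors
the Myricom example above and is not a general recommendation):

    redef Pcap::non_fd_timeout = 200usec;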
@ -5581,6 +5635,35 @@ const digest_salt = "Please change this value." &redef;
 ## :zeek:see:`find_all_ordered` BIFs.
 const max_find_all_string_length: int = 10000 &redef;

+## How many rounds to go without checking IO sources with file descriptors
+## for readiness by default. This is used when reading from traces.
+##
+## Very roughly, when reading from a pcap, setting this to 100 results in
+## 100 packets being processed without checking FD based IO sources.
+##
+## .. note:: This should not be changed outside of development or when
+##    debugging problems with the main-loop, or developing features with
+##    tight main-loop interaction.
+##
+## .. zeek:see:: io_poll_interval_live
+const io_poll_interval_default = 100 &redef;
+
+## How often to check IO sources with file descriptors for readiness when
+## monitoring with a live packet source.
+##
+## The poll interval defaults to 100, which is good for cases like reading
+## from pcap files and running without a packet source, but is a little too
+## infrequent for live sources (especially fast ones), so it is set lower
+## for those.
+##
+## .. note:: This should not be changed outside of development or when
+##    debugging problems with the main-loop, or developing features with
+##    tight main-loop interaction.
+##
+## .. zeek:see:: io_poll_interval_default
+const io_poll_interval_live = 10 &redef;
+
 global done_with_network = F;
 event net_done(t: time)
     { done_with_network = T; }
@ -316,12 +316,58 @@ event ftp_request(c: connection, command: string, arg: string) &priority=5
 event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &priority=5
     {
     set_ftp_session(c);

+    # Skip matching up intermediate reply lines (that do not have a
+    # valid status code) with pending commands. Because they may not
+    # have a proper status code, there's little point setting whatever
+    # their reply_code and reply_msg are on the command.
+    #
+    # There's a quirk: Some FTP servers return(ed?) replies like the
+    # following, violating the multi-line reply protocol:
+    #
+    #   c: STOR intermol.ps
+    #   s: 150 Opening ASCII mode data connection for 'intermol.ps'.
+    #   s: 230- WARNING! 4 bare linefeeds received in ASCII mode
+    #   s:   File may not have transferred correctly.
+    #   s: 226 Transfer complete.
+    #
+    # This is a multiline response started with 230-, but never finalized
+    # with the same status code. It should have been completed with
+    # "230 <some final message>", but instead was completed with "226 ...".
+    # This confuses our parser, returning cont_resp = T for all following
+    # server messages. This caused a regression as the current command wasn't
+    # updated for logging.
+    #
+    # The regex below is a best effort to keep existing behavior
+    # in the face of such traffic. It matches on messages that look
+    # like valid status codes (starting with 3 digits followed by
+    # at least 10 ASCII characters).
+    #
+    # There's the following in RFC 959, so in the future we could push
+    # the detection/logic down into the parser instead of here:
+    #
+    #   If an intermediary line begins with a 3-digit number, the Server
+    #   must pad the front to avoid confusion.
+    #
+    if ( cont_resp && code == 0 && c$ftp?$reply_code )
+        {
+        if ( /^[1-9][0-9]{2} [[:print:]]{10}.*/ !in msg )
+            return;
+        else
+            {
+            # This might be worth a weird, but not sure it's
+            # worth it and how trigger happy it could be.
+            # Reporter::conn_weird("FTP_intermediate_line_with_reply_code", c, msg, "FTP");
+            }
+        }
+
     c$ftp$cmdarg = get_pending_cmd(c$ftp$pending_commands, code, msg);
     c$ftp$reply_code = code;
     c$ftp$reply_msg = msg;

-    # TODO: figure out what to do with continued FTP response (not used much)
-    if ( cont_resp ) return;
+    # Do not parse out information from any but the first reply line.
+    if ( cont_resp )
+        return;

     # TODO: do some sort of generic clear text login processing here.
     local response_xyz = parse_ftp_reply_code(code);
@ -97,7 +97,7 @@ function log_dcc(f: fa_file)
         }
     }

-event file_new(f: fa_file) &priority=-5
+event file_sniff(f: fa_file, meta: fa_metadata) &priority=-5
     {
     if ( f$source == "IRC_DATA" )
         log_dcc(f);
@ -2,6 +2,7 @@
 @load base/utils/directions-and-hosts
 @load base/utils/email
 @load base/protocols/conn/removal-hooks
+@load base/frameworks/notice/weird

 module SMTP;

@ -75,6 +76,11 @@
     messages_transferred: count &default=0;

     pending_messages: set[Info] &optional;
+
+    trans_mail_from_seen: bool &default=F;
+    trans_rcpt_to_seen: bool &default=F;
+    invalid_transactions: count &default=0;
+    analyzer_id: count &optional;
 };

 ## Direction to capture the full "Received from" path.

@ -91,6 +97,16 @@
     ## SMTP finalization hook. Remaining SMTP info may get logged when it's called.
     global finalize_smtp: Conn::RemovalHook;
+
+    ## When seeing a RCPT TO or DATA command, validate that it has been
+    ## preceded by a MAIL FROM or RCPT TO command, respectively, else
+    ## log a weird and possibly disable the SMTP analyzer upon too
+    ## many invalid transactions.
+    option mail_transaction_validation = T;
+
+    ## Disable the SMTP analyzer when this many invalid transactions
+    ## have been observed in an SMTP session.
+    option max_invalid_mail_transactions = 25;
 }

 redef record connection += {

@ -151,6 +167,22 @@ function set_smtp_session(c: connection)
     c$smtp = new_smtp_log(c);
     }

+function mail_transaction_invalid(c: connection, addl: string)
+    {
+    Reporter::conn_weird("smtp_mail_transaction_invalid", c, addl, "SMTP");
+
+    ++c$smtp_state$invalid_transactions;
+
+    if ( max_invalid_mail_transactions > 0
+         && c$smtp_state$invalid_transactions > max_invalid_mail_transactions
+         && c$smtp_state?$analyzer_id )
+        {
+        Reporter::conn_weird("smtp_excessive_invalid_mail_transactions", c, "", "SMTP");
+        if ( disable_analyzer(c$id, c$smtp_state$analyzer_id) )
+            delete c$smtp_state$analyzer_id;
+        }
+    }
+
 function smtp_message(c: connection)
     {
     if ( c$smtp$has_client_activity )

@ -160,6 +192,15 @@ function smtp_message(c: connection)
         }
     }

+event analyzer_confirmation_info(atype: AllAnalyzers::Tag, info: AnalyzerConfirmationInfo)
+    {
+    if ( atype != Analyzer::ANALYZER_SMTP )
+        return;
+
+    set_smtp_session(info$c);
+    info$c$smtp_state$analyzer_id = info$aid;
+    }
+
 event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &priority=5
     {
     set_smtp_session(c);

@ -184,6 +225,13 @@ event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &
         }

     c$smtp$has_client_activity = T;
+    c$smtp_state$trans_rcpt_to_seen = T;
+
+    if ( mail_transaction_validation )
+        {
+        if ( ! c$smtp_state$trans_mail_from_seen )
+            mail_transaction_invalid(c, "rcpt to missing mail from");
+        }
     }

 else if ( upper_command == "MAIL" && /^[fF][rR][oO][mM]:/ in arg )

@ -195,6 +243,23 @@
     if ( mailfrom != "" )
         c$smtp$mailfrom = mailfrom;
     c$smtp$has_client_activity = T;
+
+    c$smtp_state$trans_mail_from_seen = T;
+    c$smtp_state$trans_rcpt_to_seen = F; # Reset state on MAIL FROM
+    }
+else if ( upper_command == "DATA" )
+    {
+    if ( mail_transaction_validation )
+        {
+        if ( ! c$smtp_state$trans_rcpt_to_seen ) # mail from checked in rcpt to
+            mail_transaction_invalid(c, "data missing rcpt to");
+        }
+    }
+else if ( upper_command == "." )
+    {
+    # Reset state when we're seeing a .
+    c$smtp_state$trans_mail_from_seen = F;
+    c$smtp_state$trans_rcpt_to_seen = F;
     }
 }
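The two new options can be tuned like any other option, e.g. via redef in
local.zeek or through the configuration framework; a sketch (values are
illustrative only):

    # Tolerate more protocol sloppiness before the analyzer is disabled.
    redef SMTP::max_invalid_mail_transactions = 100;

    # Or switch the transaction validation (and its weirds) off entirely.
    redef SMTP::mail_transaction_validation = F;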
scripts/policy/frameworks/cluster/experimental.zeek | 89 (new file)
@ -0,0 +1,89 @@
##! Experimental features of the Cluster framework.

@load base/frameworks/cluster

module Cluster::Experimental;

export {
    ## When using broker-enabled cluster framework, this event will be sent to
    ## the manager and raised locally, once a cluster node has successfully
    ## conducted cluster-level handshakes for all its outgoing connections to
    ## other cluster nodes based on the given cluster layout.
    ##
    ## name: The name of the now fully connected node.
    ##
    ## id: The identifier of the now fully connected node.
    ##
    ## resending: If true, the node has previously signaled that it is fully
    ##            connected. This may happen in case the manager restarts.
    ##
    ## .. warning::
    ##
    ##     There is no tracking of cluster node connectivity. Thus, there is
    ##     no guarantee that all peerings still exist at the time of this event
    ##     being raised.
    global node_fully_connected: event(name: string, id: string, resending: bool);

    ## When using broker-enabled cluster framework, this event will be
    ## broadcasted from the manager once all nodes reported that they have set
    ## up all their outgoing connections to other cluster nodes based on the
    ## given cluster layout.
    ##
    ## .. warning::
    ##
    ##     There is no tracking of cluster node connectivity. Thus, there is
    ##     no guarantee that all peerings still exist at the time of this event
    ##     being raised.
    global cluster_started: event();
}

# Track the names of cluster nodes the local node sets up connections to.
global connectees_pending: set[string];
# Track whether the cluster reached the fully connected state.
global is_cluster_started = F;

@if ( Cluster::is_enabled() )

@if ( Cluster::local_node_type() == Cluster::MANAGER )
@load ./nodes-experimental/manager
@endif

event zeek_init() &priority=4
    {
    Broker::auto_publish(Cluster::manager_topic, Cluster::Experimental::node_fully_connected);
    }

hook Cluster::connect_node_hook(connectee: Cluster::NamedNode)
    {
    add connectees_pending[connectee$name];
    }

event Cluster::node_up(name: string, id: string) &priority=-10
    {
    # Track pending connectees to trigger node_fully_connected, which will be
    # auto published to the manager once available.
    local mgr = Cluster::nodes[Cluster::node]?$manager ? Cluster::nodes[Cluster::node]$manager : "";
    if ( name !in connectees_pending && name != mgr )
        return;

    # At this point we are either awaiting the started node or see our manager
    # for the first time. Hence, we can trigger node_fully_connected if no
    # pending connectee is left.
    delete connectees_pending[name];
    if ( |connectees_pending| == 0 )
        event Cluster::Experimental::node_fully_connected(Cluster::node, Broker::node_id(),
                                                          is_cluster_started);
    }

event Cluster::Experimental::node_fully_connected(name: string, id: string, resending: bool)
    {
    if ( ! is_remote_event() )
        Cluster::log("fully connected");
    }

event Cluster::Experimental::cluster_started()
    {
    is_cluster_started = T;
    }

@endif
scripts/policy/frameworks/cluster/nodes-experimental/manager.zeek | 57 (new file)
@ -0,0 +1,57 @@
##! This script is loaded on the cluster manager to cover manager-related
##! parts of experimental features.

@load base/frameworks/cluster
@load policy/frameworks/cluster/experimental

module Cluster::Experimental;

global fully_connected_nodes_pending: set[string];

event zeek_init()
    {
    fully_connected_nodes_pending = table_keys(Cluster::nodes);
    }

event node_fully_connected(name: string, id: string, resending: bool)
    {
    # If a node resends this event, it has already seen the cluster connected.
    # That is, the manager most likely restarted. Adopt the view of the other
    # nodes.
    is_cluster_started = is_cluster_started || resending;

    delete fully_connected_nodes_pending[name];
    if ( ! is_cluster_started && |fully_connected_nodes_pending| == 0 )
        {
        event cluster_started();

        for ( topic in Cluster::broadcast_topics )
            Broker::publish(topic, Cluster::Experimental::cluster_started);
        }
    }

event cluster_started()
    {
    Cluster::log("cluster connected");
    }

# Handle some special cases for tracking connected nodes:

event zeek_init() &priority=-15
    {
    # Make sure the manager recognizes itself as ready if no
    # connections have to be initiated.
    if ( |connectees_pending| == 0 )
        event node_fully_connected(Cluster::node, Broker::node_id(), F);
    }

event Cluster::node_up(name: string, id: string)
    {
    # Loggers may not know any manager and would thus be unable to
    # report successful setup. As they do not establish connections
    # we can consider this case here.

    local n = Cluster::nodes[name];
    if ( n$node_type == Cluster::LOGGER && ! n?$manager )
        event node_fully_connected(name, id, F);
    }
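A site script can react to the new experimental events, e.g. to defer work
until all cluster peerings are established; a minimal sketch:

    @load policy/frameworks/cluster/experimental

    event Cluster::Experimental::cluster_started()
        {
        # All nodes reported their outgoing connections. Note the warning
        # above: there is no guarantee the peerings still exist later on.
        Cluster::log("kicking off post-startup tasks");
        }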
scripts/policy/frameworks/notice/community-id.zeek | 35 (new file)
@ -0,0 +1,35 @@
# Source this script in addition to protocols/conn/community-id
# to add Community ID to notices.

# Only support loading this if the main script is also loaded.
@load base/protocols/conn
@load base/frameworks/notice

@ifdef ( CommunityID::seed )

module CommunityID::Notice;

export {
    # Turn notice support on/off at runtime. When disabled,
    # this still leaves the `community_id` string in the notice
    # log, just unset.
    option enabled: bool = T;

    redef record Notice::Info += {
        community_id: string &optional &log;
    };
}

hook Notice::notice(n: Notice::Info)
    {
    if ( CommunityID::Notice::enabled && n?$conn && n$conn?$conn )
        {
        local info = n$conn$conn;
        # This is set during new_connection(), so it should
        # always be there, but better safe than sorry.
        if ( info?$community_id )
            n$community_id = info$community_id;
        }
    }

@endif
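To actually get the community_id column into notice.log, both scripts need to
be loaded (the conn-side script defines CommunityID::seed, which the @ifdef
above checks for):

    @load policy/protocols/conn/community-id-logging
    @load policy/frameworks/notice/community-id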
@ -1,6 +1,7 @@
 ##! Log memory/packet/lag statistics.

 @load base/frameworks/notice
+@load base/frameworks/telemetry

 module Stats;

@ -34,6 +35,9 @@
     ## Lag between the wall clock and packet timestamps if reading
     ## live traffic.
     pkt_lag: interval &log &optional;
+    ## Number of packets filtered from the link since the last
+    ## stats interval if reading live traffic.
+    pkts_filtered: count &log &optional;

     ## Number of events processed since the last stats interval.
     events_proc: count &log;

@ -84,6 +88,57 @@
     global log_stats: event(rec: Info);
 }

+global bytes_received_cf = Telemetry::register_counter_family([
+    $prefix="zeek",
+    $name="net-received-bytes",
+    $unit="1",
+    $help_text="Total number of bytes received",
+]);
+
+global packets_received_cf = Telemetry::register_counter_family([
+    $prefix="zeek",
+    $name="net-received-packets",
+    $unit="1",
+    $help_text="Total number of packets received",
+]);
+
+global packets_dropped_cf = Telemetry::register_counter_family([
+    $prefix="zeek",
+    $name="net-dropped-packets",
+    $unit="1",
+    $help_text="Total number of packets dropped",
+]);
+
+global link_packets_cf = Telemetry::register_counter_family([
+    $prefix="zeek",
+    $name="net-link-packets",
+    $unit="1",
+    $help_text="Total number of packets on the packet source link before filtering",
+]);
+
+global packets_filtered_cf = Telemetry::register_counter_family([
+    $prefix="zeek",
+    $name="net-filtered-packets",
+    $unit="1",
+    $help_text="Total number of packets filtered",
+]);
+
+hook Telemetry::sync() {
+    local net_stats = get_net_stats();
+    Telemetry::counter_family_set(bytes_received_cf, vector(), net_stats$bytes_recvd);
+    Telemetry::counter_family_set(packets_received_cf, vector(), net_stats$pkts_recvd);
+
+    if ( reading_live_traffic() )
+        {
+        Telemetry::counter_family_set(packets_dropped_cf, vector(), net_stats$pkts_dropped);
+        Telemetry::counter_family_set(link_packets_cf, vector(), net_stats$pkts_link);
+
+        if ( net_stats?$pkts_filtered )
+            Telemetry::counter_family_set(packets_filtered_cf, vector(), net_stats$pkts_filtered);
+        }
+    }
+
 event zeek_init() &priority=5
     {
     Log::create_stream(Stats::LOG, [$columns=Info, $ev=log_stats, $path="stats", $policy=log_policy]);

@ -140,6 +195,11 @@ event check_stats(then: time, last_ns: NetStats, last_cs: ConnStats, last_ps: Pr
     info$pkt_lag = current_time() - nettime;
     info$pkts_dropped = ns$pkts_dropped - last_ns$pkts_dropped;
     info$pkts_link = ns$pkts_link - last_ns$pkts_link;
+
+    # This makes the assumption that if pkts_filtered is valid, it's been valid in
+    # all of the previous calls.
+    if ( ns?$pkts_filtered )
+        info$pkts_filtered = ns$pkts_filtered - last_ns$pkts_filtered;
     }

 Log::write(Stats::LOG, info);
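The new optional Info field can be consumed from the existing log_stats event;
a small sketch:

    event Stats::log_stats(rec: Stats::Info)
        {
        if ( rec?$pkts_filtered )
            print fmt("packet source filtered %d packets last interval", rec$pkts_filtered);
        }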
scripts/policy/protocols/conn/community-id-logging.zeek | 26 (new file)
@ -0,0 +1,26 @@
##! Adds community hash IDs to conn.log.
@load base/protocols/conn

module CommunityID;

export {
    # An unsigned 16-bit number to seed our hashing
    option seed: count = 0;

    # Whether to add a base64 pass over the hash digest.
    # Enabled by default, since it shortens the output.
    option do_base64: bool = T;

    # Add the ID string field to the connection log record.
    redef record Conn::Info += {
        community_id: string &optional &log;
    };
}

module Conn;

event new_connection(c: connection)
    {
    Conn::set_conn(c, F); # likely first to access :-/
    c$conn$community_id = community_id_v1(c$id, CommunityID::seed, CommunityID::do_base64);
    }
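Once loaded, the hashing can be adjusted through the exported options; a sketch
for local.zeek (the seed value is arbitrary):

    @load policy/protocols/conn/community-id-logging

    redef CommunityID::seed = 4242;     # must fit in 16 bits
    redef CommunityID::do_base64 = F;   # log the un-base64'd digest instead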
@ -10,7 +10,7 @@ redef record SSL::Info += {
     requested_client_certificate_authorities: vector of string &optional &log;
 };

-event ssl_certificate_request(c: connection, is_client: bool, certificate_types: index_vec, supported_signature_algorithms: SSL::SignatureAndHashAlgorithm, certificate_authorities: string_vec)
+event ssl_certificate_request(c: connection, is_client: bool, certificate_types: index_vec, supported_signature_algorithms: signature_and_hashalgorithm_vec, certificate_authorities: string_vec)
     {
     if ( is_client )
         return;
@ -90,6 +90,9 @@ redef digest_salt = "Please change this value.";
 # Extend email alerting to include hostnames
 @load policy/frameworks/notice/extend-email/hostnames

+# Extend the notice.log with Community ID hashes
+# @load policy/frameworks/notice/community-id
+
 # Enable logging of telemetry data into telemetry.log and
 # telemetry_histogram.log.
 @load frameworks/telemetry/log

@ -98,6 +101,10 @@ redef digest_salt = "Please change this value.";
 # this might impact performance a bit.
 # @load policy/protocols/ssl/heartbleed

+# Uncomment the following line to enable logging of Community ID hashes in
+# the conn.log file.
+# @load policy/protocols/conn/community-id-logging
+
 # Uncomment the following line to enable logging of connection VLANs. Enabling
 # this adds two VLAN fields to the conn.log file.
 # @load policy/protocols/conn/vlan-logging
@ -11,6 +11,8 @@
 # @load frameworks/control/controllee.zeek
 # @load frameworks/control/controller.zeek
+@load frameworks/cluster/experimental.zeek
+@load frameworks/cluster/nodes-experimental/manager.zeek
 @load frameworks/management/agent/__load__.zeek
 @load frameworks/management/agent/api.zeek
 @load frameworks/management/agent/boot.zeek

@ -64,6 +66,7 @@
 @load frameworks/files/hash-all-files.zeek
 @load frameworks/notice/__load__.zeek
 @load frameworks/notice/actions/drop.zeek
+@load frameworks/notice/community-id.zeek
 @load frameworks/notice/extend-email/hostnames.zeek
 @load files/x509/disable-certificate-events-known-certs.zeek
 @load frameworks/packet-filter/shunt.zeek

@ -85,6 +88,7 @@
 @load misc/weird-stats.zeek
 @load misc/trim-trace-file.zeek
 @load misc/unknown-protocols.zeek
+@load protocols/conn/community-id-logging.zeek
 @load protocols/conn/known-hosts.zeek
 @load protocols/conn/known-services.zeek
 @load protocols/conn/mac-logging.zeek
@ -20,16 +20,6 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -E create_symlink
     ".."
     "${CMAKE_CURRENT_BINARY_DIR}/include/zeek")

-# This collects generated bif and pac files from subdirectories.
-set(bro_ALL_GENERATED_OUTPUTS CACHE INTERNAL "automatically generated files" FORCE)
-
-# This collects bif inputs that we'll load automatically.
-set(bro_AUTO_BIFS CACHE INTERNAL "BIFs for automatic inclusion" FORCE)
-set(bro_REGISTER_BIFS CACHE INTERNAL "BIFs for automatic registering" FORCE)
-
-set(bro_BASE_BIF_SCRIPTS CACHE INTERNAL "Zeek script stubs for BIFs in base distribution of Zeek" FORCE)
-set(bro_PLUGIN_BIF_SCRIPTS CACHE INTERNAL "Zeek script stubs for BIFs in Zeek plugins" FORCE)
-
 # Poor man's JSON escaping as this is rendered into a C string.
 string(REPLACE "\"" "\\\"" ZEEK_BUILD_INFO_ESCAPED "${ZEEK_BUILD_INFO}")
 string(REPLACE "\n" "\\n" ZEEK_BUILD_INFO_ESCAPED "${ZEEK_BUILD_INFO_ESCAPED}")

@ -66,16 +56,6 @@ else()
     set(SIGN_COMPARE_FLAG "-Wno-sign-compare")
 endif()

-# BIF parser/scanner
-bison_target(BIFParser builtin-func.y
-             ${CMAKE_CURRENT_BINARY_DIR}/bif_parse.cc
-             HEADER ${CMAKE_CURRENT_BINARY_DIR}/bif_parse.h
-             #VERBOSE ${CMAKE_CURRENT_BINARY_DIR}/bif_parse.output
-             COMPILE_FLAGS "${BISON_FLAGS}")
-flex_target(BIFScanner builtin-func.l ${CMAKE_CURRENT_BINARY_DIR}/bif_lex.cc)
-add_flex_bison_dependency(BIFScanner BIFParser)
-set_property(SOURCE bif_lex.cc APPEND_STRING PROPERTY COMPILE_FLAGS "${SIGN_COMPARE_FLAG}")
-
 # Rule parser/scanner
 bison_target(RuleParser rule-parse.y
              ${CMAKE_CURRENT_BINARY_DIR}/rup.cc

@ -119,6 +99,21 @@ flex_target(Scanner scan.l ${CMAKE_CURRENT_BINARY_DIR}/scan.cc
             COMPILE_FLAGS "-Pzeek")
 set_property(SOURCE scan.cc APPEND_STRING PROPERTY COMPILE_FLAGS "${SIGN_COMPARE_FLAG}")

+# Add a dependency for the generated files to zeek_autogen_files.
+add_custom_target(
+    zeek_bison_outputs
+    DEPENDS
+        ${CMAKE_CURRENT_BINARY_DIR}/parse.cc
+        ${CMAKE_CURRENT_BINARY_DIR}/re-parse.cc
+        ${CMAKE_CURRENT_BINARY_DIR}/re-parse.h
+        ${CMAKE_CURRENT_BINARY_DIR}/re-scan.cc
+        ${CMAKE_CURRENT_BINARY_DIR}/rule-parse.cc
+        ${CMAKE_CURRENT_BINARY_DIR}/rule-parse.h
+        ${CMAKE_CURRENT_BINARY_DIR}/rule-scan.cc
+        ${CMAKE_CURRENT_BINARY_DIR}/scan.cc
+)
+add_dependencies(zeek_autogen_files zeek_bison_outputs)
+
 ########################################################################
 ## bifcl-dependent targets

@ -128,6 +123,7 @@ set(SUPERVISOR_SRCS supervisor/Supervisor.cc Pipe.cc)

 set(BIF_SRCS
     zeek.bif
+    communityid.bif
     stats.bif
     event.bif
     const.bif

@ -164,6 +160,8 @@ set(BINPAC_AUXSRC
     ${CMAKE_CURRENT_SOURCE_DIR}/binpac_zeek.h
 )

+set(BINPAC_OUTPUTS "")
+
 binpac_target(binpac-lib.pac)
 list(APPEND BINPAC_OUTPUTS "${BINPAC_OUTPUT_CC}")

@ -185,12 +183,6 @@ gen_zam_target(${GEN_ZAM_SRC})

 option(USE_SQLITE "Should Zeek use SQLite?" ON)

-set(bro_SUBDIR_LIBS CACHE INTERNAL "subdir libraries" FORCE)
-set(bro_SUBDIR_DEPS CACHE INTERNAL "subdir dependencies" FORCE)
-set(bro_PLUGIN_LIBS CACHE INTERNAL "plugin libraries" FORCE)
-set(bro_PLUGIN_DEPS CACHE INTERNAL "plugin dependencies" FORCE)
-set(bro_PLUGIN_LINK_LIBS CACHE INTERNAL "plugin link libraries" FORCE)
-
 add_subdirectory(analyzer)
 add_subdirectory(packet_analysis)
 add_subdirectory(broker)

@ -276,6 +268,14 @@ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/DebugCmdConstants.h
     WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
 )

+add_custom_target(
+    zeek_debugcmd_gen
+    DEPENDS
+        ${CMAKE_CURRENT_BINARY_DIR}/DebugCmdConstants.h
+        ${CMAKE_CURRENT_BINARY_DIR}/DebugCmdInfoConstants.cc
+)
+add_dependencies(zeek_autogen_files zeek_debugcmd_gen)
+
 set(_gen_zeek_script_cpp ${CMAKE_CURRENT_BINARY_DIR}/../CPP-gen.cc)
 add_custom_command(OUTPUT ${_gen_zeek_script_cpp}
     COMMAND ${CMAKE_COMMAND} -E touch ${_gen_zeek_script_cpp})

@ -453,16 +453,18 @@ set(THIRD_PARTY_SRCS
 )

 # Highwayhash. Highwayhash is a bit special since it has architecture dependent code...
+set(hhash_dir ${PROJECT_SOURCE_DIR}/auxil/highwayhash/highwayhash)
-set(HH_SRCS
-    ../auxil/highwayhash/highwayhash/sip_hash.cc
-    ../auxil/highwayhash/highwayhash/sip_tree_hash.cc
-    ../auxil/highwayhash/highwayhash/scalar_sip_tree_hash.cc
-    ../auxil/highwayhash/highwayhash/arch_specific.cc
-    ../auxil/highwayhash/highwayhash/instruction_sets.cc
-    ../auxil/highwayhash/highwayhash/nanobenchmark.cc
-    ../auxil/highwayhash/highwayhash/os_specific.cc
-    ../auxil/highwayhash/highwayhash/hh_portable.cc
+zeek_add_subdir_library(
+    hhash
+    SOURCES
+        ${hhash_dir}/sip_hash.cc
+        ${hhash_dir}/sip_tree_hash.cc
+        ${hhash_dir}/scalar_sip_tree_hash.cc
+        ${hhash_dir}/arch_specific.cc
+        ${hhash_dir}/instruction_sets.cc
+        ${hhash_dir}/nanobenchmark.cc
+        ${hhash_dir}/os_specific.cc
+        ${hhash_dir}/hh_portable.cc
 )

 if (${COMPILER_ARCHITECTURE} STREQUAL "arm")

@ -475,22 +477,28 @@ if (${COMPILER_ARCHITECTURE} STREQUAL "arm")
     " test_arm_neon)

     if (test_arm_neon)
-        list(APPEND HH_SRCS ../auxil/highwayhash/highwayhash/hh_neon.cc)
+        target_sources(zeek_hhash_obj PRIVATE ${hhash_dir}/hh_neon.cc)
     endif ()

-    set_source_files_properties(${HH_SRCS} PROPERTIES COMPILE_FLAGS
-        -mfloat-abi=hard -march=armv7-a -mfpu=neon)
+    target_compile_options(
+        zeek_hhash_obj
+        PRIVATE
+            -mfloat-abi=hard
+            -march=armv7-a
+            -mfpu=neon
+    )
 elseif (${COMPILER_ARCHITECTURE} STREQUAL "aarch64")
-    list(APPEND HH_SRCS
-        ../auxil/highwayhash/highwayhash/hh_neon.cc
-    )
+    target_sources(zeek_hhash_obj PRIVATE ${hhash_dir}/hh_neon.cc)
 elseif (${COMPILER_ARCHITECTURE} STREQUAL "power")
-    set_source_files_properties(../auxil/highwayhash/highwayhash/hh_vsx.cc PROPERTIES COMPILE_FLAGS
+    target_sources(zeek_hhash_obj PRIVATE ${hhash_dir}/hh_vsx.cc)
+    set_source_files_properties(${hhash_dir}/hh_vsx.cc PROPERTIES COMPILE_FLAGS
         -mvsx)
-    list(APPEND HH_SRCS
-        ../auxil/highwayhash/highwayhash/hh_vsx.cc
-    )
 elseif(${COMPILER_ARCHITECTURE} STREQUAL "x86_64")
+    target_sources(
+        zeek_hhash_obj
+        PRIVATE
+            ${hhash_dir}/hh_avx2.cc
+            ${hhash_dir}/hh_sse41.cc)
     if (MSVC)
         set(_avx_flag /arch:AVX2)
         # Using an undocumented compiler flag: https://stackoverflow.com/questions/64053597/how-do-i-enable-sse4-1-and-sse3-but-not-avx-in-msvc/69328426#69328426

@ -500,15 +508,10 @@ elseif(${COMPILER_ARCHITECTURE} STREQUAL "x86_64")
         set(_sse_flag -msse4.1)
     endif()

-    set_source_files_properties(../auxil/highwayhash/highwayhash/hh_avx2.cc PROPERTIES COMPILE_FLAGS
+    set_source_files_properties(${hhash_dir}/hh_avx2.cc PROPERTIES COMPILE_FLAGS
         ${_avx_flag})
-    set_source_files_properties(../auxil/highwayhash/highwayhash/hh_sse41.cc PROPERTIES COMPILE_FLAGS
+    set_source_files_properties(${hhash_dir}/hh_sse41.cc PROPERTIES COMPILE_FLAGS
         ${_sse_flag})
-
-    list(APPEND HH_SRCS
-        ../auxil/highwayhash/highwayhash/hh_avx2.cc
-        ../auxil/highwayhash/highwayhash/hh_sse41.cc
-    )
 endif ()

 set(zeek_SRCS

@ -538,92 +541,59 @@ set(zeek_SRCS
 collect_headers(zeek_HEADERS ${zeek_SRCS})

 add_library(zeek_objs OBJECT ${zeek_SRCS})
+target_link_libraries(zeek_objs PRIVATE $<BUILD_INTERFACE:zeek_internal>)
+add_dependencies(zeek_objs zeek_autogen_files)
+add_clang_tidy_files(${zeek_SRCS})
+zeek_target_link_libraries(zeek_objs)

-if (ZEEK_STANDALONE)
-    add_executable(zeek main.cc
-                   $<TARGET_OBJECTS:zeek_objs>
-                   ${zeek_HEADERS}
-                   ${bro_SUBDIR_LIBS}
-                   ${bro_PLUGIN_LIBS}
-    )
+if (TARGET zeek_exe)
+    target_sources(zeek_exe PRIVATE main.cc ${zeek_HEADERS})

     # npcap/winpcap need to be loaded in delayed mode so that we can set the load path
     # correctly at runtime. See https://npcap.com/guide/npcap-devguide.html#npcap-feature-native
     # for why this is necessary.
     if ( MSVC AND HAVE_WPCAP )
         set(zeekdeps ${zeekdeps} delayimp.lib)
-        set_target_properties(zeek PROPERTIES LINK_FLAGS "/DELAYLOAD:wpcap.dll")
+        set_target_properties(zeek_exe PROPERTIES LINK_FLAGS "/DELAYLOAD:wpcap.dll")
     endif()

-    target_link_libraries(zeek ${bro_PLUGIN_LINK_LIBS} ${zeekdeps} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS})
+    target_link_libraries(zeek_exe PRIVATE ${zeekdeps} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS})

     # Export symbols from zeek executable for use by plugins
-    set_target_properties(zeek PROPERTIES ENABLE_EXPORTS TRUE)
+    set_target_properties(zeek_exe PROPERTIES ENABLE_EXPORTS TRUE)

     if ( MSVC )
         set(WINDOWS_EXPORT_ALL_SYMBOLS ON)
     endif ()
-
-    install(TARGETS zeek RUNTIME DESTINATION bin)
-
-    set(BRO_EXE zeek
-        CACHE STRING "Zeek executable binary" FORCE)
-
-    set(BRO_EXE_PATH ${CMAKE_CURRENT_BINARY_DIR}/zeek
-        CACHE STRING "Path to Zeek executable binary" FORCE)
 endif()

-if (NOT ZEEK_STANDALONE OR CONAN_EXPORTED)
-    add_library(libzeek STATIC $<TARGET_OBJECTS:zeek_objs>
-                ${zeek_HEADERS}
-                ${bro_SUBDIR_LIBS}
-                ${bro_PLUGIN_LIBS})
-
-    target_link_libraries(libzeek PUBLIC ${zeekdeps}
-                          ${CMAKE_THREAD_LIBS_INIT}
-                          ${CMAKE_DL_LIBS}
-                          ${bro_SUBDIR_LIBS}
-                          ${bro_PLUGIN_LIBS})
-
-    target_include_directories(libzeek PUBLIC
-                               ${CMAKE_SOURCE_DIR}/zeek/src
-                               ${CMAKE_SOURCE_DIR}/zeek/src/include
-                               ${CMAKE_BINARY_DIR}
-                               ${CMAKE_BINARY_DIR}/zeek/src
-                               ${CMAKE_BINARY_DIR}/zeek/src/include)
-
-    install(TARGETS libzeek LIBRARY DESTINATION lib)
-endif()
+if (TARGET zeek_lib)
+    target_sources(zeek_lib PRIVATE ${zeek_HEADERS})
+    target_link_libraries(
+        zeek_lib
+        PUBLIC
+            ${zeekdeps}
+            ${CMAKE_THREAD_LIBS_INIT}
+            ${CMAKE_DL_LIBS})
+endif()

-# Target to create all the autogenerated files.
-add_custom_target(generate_outputs_stage1)
-add_dependencies(generate_outputs_stage1 ${bro_ALL_GENERATED_OUTPUTS})
-
-# Target to create the joint includes files that pull in the bif code.
-bro_bif_create_includes(generate_outputs_stage2a ${CMAKE_CURRENT_BINARY_DIR} "${bro_AUTO_BIFS}")
-bro_bif_create_register(generate_outputs_stage2b ${CMAKE_CURRENT_BINARY_DIR} "${bro_REGISTER_BIFS}")
-add_dependencies(generate_outputs_stage2a generate_outputs_stage1)
-add_dependencies(generate_outputs_stage2b generate_outputs_stage1)
-
-# Global target to trigger creation of autogenerated code.
-add_custom_target(generate_outputs)
-add_dependencies(generate_outputs generate_outputs_stage2a generate_outputs_stage2b)
-
-# Build __load__.zeek files for standard *.bif.zeek.
-bro_bif_create_loader(bif_loader "${bro_BASE_BIF_SCRIPTS}")
-add_dependencies(bif_loader ${bro_PLUGIN_DEPS} ${bro_SUBDIR_DEPS})
-add_dependencies(zeek_objs bif_loader)
-
-# Build __load__.zeek files for plugins/*.bif.zeek.
-bro_bif_create_loader(bif_loader_plugins "${bro_PLUGIN_BIF_SCRIPTS}")
-add_dependencies(bif_loader_plugins ${bro_PLUGIN_DEPS} ${bro_SUBDIR_DEPS})
-add_dependencies(zeek_objs bif_loader_plugins)
+zeek_include_directories(
+    ${CMAKE_BINARY_DIR}
+    ${CMAKE_BINARY_DIR}/zeek/src
+    ${CMAKE_BINARY_DIR}/zeek/src/include
+    ${CMAKE_CURRENT_BINARY_DIR}
+    ${CMAKE_CURRENT_SOURCE_DIR}/include
+    ${CMAKE_SOURCE_DIR}/zeek/src
+    ${CMAKE_SOURCE_DIR}/zeek/src/include
+)

 # Install *.bif.zeek.
 install(DIRECTORY ${PROJECT_BINARY_DIR}/scripts/base/bif DESTINATION ${ZEEK_SCRIPT_INSTALL_PATH}/base)

 # Create plugin directory at install time.
-install(DIRECTORY DESTINATION ${BRO_PLUGIN_INSTALL_PATH})
+install(DIRECTORY DESTINATION ${ZEEK_PLUGIN_DIR})

 # Make clean removes the bif directory.
 set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES ${PROJECT_BINARY_DIR}/scripts/base/bif)
@ -46,13 +46,7 @@ public:
     bool Failed() const { return failed; }
     bool Valid() const { return ! failed; }

-    bool Expired() const
-        {
-        if ( ! req_host.empty() && addrs.empty() )
-            return false; // nothing to expire
-
-        return util::current_time() > (creation_time + req_ttl);
-        }
+    bool Expired() const { return util::current_time() > (creation_time + req_ttl); }

     void Merge(const DNS_MappingPtr& other);
src/Desc.cc | 11
@ -432,4 +432,15 @@ std::string obj_desc(const Obj* o)
     return std::string(d.Description());
     }

+std::string obj_desc_short(const Obj* o)
+    {
+    static ODesc d;
+
+    d.SetShort(true);
+    d.Clear();
+    o->Describe(&d);
+
+    return std::string(d.Description());
+    }
+
 } // namespace zeek
@ -230,6 +230,9 @@ protected:
 // IntrusivePtr because the latter is harder to deal with when making
 // calls from a debugger like lldb, which is the main use of this function.
 class Obj;
-extern std::string obj_desc(const Obj* o);
+std::string obj_desc(const Obj* o);
+
+// Same as obj_desc(), but ensure it is short and don't include location info.
+std::string obj_desc_short(const Obj* o);

 } // namespace zeek
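// Intended mainly for interactive use from a debugger, per the comment above;
// a hypothetical lldb invocation (the pointer variable `e` is illustrative):
//   (lldb) p zeek::obj_desc_short(e).c_str()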
src/Event.cc | 12
@ -189,22 +189,12 @@ void EventMgr::Describe(ODesc* d) const

 void EventMgr::Process()
     {
-    // If we don't have a source, or the source is closed, or we're
-    // reading live (which includes pseudo-realtime), advance the time
-    // here to the current time since otherwise it won't move forward.
-    iosource::PktSrc* pkt_src = iosource_mgr->GetPktSrc();
-    if ( ! pkt_src || ! pkt_src->IsOpen() || run_state::reading_live )
-        run_state::detail::update_network_time(util::current_time());
-
     queue_flare.Extinguish();

     // While it seems like the most logical thing to do, we don't want
     // to call Drain() as part of this method. It will get called at
     // the end of net_run after all of the sources have been processed
-    // and had the opportunity to spawn new events. We could use
-    // zeek::iosource_mgr->Wakeup() instead of making EventMgr an IOSource,
-    // but then we couldn't update the time above and nothing would
-    // drive it forward.
+    // and had the opportunity to spawn new events.
     }

 void EventMgr::InitPostScript()
src/Expr.cc | 283
@ -1287,25 +1287,6 @@ void BinaryExpr::PromoteForInterval(ExprPtr& op)
     op = make_intrusive<ArithCoerceExpr>(op, TYPE_DOUBLE);
     }

-bool BinaryExpr::IsScalarAggregateOp() const
-    {
-    const bool is_vec1 = IsAggr(op1->GetType()->Tag()) || is_list(op1);
-    const bool is_vec2 = IsAggr(op2->GetType()->Tag()) || is_list(op2);
-    const bool either_vec = is_vec1 || is_vec2;
-    const bool both_vec = is_vec1 && is_vec2;
-
-    return either_vec && ! both_vec;
-    }
-
-void BinaryExpr::CheckScalarAggOp() const
-    {
-    if ( ! IsError() && IsScalarAggregateOp() )
-        {
-        reporter->Warning("mixing vector and scalar operands is deprecated (%s) (%s)",
-                          type_name(op1->GetType()->Tag()), type_name(op2->GetType()->Tag()));
-        }
-    }
-
 bool BinaryExpr::CheckForRHSList()
     {
     if ( op2->Tag() != EXPR_LIST )

@ -1420,25 +1401,11 @@ IncrExpr::IncrExpr(ExprTag arg_tag, ExprPtr arg_op) : UnaryExpr(arg_tag, arg_op-
     return;

     const auto& t = op->GetType();

-    if ( IsVector(t->Tag()) )
-        {
-        if ( ! IsIntegral(t->AsVectorType()->Yield()->Tag()) )
-            ExprError("vector elements must be integral for increment operator");
-        else
-            {
-            reporter->Warning("increment/decrement operations for vectors deprecated");
-            SetType(t);
-            }
-        }
-    else
-        {
     if ( ! IsIntegral(t->Tag()) )
         ExprError("requires an integral operand");
     else
         SetType(t);
     }
-    }

 ValPtr IncrExpr::DoSingleEval(Frame* f, Val* v) const
     {

@ -1469,27 +1436,10 @@ ValPtr IncrExpr::Eval(Frame* f) const
     if ( ! v )
         return nullptr;

-    if ( is_vector(v) )
-        {
-        VectorValPtr v_vec{NewRef{}, v->AsVectorVal()};
-
-        for ( unsigned int i = 0; i < v_vec->Size(); ++i )
-            {
-            auto elt = v_vec->ValAt(i);
-            if ( elt )
-                v_vec->Assign(i, DoSingleEval(f, elt.get()));
-            }
-
-        op->Assign(f, std::move(v_vec));
-        return v;
-        }
-    else
-        {
     auto new_v = DoSingleEval(f, v.get());
     op->Assign(f, new_v);
     return new_v;
     }
-    }

 ComplementExpr::ComplementExpr(ExprPtr arg_op) : UnaryExpr(EXPR_COMPLEMENT, std::move(arg_op))
     {

@ -1627,21 +1577,40 @@ ValPtr SizeExpr::Fold(Val* v) const
     return v->SizeVal();
     }

+// Fill op1 and op2 type tags into bt1 and bt2.
+//
+// If both operands are vectors, use their yield type tag. If
+// either, but not both operands, is a vector, cause an expression
+// error and return false.
+static bool get_types_from_scalars_or_vectors(Expr* e, TypeTag& bt1, TypeTag& bt2)
+    {
+    bt1 = e->GetOp1()->GetType()->Tag();
+    bt2 = e->GetOp2()->GetType()->Tag();
+
+    if ( IsVector(bt1) && IsVector(bt2) )
+        {
+        bt1 = e->GetOp1()->GetType()->AsVectorType()->Yield()->Tag();
+        bt2 = e->GetOp2()->GetType()->AsVectorType()->Yield()->Tag();
+        }
+    else if ( IsVector(bt1) || IsVector(bt2) )
+        {
+        e->Error("cannot mix vector and scalar operands");
+        e->SetError();
+        return false;
+        }
+
+    return true;
+    }
+
 AddExpr::AddExpr(ExprPtr arg_op1, ExprPtr arg_op2)
     : BinaryExpr(EXPR_ADD, std::move(arg_op1), std::move(arg_op2))
     {
     if ( IsError() )
         return;

-    TypeTag bt1 = op1->GetType()->Tag();
-
-    if ( IsVector(bt1) )
-        bt1 = op1->GetType()->AsVectorType()->Yield()->Tag();
-
-    TypeTag bt2 = op2->GetType()->Tag();
-
-    if ( IsVector(bt2) )
-        bt2 = op2->GetType()->AsVectorType()->Yield()->Tag();
+    TypeTag bt1, bt2;
+    if ( ! get_types_from_scalars_or_vectors(this, bt1, bt2) )
+        return;

     TypePtr base_result_type;

@ -1656,11 +1625,9 @@ AddExpr::AddExpr(ExprPtr arg_op1, ExprPtr arg_op2)
     else
         ExprError("requires arithmetic operands");

-    CheckScalarAggOp();
-
     if ( base_result_type )
         {
-        if ( is_vector(op1) || is_vector(op2) )
+        if ( is_vector(op1) )
             SetType(make_intrusive<VectorType>(std::move(base_result_type)));
         else
             SetType(std::move(base_result_type));

@ -1797,13 +1764,9 @@ SubExpr::SubExpr(ExprPtr arg_op1, ExprPtr arg_op2)
     const auto& t1 = op1->GetType();
     const auto& t2 = op2->GetType();

-    TypeTag bt1 = t1->Tag();
-    if ( IsVector(bt1) )
-        bt1 = t1->AsVectorType()->Yield()->Tag();
-
-    TypeTag bt2 = t2->Tag();
-    if ( IsVector(bt2) )
-        bt2 = t2->AsVectorType()->Yield()->Tag();
+    TypeTag bt1, bt2;
+    if ( ! get_types_from_scalars_or_vectors(this, bt1, bt2) )
+        return;

     TypePtr base_result_type;

@ -1827,11 +1790,9 @@ SubExpr::SubExpr(ExprPtr arg_op1, ExprPtr arg_op2)
     else
         ExprError("requires arithmetic operands");

-    CheckScalarAggOp();
-
     if ( base_result_type )
         {
-        if ( is_vector(op1) || is_vector(op2) )
+        if ( is_vector(op1) )
             SetType(make_intrusive<VectorType>(std::move(base_result_type)));
         else
             SetType(std::move(base_result_type));

@ -1901,15 +1862,9 @@ TimesExpr::TimesExpr(ExprPtr arg_op1, ExprPtr arg_op2)

     Canonicalize();

-    TypeTag bt1 = op1->GetType()->Tag();
-
-    if ( IsVector(bt1) )
-        bt1 = op1->GetType()->AsVectorType()->Yield()->Tag();
-
-    TypeTag bt2 = op2->GetType()->Tag();
-
-    if ( IsVector(bt2) )
-        bt2 = op2->GetType()->AsVectorType()->Yield()->Tag();
+    TypeTag bt1, bt2;
+    if ( ! get_types_from_scalars_or_vectors(this, bt1, bt2) )
+        return;

     if ( bt1 == TYPE_INTERVAL || bt2 == TYPE_INTERVAL )
         {

@ -1922,8 +1877,6 @@ TimesExpr::TimesExpr(ExprPtr arg_op1, ExprPtr arg_op2)
         PromoteType(max_type(bt1, bt2), is_vector(op1) || is_vector(op2));
     else
         ExprError("requires arithmetic operands");
-
-    CheckScalarAggOp();
     }

 void TimesExpr::Canonicalize()

@ -1939,15 +1892,9 @@ DivideExpr::DivideExpr(ExprPtr arg_op1, ExprPtr arg_op2)
     if ( IsError() )
         return;

-    TypeTag bt1 = op1->GetType()->Tag();
-
-    if ( IsVector(bt1) )
-        bt1 = op1->GetType()->AsVectorType()->Yield()->Tag();
-
-    TypeTag bt2 = op2->GetType()->Tag();
-
-    if ( IsVector(bt2) )
-        bt2 = op2->GetType()->AsVectorType()->Yield()->Tag();
+    TypeTag bt1, bt2;
+    if ( ! get_types_from_scalars_or_vectors(this, bt1, bt2) )
+        return;

     if ( bt1 == TYPE_INTERVAL || bt2 == TYPE_INTERVAL )
         {

@ -1955,7 +1902,7 @@ DivideExpr::DivideExpr(ExprPtr arg_op1, ExprPtr arg_op2)
         PromoteForInterval(IsArithmetic(bt1) ? op1 : op2);
     else if ( bt1 == TYPE_INTERVAL && bt2 == TYPE_INTERVAL )
         {
-        if ( is_vector(op1) || is_vector(op2) )
+        if ( is_vector(op1) )
             SetType(make_intrusive<VectorType>(base_type(TYPE_DOUBLE)));
         else
             SetType(base_type(TYPE_DOUBLE));

@ -1972,8 +1919,6 @@ DivideExpr::DivideExpr(ExprPtr arg_op1, ExprPtr arg_op2)
     else
         ExprError("requires arithmetic operands");
-
-    CheckScalarAggOp();
     }

 ValPtr DivideExpr::AddrFold(Val* v1, Val* v2) const

@ -2007,22 +1952,14 @@ ModExpr::ModExpr(ExprPtr arg_op1, ExprPtr arg_op2)
     if ( IsError() )
         return;

-    TypeTag bt1 = op1->GetType()->Tag();
-
-    if ( IsVector(bt1) )
-        bt1 = op1->GetType()->AsVectorType()->Yield()->Tag();
-
-    TypeTag bt2 = op2->GetType()->Tag();
-
-    if ( IsVector(bt2) )
-        bt2 = op2->GetType()->AsVectorType()->Yield()->Tag();
+    TypeTag bt1, bt2;
+    if ( ! get_types_from_scalars_or_vectors(this, bt1, bt2) )
+        return;

     if ( BothIntegral(bt1, bt2) )
         PromoteType(max_type(bt1, bt2), is_vector(op1) || is_vector(op2));
     else
         ExprError("requires integral operands");
-
-    CheckScalarAggOp();
     }

 BoolExpr::BoolExpr(ExprTag arg_tag, ExprPtr arg_op1, ExprPtr arg_op2)

@ -2031,24 +1968,14 @@ BoolExpr::BoolExpr(ExprTag arg_tag, ExprPtr arg_op1, ExprPtr arg_op2)
|
||||||
if ( IsError() )
|
if ( IsError() )
|
||||||
return;
|
return;
|
||||||
|
|
||||||
TypeTag bt1 = op1->GetType()->Tag();
|
TypeTag bt1, bt2;
|
||||||
|
if ( ! get_types_from_scalars_or_vectors(this, bt1, bt2) )
|
||||||
if ( IsVector(bt1) )
|
return;
|
||||||
bt1 = op1->GetType()->AsVectorType()->Yield()->Tag();
|
|
||||||
|
|
||||||
TypeTag bt2 = op2->GetType()->Tag();
|
|
||||||
|
|
||||||
if ( IsVector(bt2) )
|
|
||||||
bt2 = op2->GetType()->AsVectorType()->Yield()->Tag();
|
|
||||||
|
|
||||||
if ( BothBool(bt1, bt2) )
|
if ( BothBool(bt1, bt2) )
|
||||||
{
|
{
|
||||||
if ( is_vector(op1) || is_vector(op2) )
|
if ( is_vector(op1) )
|
||||||
{
|
|
||||||
if ( ! (is_vector(op1) && is_vector(op2)) )
|
|
||||||
reporter->Warning("mixing vector and scalar operands is deprecated");
|
|
||||||
SetType(make_intrusive<VectorType>(base_type(TYPE_BOOL)));
|
SetType(make_intrusive<VectorType>(base_type(TYPE_BOOL)));
|
||||||
}
|
|
||||||
else
|
else
|
||||||
SetType(base_type(TYPE_BOOL));
|
SetType(base_type(TYPE_BOOL));
|
||||||
}
|
}
|
||||||
|
@ -2095,46 +2022,7 @@ ValPtr BoolExpr::Eval(Frame* f) const
|
||||||
if ( ! is_vec1 && ! is_vec2 )
|
if ( ! is_vec1 && ! is_vec2 )
|
||||||
return DoSingleEval(f, std::move(v1), op2.get());
|
return DoSingleEval(f, std::move(v1), op2.get());
|
||||||
|
|
||||||
// Handle scalar op vector or vector op scalar
|
// Both are vectors.
|
||||||
// We can't short-circuit everything since we need to eval
|
|
||||||
// a vector in order to find out its length.
|
|
||||||
if ( ! (is_vec1 && is_vec2) )
|
|
||||||
{ // Only one is a vector.
|
|
||||||
ValPtr scalar_v;
|
|
||||||
VectorValPtr vector_v;
|
|
||||||
|
|
||||||
if ( is_vec1 )
|
|
||||||
{
|
|
||||||
scalar_v = op2->Eval(f);
|
|
||||||
vector_v = {AdoptRef{}, v1.release()->AsVectorVal()};
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
scalar_v = std::move(v1);
|
|
||||||
vector_v = {AdoptRef{}, op2->Eval(f).release()->AsVectorVal()};
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( ! scalar_v || ! vector_v )
|
|
||||||
return nullptr;
|
|
||||||
|
|
||||||
VectorValPtr result;
|
|
||||||
|
|
||||||
// It's either an EXPR_AND_AND or an EXPR_OR_OR.
|
|
||||||
bool is_and = (tag == EXPR_AND_AND);
|
|
||||||
|
|
||||||
if ( scalar_v->IsZero() == is_and )
|
|
||||||
{
|
|
||||||
result = make_intrusive<VectorVal>(GetType<VectorType>());
|
|
||||||
result->Resize(vector_v->Size());
|
|
||||||
result->AssignRepeat(0, result->Size(), std::move(scalar_v));
|
|
||||||
}
|
|
||||||
else
|
|
||||||
result = std::move(vector_v);
|
|
||||||
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only case remaining: both are vectors.
|
|
||||||
auto v2 = op2->Eval(f);
|
auto v2 = op2->Eval(f);
|
||||||
|
|
||||||
if ( ! v2 )
|
if ( ! v2 )
|
||||||
|
@ -2206,8 +2094,6 @@ BitExpr::BitExpr(ExprTag arg_tag, ExprPtr arg_op1, ExprPtr arg_op2)
|
||||||
return; // because following scalar check isn't apt
|
return; // because following scalar check isn't apt
|
||||||
}
|
}
|
||||||
|
|
||||||
CheckScalarAggOp();
|
|
||||||
|
|
||||||
if ( (bt1 == TYPE_COUNT) && (bt2 == TYPE_COUNT) )
|
if ( (bt1 == TYPE_COUNT) && (bt2 == TYPE_COUNT) )
|
||||||
{
|
{
|
||||||
if ( is_vector(op1) || is_vector(op2) )
|
if ( is_vector(op1) || is_vector(op2) )
|
||||||
|
@ -2249,15 +2135,11 @@ EqExpr::EqExpr(ExprTag arg_tag, ExprPtr arg_op1, ExprPtr arg_op2)
|
||||||
const auto& t1 = op1->GetType();
|
const auto& t1 = op1->GetType();
|
||||||
const auto& t2 = op2->GetType();
|
const auto& t2 = op2->GetType();
|
||||||
|
|
||||||
TypeTag bt1 = t1->Tag();
|
TypeTag bt1, bt2;
|
||||||
if ( IsVector(bt1) )
|
if ( ! get_types_from_scalars_or_vectors(this, bt1, bt2) )
|
||||||
bt1 = t1->AsVectorType()->Yield()->Tag();
|
return;
|
||||||
|
|
||||||
TypeTag bt2 = t2->Tag();
|
if ( is_vector(op1) )
|
||||||
if ( IsVector(bt2) )
|
|
||||||
bt2 = t2->AsVectorType()->Yield()->Tag();
|
|
||||||
|
|
||||||
if ( is_vector(op1) || is_vector(op2) )
|
|
||||||
SetType(make_intrusive<VectorType>(base_type(TYPE_BOOL)));
|
SetType(make_intrusive<VectorType>(base_type(TYPE_BOOL)));
|
||||||
else
|
else
|
||||||
SetType(base_type(TYPE_BOOL));
|
SetType(base_type(TYPE_BOOL));
|
||||||
|
@ -2310,8 +2192,6 @@ EqExpr::EqExpr(ExprTag arg_tag, ExprPtr arg_op1, ExprPtr arg_op2)
|
||||||
|
|
||||||
else
|
else
|
||||||
ExprError("type clash in comparison");
|
ExprError("type clash in comparison");
|
||||||
|
|
||||||
CheckScalarAggOp();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void EqExpr::Canonicalize()
|
void EqExpr::Canonicalize()
|
||||||
|
@ -2364,15 +2244,11 @@ RelExpr::RelExpr(ExprTag arg_tag, ExprPtr arg_op1, ExprPtr arg_op2)
|
||||||
const auto& t1 = op1->GetType();
|
const auto& t1 = op1->GetType();
|
||||||
const auto& t2 = op2->GetType();
|
const auto& t2 = op2->GetType();
|
||||||
|
|
||||||
TypeTag bt1 = t1->Tag();
|
TypeTag bt1, bt2;
|
||||||
if ( IsVector(bt1) )
|
if ( ! get_types_from_scalars_or_vectors(this, bt1, bt2) )
|
||||||
bt1 = t1->AsVectorType()->Yield()->Tag();
|
return;
|
||||||
|
|
||||||
TypeTag bt2 = t2->Tag();
|
if ( is_vector(op1) )
|
||||||
if ( IsVector(bt2) )
|
|
||||||
bt2 = t2->AsVectorType()->Yield()->Tag();
|
|
||||||
|
|
||||||
if ( is_vector(op1) || is_vector(op2) )
|
|
||||||
SetType(make_intrusive<VectorType>(base_type(TYPE_BOOL)));
|
SetType(make_intrusive<VectorType>(base_type(TYPE_BOOL)));
|
||||||
else
|
else
|
||||||
SetType(base_type(TYPE_BOOL));
|
SetType(base_type(TYPE_BOOL));
|
||||||
|
@ -2392,8 +2268,6 @@ RelExpr::RelExpr(ExprTag arg_tag, ExprPtr arg_op1, ExprPtr arg_op2)
|
||||||
else if ( bt1 != TYPE_TIME && bt1 != TYPE_INTERVAL && bt1 != TYPE_PORT && bt1 != TYPE_ADDR &&
|
else if ( bt1 != TYPE_TIME && bt1 != TYPE_INTERVAL && bt1 != TYPE_PORT && bt1 != TYPE_ADDR &&
|
||||||
bt1 != TYPE_STRING )
|
bt1 != TYPE_STRING )
|
||||||
ExprError("illegal comparison");
|
ExprError("illegal comparison");
|
||||||
|
|
||||||
CheckScalarAggOp();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void RelExpr::Canonicalize()
|
void RelExpr::Canonicalize()
|
||||||
|
@ -3189,6 +3063,15 @@ void IndexExpr::ExprDescribe(ODesc* d) const
|
||||||
d->Add("]");
|
d->Add("]");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void report_field_deprecation(const RecordType* rt, const Expr* e, int field,
|
||||||
|
bool has_check = false)
|
||||||
|
{
|
||||||
|
reporter->Deprecation(util::fmt("%s (%s)",
|
||||||
|
rt->GetFieldDeprecationWarning(field, has_check).c_str(),
|
||||||
|
obj_desc_short(e).c_str()),
|
||||||
|
e->GetLocationInfo());
|
||||||
|
}
|
||||||
|
|
||||||
FieldExpr::FieldExpr(ExprPtr arg_op, const char* arg_field_name)
|
FieldExpr::FieldExpr(ExprPtr arg_op, const char* arg_field_name)
|
||||||
: UnaryExpr(EXPR_FIELD, std::move(arg_op)), field_name(util::copy_string(arg_field_name)),
|
: UnaryExpr(EXPR_FIELD, std::move(arg_op)), field_name(util::copy_string(arg_field_name)),
|
||||||
td(nullptr), field(0)
|
td(nullptr), field(0)
|
||||||
|
@ -3211,7 +3094,7 @@ FieldExpr::FieldExpr(ExprPtr arg_op, const char* arg_field_name)
|
||||||
td = rt->FieldDecl(field);
|
td = rt->FieldDecl(field);
|
||||||
|
|
||||||
if ( rt->IsFieldDeprecated(field) )
|
if ( rt->IsFieldDeprecated(field) )
|
||||||
Warn(rt->GetFieldDeprecationWarning(field, false).c_str());
|
report_field_deprecation(rt, this, field);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3296,7 +3179,7 @@ HasFieldExpr::HasFieldExpr(ExprPtr arg_op, const char* arg_field_name)
|
||||||
if ( field < 0 )
|
if ( field < 0 )
|
||||||
ExprError("no such field in record");
|
ExprError("no such field in record");
|
||||||
else if ( rt->IsFieldDeprecated(field) )
|
else if ( rt->IsFieldDeprecated(field) )
|
||||||
Warn(rt->GetFieldDeprecationWarning(field, true).c_str());
|
report_field_deprecation(rt, this, field, true);
|
||||||
|
|
||||||
SetType(base_type(TYPE_BOOL));
|
SetType(base_type(TYPE_BOOL));
|
||||||
}
|
}
|
||||||
|
@ -3427,7 +3310,7 @@ RecordConstructorExpr::RecordConstructorExpr(RecordTypePtr known_rt, ListExprPtr
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if ( known_rt->IsFieldDeprecated(i) )
|
else if ( known_rt->IsFieldDeprecated(i) )
|
||||||
Warn(known_rt->GetFieldDeprecationWarning(i, false).c_str());
|
report_field_deprecation(known_rt.get(), this, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
ValPtr RecordConstructorExpr::Eval(Frame* f) const
|
ValPtr RecordConstructorExpr::Eval(Frame* f) const
|
||||||
|
@ -4218,7 +4101,7 @@ RecordCoerceExpr::RecordCoerceExpr(ExprPtr arg_op, RecordTypePtr r)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if ( t_r->IsFieldDeprecated(i) )
|
else if ( t_r->IsFieldDeprecated(i) )
|
||||||
Warn(t_r->GetFieldDeprecationWarning(i, false).c_str());
|
report_field_deprecation(t_r, this, i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -4332,6 +4215,10 @@ TableCoerceExpr::TableCoerceExpr(ExprPtr arg_op, TableTypePtr tt, bool type_chec
|
||||||
SetError();
|
SetError();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ( op->Tag() == EXPR_TABLE_COERCE && op->GetType() == tt )
|
||||||
|
// Avoid double-coercion.
|
||||||
|
op = op->GetOp1();
|
||||||
}
|
}
|
||||||
|
|
||||||
SetType(std::move(tt));
|
SetType(std::move(tt));
|
||||||
|
@ -4750,14 +4637,14 @@ void CallExpr::ExprDescribe(ODesc* d) const
|
||||||
args->Describe(d);
|
args->Describe(d);
|
||||||
}
|
}
|
||||||
|
|
||||||
LambdaExpr::LambdaExpr(std::unique_ptr<function_ingredients> arg_ing, IDPList arg_outer_ids,
|
LambdaExpr::LambdaExpr(std::unique_ptr<FunctionIngredients> arg_ing, IDPList arg_outer_ids,
|
||||||
StmtPtr when_parent)
|
StmtPtr when_parent)
|
||||||
: Expr(EXPR_LAMBDA)
|
: Expr(EXPR_LAMBDA)
|
||||||
{
|
{
|
||||||
ingredients = std::move(arg_ing);
|
ingredients = std::move(arg_ing);
|
||||||
outer_ids = std::move(arg_outer_ids);
|
outer_ids = std::move(arg_outer_ids);
|
||||||
|
|
||||||
SetType(ingredients->id->GetType());
|
SetType(ingredients->GetID()->GetType());
|
||||||
|
|
||||||
if ( ! CheckCaptures(when_parent) )
|
if ( ! CheckCaptures(when_parent) )
|
||||||
{
|
{
|
||||||
|
@ -4767,9 +4654,9 @@ LambdaExpr::LambdaExpr(std::unique_ptr<function_ingredients> arg_ing, IDPList ar
|
||||||
|
|
||||||
// Install a dummy version of the function globally for use only
|
// Install a dummy version of the function globally for use only
|
||||||
// when broker provides a closure.
|
// when broker provides a closure.
|
||||||
auto dummy_func = make_intrusive<ScriptFunc>(ingredients->id);
|
auto dummy_func = make_intrusive<ScriptFunc>(ingredients->GetID());
|
||||||
dummy_func->AddBody(ingredients->body, ingredients->inits, ingredients->frame_size,
|
dummy_func->AddBody(ingredients->Body(), ingredients->Inits(), ingredients->FrameSize(),
|
||||||
ingredients->priority);
|
ingredients->Priority());
|
||||||
|
|
||||||
dummy_func->SetOuterIDs(outer_ids);
|
dummy_func->SetOuterIDs(outer_ids);
|
||||||
|
|
||||||
|
@ -4804,7 +4691,7 @@ LambdaExpr::LambdaExpr(std::unique_ptr<function_ingredients> arg_ing, IDPList ar
|
||||||
|
|
||||||
auto v = make_intrusive<FuncVal>(std::move(dummy_func));
|
auto v = make_intrusive<FuncVal>(std::move(dummy_func));
|
||||||
lambda_id->SetVal(std::move(v));
|
lambda_id->SetVal(std::move(v));
|
||||||
lambda_id->SetType(ingredients->id->GetType());
|
lambda_id->SetType(ingredients->GetID()->GetType());
|
||||||
lambda_id->SetConst();
|
lambda_id->SetConst();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4892,14 +4779,14 @@ bool LambdaExpr::CheckCaptures(StmtPtr when_parent)
|
||||||
|
|
||||||
ScopePtr LambdaExpr::GetScope() const
|
ScopePtr LambdaExpr::GetScope() const
|
||||||
{
|
{
|
||||||
return ingredients->scope;
|
return ingredients->Scope();
|
||||||
}
|
}
|
||||||
|
|
||||||
ValPtr LambdaExpr::Eval(Frame* f) const
|
ValPtr LambdaExpr::Eval(Frame* f) const
|
||||||
{
|
{
|
||||||
auto lamb = make_intrusive<ScriptFunc>(ingredients->id);
|
auto lamb = make_intrusive<ScriptFunc>(ingredients->GetID());
|
||||||
lamb->AddBody(ingredients->body, ingredients->inits, ingredients->frame_size,
|
lamb->AddBody(ingredients->Body(), ingredients->Inits(), ingredients->FrameSize(),
|
||||||
ingredients->priority);
|
ingredients->Priority());
|
||||||
|
|
||||||
lamb->CreateCaptures(f);
|
lamb->CreateCaptures(f);
|
||||||
|
|
||||||
|
@ -4913,7 +4800,7 @@ ValPtr LambdaExpr::Eval(Frame* f) const
|
||||||
void LambdaExpr::ExprDescribe(ODesc* d) const
|
void LambdaExpr::ExprDescribe(ODesc* d) const
|
||||||
{
|
{
|
||||||
d->Add(expr_name(Tag()));
|
d->Add(expr_name(Tag()));
|
||||||
ingredients->body->Describe(d);
|
ingredients->Body()->Describe(d);
|
||||||
}
|
}
|
||||||
|
|
||||||
TraversalCode LambdaExpr::Traverse(TraversalCallback* cb) const
|
TraversalCode LambdaExpr::Traverse(TraversalCallback* cb) const
|
||||||
|
@ -4928,7 +4815,7 @@ TraversalCode LambdaExpr::Traverse(TraversalCallback* cb) const
|
||||||
tc = lambda_id->Traverse(cb);
|
tc = lambda_id->Traverse(cb);
|
||||||
HANDLE_TC_EXPR_PRE(tc);
|
HANDLE_TC_EXPR_PRE(tc);
|
||||||
|
|
||||||
tc = ingredients->body->Traverse(cb);
|
tc = ingredients->Body()->Traverse(cb);
|
||||||
HANDLE_TC_EXPR_PRE(tc);
|
HANDLE_TC_EXPR_PRE(tc);
|
||||||
|
|
||||||
tc = cb->PostExpr(this);
|
tc = cb->PostExpr(this);
|
||||||
|
|
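The helper these constructors now share is introduced earlier in this diff; only its error tail is visible at the top of this excerpt. As a rough, hedged sketch of the contract implied by the call sites (a bool-returning get_types_from_scalars_or_vectors(this, bt1, bt2) that rejects mixed vector/scalar operands), it presumably looks something like the following; the body, the Expr* parameter, and the use of IsVector()/Yield() here are assumptions, not the committed code:

// Sketch only: inferred from the call sites above and the visible error tail.
// The actual implementation in src/Expr.cc may differ.
static bool get_types_from_scalars_or_vectors(Expr* e, TypeTag& bt1, TypeTag& bt2)
	{
	const auto& t1 = e->GetOp1()->GetType();
	const auto& t2 = e->GetOp2()->GetType();

	bool v1 = IsVector(t1->Tag());
	bool v2 = IsVector(t2->Tag());

	bt1 = v1 ? t1->AsVectorType()->Yield()->Tag() : t1->Tag();
	bt2 = v2 ? t2->AsVectorType()->Yield()->Tag() : t2->Tag();

	if ( v1 != v2 )
		{
		// Mixed vector/scalar operands are now rejected outright.
		e->Error("cannot mix vector and scalar operands");
		e->SetError();
		return false;
		}

	return true;
	}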
src/Expr.h | 16

@@ -27,7 +27,7 @@ namespace detail
 
 class Frame;
 class Scope;
-struct function_ingredients;
+class FunctionIngredients;
 using IDPtr = IntrusivePtr<ID>;
 using ScopePtr = IntrusivePtr<Scope>;
 
@@ -624,14 +624,6 @@ protected:
 
 	void ExprDescribe(ODesc* d) const override;
 
-	// Reports on if this BinaryExpr involves a scalar and aggregate
-	// type (vec, list, table, record).
-	bool IsScalarAggregateOp() const;
-
-	// Warns about deprecated scalar vector operations like
-	// `[1, 2, 3] == 1` or `["a", "b", "c"] + "a"`.
-	void CheckScalarAggOp() const;
-
 	// For assignment operations (=, +=, -=) checks for a valid
 	// expression-list on the RHS (op2), potentially transforming
 	// op2 in the process. Returns true if the list is present
@@ -1462,12 +1454,12 @@ protected:
 class LambdaExpr final : public Expr
 	{
 public:
-	LambdaExpr(std::unique_ptr<function_ingredients> ingredients, IDPList outer_ids,
+	LambdaExpr(std::unique_ptr<FunctionIngredients> ingredients, IDPList outer_ids,
 	           StmtPtr when_parent = nullptr);
 
 	const std::string& Name() const { return my_name; }
 	const IDPList& OuterIDs() const { return outer_ids; }
-	const function_ingredients& Ingredients() const { return *ingredients; }
+	const FunctionIngredients& Ingredients() const { return *ingredients; }
 
 	ValPtr Eval(Frame* f) const override;
 	TraversalCode Traverse(TraversalCallback* cb) const override;
@@ -1486,7 +1478,7 @@ protected:
 private:
 	bool CheckCaptures(StmtPtr when_parent);
 
-	std::unique_ptr<function_ingredients> ingredients;
+	std::unique_ptr<FunctionIngredients> ingredients;
 	IDPtr lambda_id;
 	IDPList outer_ids;
 
src/Frame.cc | 11

@@ -120,6 +120,17 @@ Frame* Frame::Clone() const
 	return other;
 	}
 
+Frame* Frame::CloneForTrigger() const
+	{
+	Frame* other = new Frame(0, function, func_args);
+
+	other->call = call;
+	other->assoc = assoc;
+	other->trigger = trigger;
+
+	return other;
+	}
+
 static bool val_is_func(const ValPtr& v, ScriptFunc* func)
 	{
 	if ( v->GetType()->Tag() != TYPE_FUNC )
src/Frame.h

@@ -157,6 +157,13 @@ public:
 	 */
 	Frame* Clone() const;
 
+	/**
+	 * Creates a copy of the frame that just includes its trigger context.
+	 *
+	 * @return a partial copy of this frame.
+	 */
+	Frame* CloneForTrigger() const;
+
 	/**
 	 * Serializes the frame in support of copy semantics for lambdas:
 	 *
src/Func.cc | 19

@@ -44,6 +44,7 @@
 // break what symbols are available when, which keeps the build from breaking.
 // clang-format off
 #include "zeek.bif.func_h"
+#include "communityid.bif.func_h"
 #include "stats.bif.func_h"
 #include "reporter.bif.func_h"
 #include "strings.bif.func_h"
@@ -53,6 +54,7 @@
 #include "CPP-load.bif.func_h"
 
 #include "zeek.bif.func_def"
+#include "communityid.bif.func_def"
 #include "stats.bif.func_def"
 #include "reporter.bif.func_def"
 #include "strings.bif.func_def"
@@ -862,16 +864,18 @@ static std::set<EventGroupPtr> get_func_groups(const std::vector<AttrPtr>& attrs
 	return groups;
 	}
 
-function_ingredients::function_ingredients(ScopePtr scope, StmtPtr body,
-                                           const std::string& module_name)
+FunctionIngredients::FunctionIngredients(ScopePtr _scope, StmtPtr _body,
+                                         const std::string& module_name)
 	{
+	scope = std::move(_scope);
+	body = std::move(_body);
+
 	frame_size = scope->Length();
 	inits = scope->GetInits();
 
-	this->scope = std::move(scope);
-	id = this->scope->GetID();
+	id = scope->GetID();
 
-	const auto& attrs = this->scope->Attrs();
+	const auto& attrs = scope->Attrs();
 
 	if ( attrs )
 		{
@@ -890,15 +894,11 @@ function_ingredients::function_ingredients(ScopePtr scope, StmtPtr body,
 	else
 		priority = 0;
 
-	this->body = std::move(body);
-	this->module_name = module_name;
-
 	// Implicit module event groups for events and hooks.
 	auto flavor = id->GetType<zeek::FuncType>()->Flavor();
 	if ( flavor == FUNC_FLAVOR_EVENT || flavor == FUNC_FLAVOR_HOOK )
 		{
-		auto module_group = event_registry->RegisterGroup(EventGroupKind::Module,
-		                                                  this->module_name);
+		auto module_group = event_registry->RegisterGroup(EventGroupKind::Module, module_name);
 		groups.insert(module_group);
 		}
 	}
@@ -1038,6 +1038,7 @@ void init_primary_bifs()
 	var_sizes = id::find_type("var_sizes")->AsTableType();
 
 #include "CPP-load.bif.func_init"
+#include "communityid.bif.func_init"
 #include "option.bif.func_init"
 #include "packet_analysis.bif.func_init"
 #include "reporter.bif.func_init"
src/Func.h | 19

@@ -334,16 +334,27 @@ struct CallInfo
 
 // Struct that collects all the specifics defining a Func. Used for ScriptFuncs
 // with closures.
-struct function_ingredients
+class FunctionIngredients
 	{
+public:
 	// Gathers all of the information from a scope and a function body needed
 	// to build a function.
-	function_ingredients(ScopePtr scope, StmtPtr body, const std::string& module_name);
+	FunctionIngredients(ScopePtr scope, StmtPtr body, const std::string& module_name);
+
+	const IDPtr& GetID() const { return id; }
+
+	const StmtPtr& Body() const { return body; }
+	void SetBody(StmtPtr _body) { body = std::move(_body); }
+
+	const auto& Inits() const { return inits; }
+	size_t FrameSize() const { return frame_size; }
+	int Priority() const { return priority; }
+	const ScopePtr& Scope() const { return scope; }
+	const auto& Groups() const { return groups; }
+
+private:
 	IDPtr id;
 	StmtPtr body;
-	std::string module_name; // current module name where function body is defined
 	std::vector<IDPtr> inits;
 	size_t frame_size = 0;
 	int priority = 0;
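Taken together with the call sites updated in src/Expr.cc above, the new accessor surface is meant to be used roughly as follows; the module name, the body variable, and the surrounding context are illustrative assumptions only:

// Hedged illustration mirroring what LambdaExpr does: build ingredients from the
// current scope plus a parsed body, then hand them to a ScriptFunc.
auto ing = std::make_unique<FunctionIngredients>(current_scope(), std::move(body), "MyModule");
auto func = make_intrusive<ScriptFunc>(ing->GetID());
func->AddBody(ing->Body(), ing->Inits(), ing->FrameSize(), ing->Priority());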
src/Options.cc

@@ -147,6 +147,9 @@ void usage(const char* prog, int code)
 	fprintf(
 		stderr,
 		"    --profile-scripts[=file] | profile scripts to given file (default stdout)\n");
+	fprintf(stderr,
+	        "    --profile-script-call-stacks | add call stacks to profile output (requires "
+	        "--profile-scripts)\n");
 	fprintf(stderr, "    --pseudo-realtime[=<speedup>] | enable pseudo-realtime for performance "
 	                "evaluation (default 1)\n");
 	fprintf(stderr, "    -j|--jobs | enable supervisor mode\n");
@@ -369,8 +372,13 @@ Options parse_cmdline(int argc, char** argv)
 		}
 
 	int profile_scripts = 0;
+	int profile_script_call_stacks = 0;
+	std::string profile_filename;
 	int no_unused_warnings = 0;
 
+	bool enable_script_profile = false;
+	bool enable_script_profile_call_stacks = false;
+
 	struct option long_opts[] = {
 		{"parse-only", no_argument, nullptr, 'a'},
 		{"bare-mode", no_argument, nullptr, 'b'},
@@ -414,6 +422,7 @@ Options parse_cmdline(int argc, char** argv)
 #endif
 
 		{"profile-scripts", optional_argument, &profile_scripts, 1},
+		{"profile-script-call-stacks", optional_argument, &profile_script_call_stacks, 1},
 		{"no-unused-warnings", no_argument, &no_unused_warnings, 1},
 		{"pseudo-realtime", optional_argument, nullptr, '~'},
 		{"jobs", optional_argument, nullptr, 'j'},
@@ -618,10 +627,17 @@ Options parse_cmdline(int argc, char** argv)
 			// a short-option equivalent.
 			if ( profile_scripts )
 				{
-				activate_script_profiling(optarg);
+				profile_filename = optarg ? optarg : "";
+				enable_script_profile = true;
 				profile_scripts = 0;
 				}
 
+			if ( profile_script_call_stacks )
+				{
+				enable_script_profile_call_stacks = true;
+				profile_script_call_stacks = 0;
+				}
+
 			if ( no_unused_warnings )
 				rval.no_unused_warnings = true;
 			break;
@@ -632,6 +648,17 @@ Options parse_cmdline(int argc, char** argv)
 			break;
 		}
 
+	if ( ! enable_script_profile && enable_script_profile_call_stacks )
+		fprintf(
+			stderr,
+			"ERROR: --profile-scripts-traces requires --profile-scripts to be passed as well.\n");
+
+	if ( enable_script_profile )
+		{
+		activate_script_profiling(profile_filename.empty() ? nullptr : profile_filename.c_str(),
+		                          enable_script_profile_call_stacks);
+		}
+
 	// Process remaining arguments. X=Y arguments indicate script
 	// variable/parameter assignments. X::Y arguments indicate plugins to
 	// activate/query. The remainder are treated as scripts to load.
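The option handling above funnels into the same entry point that code can call directly; as a hedged illustration, enabling profiling with call stacks from C++ (with an arbitrary example file name) amounts to:

// Equivalent to passing --profile-scripts=prof.log --profile-script-call-stacks;
// "prof.log" is only an example file name.
zeek::activate_script_profiling("prof.log", /*with_traces=*/true);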
src/Reassem.cc

@@ -5,8 +5,10 @@
 #include "zeek/zeek-config.h"
 
 #include <algorithm>
+#include <limits>
 
 #include "zeek/Desc.h"
+#include "zeek/Reporter.h"
 
 using std::min;
 
@@ -322,6 +324,17 @@ void Reassembler::CheckOverlap(const DataBlockList& list, uint64_t seq, uint64_t
 
 void Reassembler::NewBlock(double t, uint64_t seq, uint64_t len, const u_char* data)
 	{
+	// Check for overflows - this should be handled by the caller
+	// and possibly reported as a weird or violation if applicable.
+	if ( std::numeric_limits<uint64_t>::max() - seq < len )
+		{
+		zeek::reporter->InternalWarning("Reassembler::NewBlock() truncating block at seq %" PRIx64
+		                                " from length %" PRIu64 " to %" PRIu64,
+		                                seq, len, std::numeric_limits<uint64_t>::max() - seq);
+		len = std::numeric_limits<uint64_t>::max() - seq;
+		}
+
 	if ( len == 0 )
 		return;
 
src/Reporter.cc

@@ -59,6 +59,8 @@ Reporter::Reporter(bool arg_abort_on_scripting_errors)
 	weird_sampling_duration = 0;
 	weird_sampling_threshold = 0;
 
+	ignore_deprecations = false;
+
 	syslog_open = false;
 	}
 
@@ -86,7 +88,7 @@ void Reporter::InitOptions()
 		auto k = wle.GetHashKey();
 		auto index = wl_val->RecreateIndex(*k);
 		std::string key = index->Idx(0)->AsString()->CheckString();
-		set->emplace(move(key));
+		set->emplace(std::move(key));
 		}
 	};
 
@@ -532,6 +534,21 @@ void Reporter::Weird(const IPAddr& orig, const IPAddr& resp, const char* name, c
 	             "%s", name);
 	}
 
+void Reporter::Deprecation(std::string_view msg, const detail::Location* loc1,
+                           const detail::Location* loc2)
+	{
+	if ( ignore_deprecations )
+		return;
+
+	if ( loc1 || loc2 )
+		PushLocation(loc1, loc2);
+
+	Warning("%s", msg.data());
+
+	if ( loc1 || loc2 )
+		PopLocation();
+	}
+
 void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out, Connection* conn,
                      ValPList* addl, bool location, bool time, const char* postfix, const char* fmt,
                      va_list ap)
src/Reporter.h

@@ -133,6 +133,13 @@ public:
 	void Weird(const IPAddr& orig, const IPAddr& resp, const char* name, const char* addl = "",
 	           const char* source = ""); // Raises flow_weird().
 
+	// Report a deprecation. The message should contain a version.
+	void Deprecation(std::string_view msg, const detail::Location* loc1 = nullptr,
+	                 const detail::Location* loc2 = nullptr);
+
+	// Whether or not deprecations are logged when calling Deprecation()
+	void SetIgnoreDeprecations(bool arg) { ignore_deprecations = arg; }
+
 	// Syslog a message. This methods does nothing if we're running
 	// offline from a trace.
 	void Syslog(const char* fmt, ...) FMT_ATTR;
@@ -345,6 +352,8 @@ private:
 	uint64_t weird_sampling_threshold;
 	uint64_t weird_sampling_rate;
 	double weird_sampling_duration;
+
+	bool ignore_deprecations;
 	};
 
 extern Reporter* reporter;
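The report_field_deprecation() helper added in src/Expr.cc shows the intended calling pattern for this new API; here is a hedged sketch with a made-up message and an assumed expression pointer e in scope:

// Hypothetical caller: pass the expression's location so the warning points at the
// offending script line; Reporter::SetIgnoreDeprecations(true) suppresses such output.
reporter->Deprecation(util::fmt("field 'foo' is deprecated, remove in v6.1 (%s)",
                                obj_desc_short(e).c_str()),
                      e->GetLocationInfo());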
src/RunState.cc | 103

@@ -137,6 +137,44 @@ void update_network_time(double new_network_time)
 	PLUGIN_HOOK_VOID(HOOK_UPDATE_NETWORK_TIME, HookUpdateNetworkTime(new_network_time));
 	}
 
+// Logic to decide when updating network_time is acceptable:
+static bool should_forward_network_time()
+	{
+	// In pseudo_realtime mode, always update time once
+	// we've dispatched and processed the first packet.
+	// run_state::detail::first_timestamp is currently set
+	// in PktSrc::ExtractNextPacketInternal()
+	if ( pseudo_realtime != 0.0 && run_state::detail::first_timestamp != 0.0 )
+		return true;
+
+	if ( iosource::PktSrc* ps = iosource_mgr->GetPktSrc() )
+		{
+		// Offline packet sources always control network time
+		// unless we're running pseudo_realtime, see above.
+		if ( ! ps->IsLive() )
+			return false;
+
+		if ( ! ps->HasBeenIdleFor(BifConst::packet_source_inactivity_timeout) )
+			return false;
+		}
+
+	// We determined that we don't have a packet source, or it is idle.
+	// Unless it has been disabled, network_time will now be moved forward.
+	return BifConst::allow_network_time_forward;
+	}
+
+static void forward_network_time_if_applicable()
+	{
+	if ( ! should_forward_network_time() )
+		return;
+
+	double now = util::current_time(true);
+	if ( now > network_time )
+		update_network_time(now);
+
+	return;
+	}
+
 void init_run(const std::optional<std::string>& interface,
               const std::optional<std::string>& pcap_input_file,
               const std::optional<std::string>& pcap_output_file, bool do_watchdog)
@@ -319,21 +357,21 @@ void run_loop()
 			// date on timers and events. Because we only
 			// have timers as sources, going to sleep here
 			// doesn't risk blocking on other inputs.
-			update_network_time(util::current_time());
+			//
+			// TBD: Is this actually still relevant given that the TimerMgr
+			// is an IO source now? It'll be processed once its
+			// GetNextTimeout() yields 0 and before that there's nothing
+			// to expire anyway.
+			forward_network_time_if_applicable();
 			expire_timers();
+
+			// Prevent another forward_network_time_if_applicable() below
+			// even if time wasn't actually updated.
+			time_updated = true;
 			}
 
-		// Ensure that the time gets updated every pass if we're reading live.
-		// This is necessary for e.g. packet sources that don't have a selectable
-		// file descriptor. They'll always be ready on a very short timeout, but
-		// won't necessarily have a packet to process. In these case, sometimes
-		// the time won't get updated for a long time and timers don't function
-		// correctly.
-		if ( (! time_updated && reading_live) )
-			{
-			update_network_time(util::current_time());
-			expire_timers();
-			}
+		if ( ! time_updated )
+			forward_network_time_if_applicable();
 
 		event_mgr.Drain();
@@ -352,17 +390,13 @@ void run_loop()
 		// the future on which we need to wait.
 		have_pending_timers = zeek::detail::timer_mgr->Size() > 0;
 
+		// Terminate if we're running pseudo_realtime and
+		// the interface has been closed.
 		if ( pseudo_realtime && communication_enabled )
 			{
-			auto have_active_packet_source = false;
-
 			iosource::PktSrc* ps = iosource_mgr->GetPktSrc();
-			if ( ps && ps->IsOpen() )
-				have_active_packet_source = true;
-
-			if ( ! have_active_packet_source )
-				// Can turn off pseudo realtime now
-				pseudo_realtime = 0.0;
+			if ( ps && ! ps->IsOpen() )
+				iosource_mgr->Terminate();
 			}
 		}
 
@@ -380,20 +414,34 @@ void get_final_stats()
 		{
 		iosource::PktSrc::Stats s;
 		ps->Statistics(&s);
-		double dropped_pct = s.dropped > 0.0
-		                         ? ((double)s.dropped / ((double)s.received + (double)s.dropped)) *
-		                               100.0
-		                         : 0.0;
+
+		auto pct = [](uint64_t v, uint64_t received)
+		{
+			return (static_cast<double>(v) /
+			        (static_cast<double>(v) + static_cast<double>(received))) *
+			       100;
+		};
+
+		double dropped_pct = s.dropped > 0 ? pct(s.dropped, s.received) : 0.0;
+
 		uint64_t not_processed = packet_mgr->GetUnprocessedCount();
 		double unprocessed_pct = not_processed > 0
 		                             ? ((double)not_processed / (double)s.received) * 100.0
 		                             : 0.0;
 
+		std::string filtered = "";
+		if ( s.filtered )
+			{
+			double filtered_pct = s.filtered.value() > 0 ? pct(s.filtered.value(), s.received)
+			                                             : 0.0;
+			filtered = zeek::util::fmt(" %" PRIu64 " (%.2f%%) filtered", s.filtered.value(),
+			                           filtered_pct);
+			}
+
 		reporter->Info("%" PRIu64 " packets received on interface %s, %" PRIu64
-		               " (%.2f%%) dropped, %" PRIu64 " (%.2f%%) not processed",
+		               " (%.2f%%) dropped, %" PRIu64 " (%.2f%%) not processed%s",
 		               s.received, ps->Path().c_str(), s.dropped, dropped_pct, not_processed,
-		               unprocessed_pct);
+		               unprocessed_pct, filtered.c_str());
 		}
 	}
@@ -493,12 +541,13 @@ void continue_processing()
 		detail::current_wallclock = util::current_time(true);
 		}
 
+	if ( _processing_suspended > 0 )
 	--_processing_suspended;
 	}
 
 bool is_processing_suspended()
 	{
-	return _processing_suspended;
+	return _processing_suspended > 0;
 	}
 
 } // namespace zeek::run_state
src/Scope.cc | 14

@@ -190,20 +190,6 @@ ScopePtr pop_scope()
 	return old_top;
 	}
 
-void merge_top_scope()
-	{
-	if ( scopes.size() < 2 )
-		reporter->InternalError("scope merge underflow");
-
-	auto t = pop_scope();
-
-	for ( const auto& v : t->OrderedVars() )
-		{
-		v->SetOffset(top_scope->Length());
-		top_scope->Insert(v->Name(), v);
-		}
-	}
-
 ScopePtr current_scope()
 	{
 	return top_scope;

src/Scope.h

@@ -93,10 +93,6 @@ extern void push_existing_scope(ScopePtr scope);
 // Returns the one popped off.
 extern ScopePtr pop_scope();
 
-// Merges the elements of the current scope with the one surrounding it,
-// popping it in the process.
-extern void merge_top_scope();
-
 extern ScopePtr current_scope();
 extern ScopePtr global_scope();
 
src/ScriptProfile.cc

@@ -17,7 +17,7 @@ void ScriptProfile::StartActivation()
 	start_stats.SetStats(util::curr_CPU_time(), start_memory);
 	}
 
-void ScriptProfile::EndActivation()
+void ScriptProfile::EndActivation(const std::string& stack)
 	{
 	uint64_t end_memory;
 	util::get_memory_usage(&end_memory, nullptr);
@@ -25,7 +25,7 @@ void ScriptProfile::EndActivation()
 	delta_stats.SetStats(util::curr_CPU_time() - start_stats.CPUTime(),
 	                     end_memory - start_stats.Memory());
 
-	AddIn(&delta_stats, false);
+	AddIn(&delta_stats, false, stack);
 	}
 
 void ScriptProfile::ChildFinished(const ScriptProfile* child)
@@ -33,7 +33,7 @@ void ScriptProfile::ChildFinished(const ScriptProfile* child)
 	child_stats.AddIn(child->DeltaCPUTime(), child->DeltaMemory());
 	}
 
-void ScriptProfile::Report(FILE* f) const
+void ScriptProfile::Report(FILE* f, bool with_traces) const
 	{
 	std::string l;
 
@@ -46,10 +46,50 @@ void ScriptProfile::Report(FILE* f) const
 		l = std::string(loc.filename) + ":" + std::to_string(loc.first_line);
 
 	std::string ftype = is_BiF ? "BiF" : func->GetType()->FlavorString();
+	std::string call_stacks;
+
+	if ( with_traces )
+		{
+		std::string calls, counts, cpu, memory;
+
+		for ( const auto& [s, stats] : Stacks() )
+			{
+			calls += util::fmt("%s|", s.c_str());
+			counts += util::fmt("%d|", stats.call_count);
+			cpu += util::fmt("%f|", stats.cpu_time);
+			memory += util::fmt("%llu|", stats.memory);
+			}
+
+		calls.pop_back();
+		counts.pop_back();
+		cpu.pop_back();
+		memory.pop_back();
+
+		call_stacks = util::fmt("\t%s\t%s\t%s\t%s", calls.c_str(), counts.c_str(), cpu.c_str(),
+		                        memory.c_str());
+		}
 
-	fprintf(f, "%s\t%s\t%s\t%d\t%.06f\t%.06f\t%" PRIu64 "\t%" PRIu64 "\n", Name().c_str(),
+	fprintf(f, "%s\t%s\t%s\t%d\t%.06f\t%.06f\t%" PRIu64 "\t%" PRIu64 "\t%s\n", Name().c_str(),
 	        l.c_str(), ftype.c_str(), NumCalls(), CPUTime(), child_stats.CPUTime(), Memory(),
-	        child_stats.Memory());
+	        child_stats.Memory(), call_stacks.c_str());
+	}
+
+void ScriptProfileStats::AddIn(const ScriptProfileStats* eps, bool bump_num_calls,
+                               const std::string& stack)
+	{
+	if ( bump_num_calls )
+		ncalls += eps->NumCalls();
+
+	CPU_time += eps->CPUTime();
+	memory += eps->Memory();
+
+	if ( ! stack.empty() )
+		{
+		auto& data = stacks[stack];
+		data.call_count++;
+		data.cpu_time += eps->CPUTime();
+		data.memory += eps->Memory();
+		}
 	}
 
 ScriptProfileMgr::ScriptProfileMgr(FILE* _f) : f(_f), non_scripts()
@@ -67,14 +107,28 @@ ScriptProfileMgr::~ScriptProfileMgr()
 	ScriptProfileStats BiF_stats;
 	std::unordered_map<const Func*, ScriptProfileStats> func_stats;
 
+	std::string call_stack_header;
+	std::string call_stack_types;
+	std::string call_stack_nulls;
+
+	if ( with_traces )
+		{
+		call_stack_header = "\tstacks\tstack_calls\tstack_CPU\tstack_memory";
+		call_stack_types = "\tstring\tstring\tstring\tstring";
+		call_stack_nulls = "\t-\t-\t-\t-";
+		}
+
 	fprintf(f,
-	        "#fields\tfunction\tlocation\ttype\tncall\ttot_CPU\tchild_CPU\ttot_Mem\tchild_Mem\n");
-	fprintf(f, "#types\tstring\tstring\tstring\tcount\tinterval\tinterval\tcount\tcount\n");
+	        "#fields\tfunction\tlocation\ttype\tncall\ttot_CPU\tchild_CPU\ttot_Mem\tchild_"
+	        "Mem%s\n",
+	        call_stack_header.c_str());
+	fprintf(f, "#types\tstring\tstring\tstring\tcount\tinterval\tinterval\tcount\tcount%s\n",
+	        call_stack_types.c_str());
 
 	for ( auto o : objs )
 		{
 		auto p = profiles[o].get();
-		profiles[o]->Report(f);
+		profiles[o]->Report(f, with_traces);
 
 		total_stats.AddInstance();
 		total_stats.AddIn(p);
@@ -102,21 +156,22 @@ ScriptProfileMgr::~ScriptProfileMgr()
 		auto& fp = fs.second;
 		auto n = func->GetBodies().size();
 		if ( n > 1 )
-			fprintf(f, "%s\t%zu-locations\t%s\t%d\t%.06f\t%0.6f\t%" PRIu64 "\t%lld\n",
+			fprintf(f, "%s\t%zu-locations\t%s\t%d\t%.06f\t%0.6f\t%" PRIu64 "\t%lld%s\n",
 			        fp.Name().c_str(), n, func->GetType()->FlavorString().c_str(), fp.NumCalls(),
-			        fp.CPUTime(), 0.0, fp.Memory(), 0LL);
+			        fp.CPUTime(), 0.0, fp.Memory(), 0LL, call_stack_nulls.c_str());
 		}
 
-	fprintf(f, "all-BiFs\t%d-locations\tBiF\t%d\t%.06f\t%.06f\t%" PRIu64 "\t%lld\n",
+	fprintf(f, "all-BiFs\t%d-locations\tBiF\t%d\t%.06f\t%.06f\t%" PRIu64 "\t%lld%s\n",
 	        BiF_stats.NumInstances(), BiF_stats.NumCalls(), BiF_stats.CPUTime(), 0.0,
-	        BiF_stats.Memory(), 0LL);
+	        BiF_stats.Memory(), 0LL, call_stack_nulls.c_str());
 
-	fprintf(f, "total\t%d-locations\tTOTAL\t%d\t%.06f\t%.06f\t%" PRIu64 "\t%lld\n",
+	fprintf(f, "total\t%d-locations\tTOTAL\t%d\t%.06f\t%.06f\t%" PRIu64 "\t%lld%s\n",
 	        total_stats.NumInstances(), total_stats.NumCalls(), total_stats.CPUTime(), 0.0,
-	        total_stats.Memory(), 0LL);
+	        total_stats.Memory(), 0LL, call_stack_nulls.c_str());
 
-	fprintf(f, "non-scripts\t<no-location>\tTOTAL\t%d\t%.06f\t%.06f\t%" PRIu64 "\t%lld\n",
-	        non_scripts.NumCalls(), non_scripts.CPUTime(), 0.0, non_scripts.Memory(), 0LL);
+	fprintf(f, "non-scripts\t<no-location>\tTOTAL\t%d\t%.06f\t%.06f\t%" PRIu64 "\t%lld%s\n",
+	        non_scripts.NumCalls(), non_scripts.CPUTime(), 0.0, non_scripts.Memory(), 0LL,
+	        call_stack_nulls.c_str());
 
 	if ( f != stdout )
 		fclose(f);
@@ -152,9 +207,17 @@ void ScriptProfileMgr::EndInvocation()
 	{
 	ASSERT(! call_stack.empty());
 	auto ep = call_stack.back();
 	call_stack.pop_back();
 
-	ep->EndActivation();
+	std::string stack_string = ep->Name();
+	for ( const auto& sep : call_stack )
+		{
+		stack_string.append(";");
+		stack_string.append(sep->Name());
+		}
+
+	ep->EndActivation(stack_string);
 
 	if ( call_stack.empty() )
 		non_scripts.StartActivation();
@@ -169,7 +232,7 @@ std::unique_ptr<ScriptProfileMgr> spm;
 
 } // namespace zeek::detail
 
-void activate_script_profiling(const char* fn)
+void activate_script_profiling(const char* fn, bool with_traces)
 	{
 	FILE* f;
 
@@ -186,6 +249,9 @@ void activate_script_profiling(const char* fn)
 		f = stdout;
 
 	detail::spm = std::make_unique<detail::ScriptProfileMgr>(f);
+
+	if ( with_traces )
+		detail::spm->EnableTraces();
 	}
 
 } // namespace zeek
@ -22,6 +22,13 @@ namespace detail
|
||||||
class ScriptProfileStats
|
class ScriptProfileStats
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
|
struct StackData
|
||||||
|
{
|
||||||
|
int call_count = 0;
|
||||||
|
double cpu_time = 0.0;
|
||||||
|
uint64_t memory = 0;
|
||||||
|
};
|
||||||
|
|
||||||
ScriptProfileStats() = default;
|
ScriptProfileStats() = default;
|
||||||
ScriptProfileStats(std::string arg_name) : name(std::move(arg_name)) { }
|
ScriptProfileStats(std::string arg_name) : name(std::move(arg_name)) { }
|
||||||
|
|
||||||
|
@ -46,19 +53,16 @@ public:
|
||||||
double CPUTime() const { return CPU_time; }
|
double CPUTime() const { return CPU_time; }
|
||||||
uint64_t Memory() const { return memory; }
|
uint64_t Memory() const { return memory; }
|
||||||
|
|
||||||
|
// Stacks for all of the calls plus counters.
|
||||||
|
std::unordered_map<std::string, StackData> Stacks() const { return stacks; }
|
||||||
|
|
||||||
// Used to count instances in an aggregate.
|
// Used to count instances in an aggregate.
|
||||||
void AddInstance() { ++ninstances; }
|
void AddInstance() { ++ninstances; }
|
||||||
|
|
||||||
// Fold into this profile another profile. Second argument controls
|
// Fold into this profile another profile. Second argument controls
|
||||||
 	// whether the folding should include increasing the number of calls.
-	void AddIn(const ScriptProfileStats* eps, bool bump_num_calls = true)
-		{
-		if ( bump_num_calls )
-			ncalls += eps->NumCalls();
-
-		CPU_time += eps->CPUTime();
-		memory += eps->Memory();
-		}
+	void AddIn(const ScriptProfileStats* eps, bool bump_num_calls = true,
+	           const std::string& stack = "");

 	// Accumulate a single instance of CPU & memory usage.
 	void AddIn(double delta_CPU_time, uint64_t delta_memory)

@@ -83,6 +87,7 @@ private:
 	int ncalls = 0;
 	double CPU_time = 0.0;
 	uint64_t memory = 0;
+	std::unordered_map<std::string, StackData> stacks;
 	};

 // Manages all of the profile instances associated with a given script.

@@ -111,7 +116,7 @@ public:
 	// Called to register the beginning/end of an execution instance.
 	void StartActivation();
-	void EndActivation();
+	void EndActivation(const std::string& stack = "");

 	// Called when a child instance finishes.
 	void ChildFinished(const ScriptProfile* child);

@@ -121,7 +126,7 @@ public:
 	uint64_t DeltaMemory() const { return delta_stats.Memory(); }

 	// Write the profile to the given file.
-	void Report(FILE* f) const;
+	void Report(FILE* f, bool with_traces) const;

 private:
 	// We store "func" as a FuncPtr to ensure it sticks around when

@@ -157,6 +162,8 @@ public:
 	void StartInvocation(const Func* f, const detail::StmtPtr& body = nullptr);
 	void EndInvocation();

+	void EnableTraces() { with_traces = true; }
+
 private:
 	FILE* f; // where to write the profile

@@ -178,6 +185,8 @@ private:
 	// Tracks the objects encountered. Used to generate a consistent
 	// and more natural printing order.
 	std::vector<const Obj*> objs;

+	bool with_traces = false;
 	};

 // If non-nil, script profiling is active.

@@ -187,6 +196,6 @@ extern std::unique_ptr<ScriptProfileMgr> spm;

 // Called to turn on script profiling to the given file. If nil, writes
 // the profile to stdout.
-extern void activate_script_profiling(const char* fn);
+extern void activate_script_profiling(const char* fn, bool with_traces);

 } // namespace zeek
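The profiling changes above add a per-call-stack dimension: AddIn() and EndActivation() now take an optional stack string, ScriptProfileStats grows a stacks map, and the manager gains EnableTraces()/with_traces. As a rough standalone sketch of that accumulation pattern (the StackData and ProfileStats types below are simplified stand-ins, not Zeek's actual definitions):

#include <cstdint>
#include <cstdio>
#include <string>
#include <unordered_map>

// Simplified stand-in for per-stack usage; the real StackData lives in Zeek.
struct StackData
	{
	int ncalls = 0;
	double CPU_time = 0.0;
	uint64_t memory = 0;
	};

// Minimal model of stats folding keyed by an optional call-stack string.
struct ProfileStats
	{
	void AddIn(double delta_CPU, uint64_t delta_mem, bool bump_num_calls = true,
	           const std::string& stack = "")
		{
		if ( bump_num_calls )
			++ncalls;

		CPU_time += delta_CPU;
		memory += delta_mem;

		if ( ! stack.empty() )
			{
			// Fold the same usage into the per-stack bucket.
			auto& sd = stacks[stack];
			++sd.ncalls;
			sd.CPU_time += delta_CPU;
			sd.memory += delta_mem;
			}
		}

	int ncalls = 0;
	double CPU_time = 0.0;
	uint64_t memory = 0;
	std::unordered_map<std::string, StackData> stacks;
	};

int main()
	{
	ProfileStats ps;
	ps.AddIn(0.002, 1024, true, "zeek_init;my_func");
	ps.AddIn(0.001, 512, true, "zeek_init;my_func");
	std::printf("calls=%d cpu=%.3f stacks=%zu\n", ps.ncalls, ps.CPU_time, ps.stacks.size());
	}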
@@ -12,6 +12,8 @@ namespace zeek::detail
 class BreakNextScriptValidation : public TraversalCallback
 	{
 public:
+	BreakNextScriptValidation(bool _report) : report(_report) { }
+
 	TraversalCode PreStmt(const Stmt* stmt)
 		{
 		if ( ! StmtIsRelevant(stmt) )
@@ -20,21 +22,13 @@ public:
 		stmt_depths[stmt->Tag()] += 1;

 		if ( stmt->Tag() == STMT_BREAK && ! BreakStmtIsValid() )
-			{
-			zeek::reporter->PushLocation(stmt->GetLocationInfo());
-			zeek::reporter->Warning("break statement used outside of for, while or "
+			Report(stmt, "break statement used outside of for, while or "
 			             "switch statement and not within a hook. "
 			             "With v6.1 this will become an error.");
-			zeek::reporter->PopLocation();
-			}

 		if ( stmt->Tag() == STMT_NEXT && ! NextStmtIsValid() )
-			{
-			zeek::reporter->PushLocation(stmt->GetLocationInfo());
-			zeek::reporter->Warning("next statement used outside of for or while statement. "
+			Report(stmt, "next statement used outside of for or while statement. "
 			             "With v6.1 this will become an error.");
-			zeek::reporter->PopLocation();
-			}

 		return TC_CONTINUE;
 		}
@@ -71,6 +65,8 @@ public:
 		return TC_CONTINUE;
 		}

+	bool IsValid() const { return valid_script; }
+
 private:
 	bool StmtIsRelevant(const Stmt* stmt)
 		{
@@ -87,13 +83,35 @@ private:

 	bool NextStmtIsValid() { return stmt_depths[STMT_FOR] > 0 || stmt_depths[STMT_WHILE] > 0; }

+	void Report(const Stmt* stmt, const char* msg)
+		{
+		if ( report )
+			{
+			zeek::reporter->PushLocation(stmt->GetLocationInfo());
+			zeek::reporter->Warning("%s", msg);
+			zeek::reporter->PopLocation();
+			}
+
+		valid_script = false;
+		}
+
 	std::unordered_map<StmtTag, int> stmt_depths;
 	int hook_depth = 0;
+	bool report; // whether to report problems via "reporter"
+	bool valid_script = true;
 	};

 void script_validation()
 	{
-	zeek::detail::BreakNextScriptValidation bn_cb;
-	zeek::detail::traverse_all(&bn_cb);
+	BreakNextScriptValidation bn_cb(true);
+	traverse_all(&bn_cb);
 	}

+bool script_is_valid(const Stmt* stmt)
+	{
+	BreakNextScriptValidation bn_cb(false);
+	stmt->Traverse(&bn_cb);
+	return bn_cb.IsValid();
+	}
+
 }
@@ -4,9 +4,17 @@
 namespace zeek::detail
 	{

+class Stmt;
+
 /**
  * Run extra validations on the parsed AST after everything is initialized
- * and report any errors via zeek::reporter->Error().
+ * and report any errors via deprecation warnings or zeek::reporter->Error().
  */
 void script_validation();

+/**
+ * Returns true if the given script statement (body) is valid.
+ */
+bool script_is_valid(const Stmt* s);
+
 }
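The validation rework above gives the traversal two modes: script_validation() walks everything and warns via the reporter, while script_is_valid() runs the same checks silently and only returns a verdict. A minimal standalone sketch of that report-versus-query split, assuming a toy statement tree in place of Zeek's Stmt/TraversalCallback machinery:

#include <cstdio>
#include <string>
#include <vector>

// Toy statement tree standing in for Zeek's AST; tags mirror STMT_FOR/STMT_BREAK/etc.
struct Node
	{
	std::string tag;
	std::vector<Node> kids;
	};

// Walks the tree once; either reports problems (validation pass) or just
// records validity (speculative query), mirroring the report/valid_script split.
class BreakValidator
	{
public:
	explicit BreakValidator(bool report) : report(report) { }

	void Visit(const Node& n, int loop_depth)
		{
		if ( n.tag == "for" || n.tag == "while" )
			++loop_depth;

		if ( n.tag == "break" && loop_depth == 0 )
			{
			if ( report )
				std::fprintf(stderr, "break statement used outside of a loop\n");

			valid = false;
			}

		for ( const auto& k : n.kids )
			Visit(k, loop_depth);
		}

	bool IsValid() const { return valid; }

private:
	bool report;
	bool valid = true;
	};

int main()
	{
	Node body{"stmts", {Node{"break", {}}}};

	BreakValidator reporting(true); // like script_validation(): warn loudly
	reporting.Visit(body, 0);

	BreakValidator query(false); // like script_is_valid(): just answer yes/no
	query.Visit(body, 0);
	std::printf("valid: %s\n", query.IsValid() ? "yes" : "no");
	}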
136
src/Stmt.cc
136
src/Stmt.cc
|
@ -1867,7 +1867,8 @@ TraversalCode NullStmt::Traverse(TraversalCallback* cb) const
|
||||||
WhenInfo::WhenInfo(ExprPtr arg_cond, FuncType::CaptureList* arg_cl, bool arg_is_return)
|
WhenInfo::WhenInfo(ExprPtr arg_cond, FuncType::CaptureList* arg_cl, bool arg_is_return)
|
||||||
: cond(std::move(arg_cond)), cl(arg_cl), is_return(arg_is_return)
|
: cond(std::move(arg_cond)), cl(arg_cl), is_return(arg_is_return)
|
||||||
{
|
{
|
||||||
prior_vars = current_scope()->Vars();
|
if ( ! cl )
|
||||||
|
cl = new zeek::FuncType::CaptureList;
|
||||||
|
|
||||||
ProfileFunc cond_pf(cond.get());
|
ProfileFunc cond_pf(cond.get());
|
||||||
|
|
||||||
|
@ -1881,8 +1882,6 @@ WhenInfo::WhenInfo(ExprPtr arg_cond, FuncType::CaptureList* arg_cl, bool arg_is_
|
||||||
{
|
{
|
||||||
bool is_present = false;
|
bool is_present = false;
|
||||||
|
|
||||||
if ( cl )
|
|
||||||
{
|
|
||||||
for ( auto& c : *cl )
|
for ( auto& c : *cl )
|
||||||
if ( c.id == wl )
|
if ( c.id == wl )
|
||||||
{
|
{
|
||||||
|
@ -1895,7 +1894,6 @@ WhenInfo::WhenInfo(ExprPtr arg_cond, FuncType::CaptureList* arg_cl, bool arg_is_
|
||||||
IDPtr wl_ptr = {NewRef{}, const_cast<ID*>(wl)};
|
IDPtr wl_ptr = {NewRef{}, const_cast<ID*>(wl)};
|
||||||
cl->emplace_back(FuncType::Capture{wl_ptr, false});
|
cl->emplace_back(FuncType::Capture{wl_ptr, false});
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// In addition, don't treat them as external locals that
|
// In addition, don't treat them as external locals that
|
||||||
// existed at the onset.
|
// existed at the onset.
|
||||||
|
@ -1926,33 +1924,12 @@ WhenInfo::WhenInfo(ExprPtr arg_cond, FuncType::CaptureList* arg_cl, bool arg_is_
|
||||||
|
|
||||||
WhenInfo::WhenInfo(bool arg_is_return) : is_return(arg_is_return)
|
WhenInfo::WhenInfo(bool arg_is_return) : is_return(arg_is_return)
|
||||||
{
|
{
|
||||||
// This won't be needed once we remove the deprecated semantics.
|
|
||||||
cl = new zeek::FuncType::CaptureList;
|
cl = new zeek::FuncType::CaptureList;
|
||||||
BuildInvokeElems();
|
BuildInvokeElems();
|
||||||
}
|
}
|
||||||
|
|
||||||
void WhenInfo::Build(StmtPtr ws)
|
void WhenInfo::Build(StmtPtr ws)
|
||||||
{
|
{
|
||||||
if ( IsDeprecatedSemantics(ws) )
|
|
||||||
{
|
|
||||||
merge_top_scope();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( ! cl )
|
|
||||||
{
|
|
||||||
// This instance is compatible with new-style semantics,
|
|
||||||
// so create a capture list for it and populate with any
|
|
||||||
// when-locals.
|
|
||||||
cl = new zeek::FuncType::CaptureList;
|
|
||||||
|
|
||||||
for ( auto& wl : when_new_locals )
|
|
||||||
{
|
|
||||||
IDPtr wl_ptr = {NewRef{}, const_cast<ID*>(wl)};
|
|
||||||
cl->emplace_back(FuncType::Capture{wl_ptr, false});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
lambda_ft->SetCaptures(*cl);
|
lambda_ft->SetCaptures(*cl);
|
||||||
|
|
||||||
// Our general strategy is to construct a single lambda (so that
|
// Our general strategy is to construct a single lambda (so that
|
||||||
|
@ -1997,38 +1974,30 @@ void WhenInfo::Build(StmtPtr ws)
|
||||||
|
|
||||||
auto shebang = make_intrusive<StmtList>(do_test, do_bodies, dummy_return);
|
auto shebang = make_intrusive<StmtList>(do_test, do_bodies, dummy_return);
|
||||||
|
|
||||||
auto ingredients = std::make_unique<function_ingredients>(current_scope(), shebang,
|
auto ingredients = std::make_unique<FunctionIngredients>(current_scope(), shebang,
|
||||||
current_module);
|
current_module);
|
||||||
auto outer_ids = gather_outer_ids(pop_scope(), ingredients->body);
|
auto outer_ids = gather_outer_ids(pop_scope(), ingredients->Body());
|
||||||
|
|
||||||
lambda = make_intrusive<LambdaExpr>(std::move(ingredients), std::move(outer_ids), ws);
|
lambda = make_intrusive<LambdaExpr>(std::move(ingredients), std::move(outer_ids), ws);
|
||||||
}
|
}
|
||||||
|
|
||||||
void WhenInfo::Instantiate(Frame* f)
|
void WhenInfo::Instantiate(Frame* f)
|
||||||
{
|
{
|
||||||
if ( cl )
|
|
||||||
Instantiate(lambda->Eval(f));
|
Instantiate(lambda->Eval(f));
|
||||||
}
|
}
|
||||||
|
|
||||||
void WhenInfo::Instantiate(ValPtr func)
|
void WhenInfo::Instantiate(ValPtr func)
|
||||||
{
|
{
|
||||||
if ( cl )
|
|
||||||
curr_lambda = make_intrusive<ConstExpr>(std::move(func));
|
curr_lambda = make_intrusive<ConstExpr>(std::move(func));
|
||||||
}
|
}
|
||||||
|
|
||||||
ExprPtr WhenInfo::Cond()
|
ExprPtr WhenInfo::Cond()
|
||||||
{
|
{
|
||||||
if ( ! curr_lambda )
|
|
||||||
return cond;
|
|
||||||
|
|
||||||
return make_intrusive<CallExpr>(curr_lambda, invoke_cond);
|
return make_intrusive<CallExpr>(curr_lambda, invoke_cond);
|
||||||
}
|
}
|
||||||
|
|
||||||
StmtPtr WhenInfo::WhenBody()
|
StmtPtr WhenInfo::WhenBody()
|
||||||
{
|
{
|
||||||
if ( ! curr_lambda )
|
|
||||||
return s;
|
|
||||||
|
|
||||||
auto invoke = make_intrusive<CallExpr>(curr_lambda, invoke_s);
|
auto invoke = make_intrusive<CallExpr>(curr_lambda, invoke_s);
|
||||||
return make_intrusive<ReturnStmt>(invoke, true);
|
return make_intrusive<ReturnStmt>(invoke, true);
|
||||||
}
|
}
|
||||||
|
@ -2047,61 +2016,10 @@ double WhenInfo::TimeoutVal(Frame* f)
|
||||||
|
|
||||||
StmtPtr WhenInfo::TimeoutStmt()
|
StmtPtr WhenInfo::TimeoutStmt()
|
||||||
{
|
{
|
||||||
if ( ! curr_lambda )
|
|
||||||
return timeout_s;
|
|
||||||
|
|
||||||
auto invoke = make_intrusive<CallExpr>(curr_lambda, invoke_timeout);
|
auto invoke = make_intrusive<CallExpr>(curr_lambda, invoke_timeout);
|
||||||
return make_intrusive<ReturnStmt>(invoke, true);
|
return make_intrusive<ReturnStmt>(invoke, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool WhenInfo::IsDeprecatedSemantics(StmtPtr ws)
|
|
||||||
{
|
|
||||||
if ( cl )
|
|
||||||
return false;
|
|
||||||
|
|
||||||
// Which locals of the outer function are used in any of the "when"
|
|
||||||
// elements.
|
|
||||||
IDSet locals;
|
|
||||||
|
|
||||||
for ( auto& wl : when_new_locals )
|
|
||||||
prior_vars.erase(wl->Name());
|
|
||||||
|
|
||||||
for ( auto& bl : when_expr_locals )
|
|
||||||
if ( prior_vars.count(bl->Name()) > 0 )
|
|
||||||
locals.insert(bl);
|
|
||||||
|
|
||||||
ProfileFunc body_pf(s.get());
|
|
||||||
for ( auto& bl : body_pf.Locals() )
|
|
||||||
if ( prior_vars.count(bl->Name()) > 0 )
|
|
||||||
locals.insert(bl);
|
|
||||||
|
|
||||||
if ( timeout_s )
|
|
||||||
{
|
|
||||||
ProfileFunc to_pf(timeout_s.get());
|
|
||||||
for ( auto& tl : to_pf.Locals() )
|
|
||||||
if ( prior_vars.count(tl->Name()) > 0 )
|
|
||||||
locals.insert(tl);
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( locals.empty() )
|
|
||||||
return false;
|
|
||||||
|
|
||||||
std::string vars;
|
|
||||||
for ( auto& l : locals )
|
|
||||||
{
|
|
||||||
if ( ! vars.empty() )
|
|
||||||
vars += ", ";
|
|
||||||
vars += l->Name();
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string msg = util::fmt("\"when\" statement referring to locals without an "
|
|
||||||
"explicit [] capture is deprecated: %s",
|
|
||||||
vars.c_str());
|
|
||||||
ws->Warn(msg.c_str());
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
void WhenInfo::BuildInvokeElems()
|
void WhenInfo::BuildInvokeElems()
|
||||||
{
|
{
|
||||||
one_const = make_intrusive<ConstExpr>(val_mgr->Count(1));
|
one_const = make_intrusive<ConstExpr>(val_mgr->Count(1));
|
||||||
|
@ -2117,12 +2035,12 @@ WhenStmt::WhenStmt(WhenInfo* arg_wi) : Stmt(STMT_WHEN), wi(arg_wi)
|
||||||
{
|
{
|
||||||
wi->Build(ThisPtr());
|
wi->Build(ThisPtr());
|
||||||
|
|
||||||
auto cond = wi->Cond();
|
auto cond = wi->OrigCond();
|
||||||
|
|
||||||
if ( ! cond->IsError() && ! IsBool(cond->GetType()->Tag()) )
|
if ( ! cond->IsError() && ! IsBool(cond->GetType()->Tag()) )
|
||||||
cond->Error("conditional in test must be boolean");
|
cond->Error("conditional in test must be boolean");
|
||||||
|
|
||||||
auto te = wi->TimeoutExpr();
|
auto te = wi->OrigTimeout();
|
||||||
|
|
||||||
if ( te )
|
if ( te )
|
||||||
{
|
{
|
||||||
|
@ -2149,8 +2067,6 @@ ValPtr WhenStmt::Exec(Frame* f, StmtFlowType& flow)
|
||||||
|
|
||||||
auto timeout = wi->TimeoutVal(f);
|
auto timeout = wi->TimeoutVal(f);
|
||||||
|
|
||||||
if ( wi->Captures() )
|
|
||||||
{
|
|
||||||
std::vector<ValPtr> local_aggrs;
|
std::vector<ValPtr> local_aggrs;
|
||||||
for ( auto& l : wi->WhenExprLocals() )
|
for ( auto& l : wi->WhenExprLocals() )
|
||||||
{
|
{
|
||||||
|
@ -2160,21 +2076,15 @@ ValPtr WhenStmt::Exec(Frame* f, StmtFlowType& flow)
|
||||||
local_aggrs.emplace_back(std::move(v));
|
local_aggrs.emplace_back(std::move(v));
|
||||||
}
|
}
|
||||||
|
|
||||||
new trigger::Trigger(wi, timeout, wi->WhenExprGlobals(), local_aggrs, f, location);
|
|
||||||
}
|
|
||||||
|
|
||||||
else
|
|
||||||
// The new trigger object will take care of its own deletion.
|
// The new trigger object will take care of its own deletion.
|
||||||
new trigger::Trigger(wi->Cond(), wi->WhenBody(), wi->TimeoutStmt(), timeout, f,
|
new trigger::Trigger(wi, timeout, wi->WhenExprGlobals(), local_aggrs, f, location);
|
||||||
wi->IsReturn(), location);
|
|
||||||
|
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool WhenStmt::IsPure() const
|
bool WhenStmt::IsPure() const
|
||||||
{
|
{
|
||||||
return wi->Cond()->IsPure() && wi->WhenBody()->IsPure() &&
|
return false;
|
||||||
(! wi->TimeoutStmt() || wi->TimeoutStmt()->IsPure());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void WhenStmt::StmtDescribe(ODesc* d) const
|
void WhenStmt::StmtDescribe(ODesc* d) const
|
||||||
|
@ -2184,35 +2094,35 @@ void WhenStmt::StmtDescribe(ODesc* d) const
|
||||||
if ( d->IsReadable() )
|
if ( d->IsReadable() )
|
||||||
d->Add("(");
|
d->Add("(");
|
||||||
|
|
||||||
wi->Cond()->Describe(d);
|
wi->OrigCond()->Describe(d);
|
||||||
|
|
||||||
if ( d->IsReadable() )
|
if ( d->IsReadable() )
|
||||||
d->Add(")");
|
d->Add(")");
|
||||||
|
|
||||||
d->SP();
|
d->SP();
|
||||||
d->PushIndent();
|
d->PushIndent();
|
||||||
wi->WhenBody()->AccessStats(d);
|
wi->OrigBody()->AccessStats(d);
|
||||||
wi->WhenBody()->Describe(d);
|
wi->OrigBody()->Describe(d);
|
||||||
d->PopIndent();
|
d->PopIndent();
|
||||||
|
|
||||||
if ( wi->TimeoutExpr() )
|
if ( wi->OrigTimeout() )
|
||||||
{
|
{
|
||||||
if ( d->IsReadable() )
|
if ( d->IsReadable() )
|
||||||
{
|
{
|
||||||
d->SP();
|
d->SP();
|
||||||
d->Add("timeout");
|
d->Add("timeout");
|
||||||
d->SP();
|
d->SP();
|
||||||
wi->TimeoutExpr()->Describe(d);
|
wi->OrigTimeout()->Describe(d);
|
||||||
d->SP();
|
d->SP();
|
||||||
d->PushIndent();
|
d->PushIndent();
|
||||||
wi->TimeoutStmt()->AccessStats(d);
|
wi->OrigTimeoutStmt()->AccessStats(d);
|
||||||
wi->TimeoutStmt()->Describe(d);
|
wi->OrigTimeoutStmt()->Describe(d);
|
||||||
d->PopIndent();
|
d->PopIndent();
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
wi->TimeoutExpr()->Describe(d);
|
wi->OrigTimeout()->Describe(d);
|
||||||
wi->TimeoutStmt()->Describe(d);
|
wi->OrigTimeoutStmt()->Describe(d);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2232,22 +2142,22 @@ TraversalCode WhenStmt::Traverse(TraversalCallback* cb) const
|
||||||
|
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
tc = wi->Cond()->Traverse(cb);
|
tc = wi->OrigCond()->Traverse(cb);
|
||||||
HANDLE_TC_STMT_PRE(tc);
|
HANDLE_TC_STMT_PRE(tc);
|
||||||
|
|
||||||
tc = wi->WhenBody()->Traverse(cb);
|
tc = wi->OrigBody()->Traverse(cb);
|
||||||
HANDLE_TC_STMT_PRE(tc);
|
HANDLE_TC_STMT_PRE(tc);
|
||||||
|
|
||||||
if ( wi->TimeoutStmt() )
|
if ( wi->OrigTimeoutStmt() )
|
||||||
{
|
{
|
||||||
tc = wi->TimeoutStmt()->Traverse(cb);
|
tc = wi->OrigTimeoutStmt()->Traverse(cb);
|
||||||
HANDLE_TC_STMT_PRE(tc);
|
HANDLE_TC_STMT_PRE(tc);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if ( wi->TimeoutExpr() )
|
if ( wi->OrigTimeout() )
|
||||||
{
|
{
|
||||||
tc = wi->TimeoutExpr()->Traverse(cb);
|
tc = wi->OrigTimeout()->Traverse(cb);
|
||||||
HANDLE_TC_STMT_PRE(tc);
|
HANDLE_TC_STMT_PRE(tc);
|
||||||
}
|
}
|
||||||
|
|
||||||
29 src/Stmt.h

@@ -550,8 +550,7 @@ private:
 class WhenInfo
 	{
 public:
-	// Takes ownership of the CaptureList, which if nil signifies
-	// old-style frame semantics.
+	// Takes ownership of the CaptureList.
 	WhenInfo(ExprPtr cond, FuncType::CaptureList* cl, bool is_return);

 	// Constructor used by script optimization to create a stub.
@@ -582,18 +581,20 @@ public:
 	void Instantiate(Frame* f);
 	void Instantiate(ValPtr func);

-	// For old-style semantics, the following simply return the
-	// individual "when" components. For capture semantics, however,
-	// these instead return different invocations of a lambda that
-	// manages the captures.
+	// Return the original components used to construct the "when".
+	const ExprPtr& OrigCond() const { return cond; }
+	const StmtPtr& OrigBody() const { return s; }
+	const ExprPtr& OrigTimeout() const { return timeout; }
+	const StmtPtr& OrigTimeoutStmt() const { return timeout_s; }
+
+	// Return different invocations of a lambda that manages the captures.
 	ExprPtr Cond();
 	StmtPtr WhenBody();
+	StmtPtr TimeoutStmt();

 	ExprPtr TimeoutExpr() const { return timeout; }
 	double TimeoutVal(Frame* f);

-	StmtPtr TimeoutStmt();
-
 	FuncType::CaptureList* Captures() { return cl; }

 	bool IsReturn() const { return is_return; }
@@ -605,19 +606,13 @@ public:
 	const IDSet& WhenExprGlobals() const { return when_expr_globals; }

 private:
-	// True if the "when" statement corresponds to old-style deprecated
-	// semantics (no captures, but needing captures). Also generates
-	// the corresponding deprecation warnings, which are associated
-	// with "ws".
-	bool IsDeprecatedSemantics(StmtPtr ws);
-
 	// Build those elements we'll need for invoking our lambda.
 	void BuildInvokeElems();

 	ExprPtr cond;
 	StmtPtr s;
-	ExprPtr timeout;
 	StmtPtr timeout_s;
+	ExprPtr timeout;
 	FuncType::CaptureList* cl;

 	bool is_return = false;
@@ -649,10 +644,6 @@ private:

 	// Locals introduced via "local" in the "when" clause itself.
 	IDSet when_new_locals;
-
-	// Used for identifying deprecated instances. Holds all of the local
-	// variables in the scope prior to parsing the "when" statement.
-	std::map<std::string, IDPtr, std::less<>> prior_vars;
 	};

 class WhenStmt final : public Stmt
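Per the comments above, Cond(), WhenBody() and TimeoutStmt() no longer hand back the raw components; for capture semantics they return different invocations of one lambda that owns the captures, while the Orig*() accessors keep the unmodified pieces for describing and traversal. A loose standalone illustration of the "one closure, several entry points" idea, using a plain C++ lambda and a selector enum as stand-ins for Zeek's invoke_cond/invoke_s/invoke_timeout call expressions:

#include <iostream>
#include <string>

// Selector values modeling WhenInfo's invoke_cond/invoke_s/invoke_timeout arguments.
enum class Invoke
	{
	Cond,
	Body,
	Timeout
	};

int main()
	{
	// One closure captures the "when" locals once (here just x); callers pick
	// which piece of the statement to evaluate via the selector argument.
	auto when_lambda = [x = 42](Invoke what) -> std::string
		{
		switch ( what )
			{
			case Invoke::Cond: return x > 0 ? "true" : "false";
			case Invoke::Body: return "run body with x=" + std::to_string(x);
			case Invoke::Timeout: return "run timeout branch";
			}
		return "";
		};

	std::cout << when_lambda(Invoke::Cond) << "\n"
	          << when_lambda(Invoke::Body) << "\n"
	          << when_lambda(Invoke::Timeout) << "\n";
	}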
@@ -93,14 +93,6 @@ int TimerMgr::Advance(double arg_t, int max_expire)

 void TimerMgr::Process()
 	{
-	// If we don't have a source, or the source is closed, or we're reading live (which includes
-	// pseudo-realtime), advance the timer here to the current time since otherwise it won't
-	// move forward and the timers won't fire correctly.
-	iosource::PktSrc* pkt_src = iosource_mgr->GetPktSrc();
-	if ( ! pkt_src || ! pkt_src->IsOpen() || run_state::reading_live ||
-	     run_state::is_processing_suspended() )
-		run_state::detail::update_network_time(util::current_time());
-
 	// Just advance the timer manager based on the current network time. This won't actually
 	// change the time, but will dispatch any timers that need dispatching.
 	run_state::current_dispatched += Advance(run_state::network_time,
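With the removal above, TimerMgr::Process() leans entirely on Advance(network_time, max_expire) to dispatch whatever is due. A small standalone sketch of that kind of dispatch loop, using a priority queue ordered by expiration time (the TimerQueue type and its members are illustrative, not Zeek's TimerMgr API):

#include <cstdio>
#include <functional>
#include <queue>
#include <vector>

struct Timer
	{
	double expire;
	std::function<void(double)> dispatch;
	};

// Orders the queue so the earliest expiration is on top.
struct Later
	{
	bool operator()(const Timer& a, const Timer& b) const { return a.expire > b.expire; }
	};

class TimerQueue
	{
public:
	void Add(double expire, std::function<void(double)> cb) { q.push({expire, std::move(cb)}); }

	// Dispatch timers that expire at or before t, up to max_expire of them;
	// returns how many were dispatched (the general shape of TimerMgr::Advance()).
	int Advance(double t, int max_expire)
		{
		int n = 0;
		while ( ! q.empty() && q.top().expire <= t && n < max_expire )
			{
			auto timer = q.top();
			q.pop();
			timer.dispatch(t);
			++n;
			}
		return n;
		}

private:
	std::priority_queue<Timer, std::vector<Timer>, Later> q;
	};

int main()
	{
	TimerQueue mgr;
	mgr.Add(1.0, [](double t) { std::printf("timer A at %.1f\n", t); });
	mgr.Add(5.0, [](double t) { std::printf("timer B at %.1f\n", t); });
	std::printf("dispatched %d\n", mgr.Advance(2.0, 10)); // only timer A fires
	}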
@@ -99,13 +99,6 @@ protected:
 	double time;
 	};

-Trigger::Trigger(ExprPtr cond, StmtPtr body, StmtPtr timeout_stmts, double timeout, Frame* frame,
-                 bool is_return, const Location* location)
-	{
-	timeout_value = timeout;
-	Init(cond, body, timeout_stmts, frame, is_return, location);
-	}
-
 Trigger::Trigger(WhenInfo* wi, double timeout, const IDSet& _globals,
                  std::vector<ValPtr> _local_aggrs, Frame* f, const Location* loc)
 	{
@@ -114,34 +107,29 @@ Trigger::Trigger(WhenInfo* wi, double timeout, const IDSet& _globals,
 	local_aggrs = std::move(_local_aggrs);
 	have_trigger_elems = true;

-	Init(wi->Cond(), wi->WhenBody(), wi->TimeoutStmt(), f, wi->IsReturn(), loc);
-	}
-
-void Trigger::Init(ExprPtr arg_cond, StmtPtr arg_body, StmtPtr arg_timeout_stmts, Frame* arg_frame,
-                   bool arg_is_return, const Location* location)
-	{
-	cond = arg_cond;
-	body = arg_body;
-	timeout_stmts = arg_timeout_stmts;
+	cond = wi->Cond();
+	body = wi->WhenBody();
+	timeout_stmts = wi->TimeoutStmt();
+	is_return = wi->IsReturn();
+
 	timer = nullptr;
 	delayed = false;
 	disabled = false;
 	attached = nullptr;
-	is_return = arg_is_return;

 	if ( location )
 		name = util::fmt("%s:%d-%d", location->filename, location->first_line, location->last_line);
 	else
 		name = "<no-trigger-location>";

-	if ( arg_frame )
-		frame = arg_frame->Clone();
+	if ( f )
+		frame = f->CloneForTrigger();
 	else
 		frame = nullptr;

 	DBG_LOG(DBG_NOTIFIERS, "%s: instantiating", Name());

-	if ( is_return && frame && arg_frame )
+	if ( is_return && frame )
 		{
 		Trigger* parent = frame->GetTrigger();
 		if ( ! parent )
@@ -152,7 +140,7 @@ Trigger::Trigger(WhenInfo* wi, double timeout, const IDSet& _globals,
 			}

 		parent->Attach(this);
-		arg_frame->SetDelayed();
+		f->SetDelayed();
 		}

 	// Make sure we don't get deleted if somebody calls a method like
@@ -262,7 +250,7 @@ bool Trigger::Eval()

 	try
 		{
-		f = frame->Clone();
+		f = frame->CloneForTrigger();
 		}
 	catch ( InterpreterException& )
 		{
@@ -364,7 +352,7 @@ void Trigger::Timeout()
 	if ( timeout_stmts )
 		{
 		StmtFlowType flow;
-		FramePtr f{AdoptRef{}, frame->Clone()};
+		FramePtr f{AdoptRef{}, frame->CloneForTrigger()};
 		ValPtr v;

 		try
@@ -48,11 +48,6 @@ public:
 	// statements are executed immediately and the object is deleted
 	// right away.

-	// These first constructor is for the deprecated deep-copy semantics.
-	Trigger(ExprPtr cond, StmtPtr body, StmtPtr timeout_stmts, double timeout, Frame* f,
-	        bool is_return, const Location* loc);
-
-	// Used for capture-list semantics.
 	Trigger(WhenInfo* wi, double timeout, const IDSet& globals, std::vector<ValPtr> local_aggrs,
 	        Frame* f, const Location* loc);

@@ -112,9 +107,6 @@ public:
 private:
 	friend class TriggerTimer;

-	void Init(ExprPtr cond, StmtPtr body, StmtPtr timeout_stmts, Frame* frame, bool is_return,
-	          const Location* location);
-
 	void ReInit(std::vector<ValPtr> index_expr_results);

 	void Register(const ID* id);
247
src/Type.cc
247
src/Type.cc
|
@ -992,41 +992,118 @@ void TypeDecl::DescribeReST(ODesc* d, bool roles_only) const
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The following tracks how to initialize a given field, for fast execution
|
namespace detail
|
||||||
// of Create().
|
{
|
||||||
|
|
||||||
class FieldInit
|
// A record field initialization that directly assigns a fixed value ...
|
||||||
|
class DirectFieldInit final : public FieldInit
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
// The type of initialization for the field.
|
DirectFieldInit(ZVal _init_val) : init_val(_init_val) { }
|
||||||
enum
|
|
||||||
{
|
|
||||||
R_INIT_NONE, // skip this entry
|
|
||||||
|
|
||||||
R_INIT_DIRECT, // look in direct_init for raw value
|
ZVal Generate() const override { return init_val; }
|
||||||
R_INIT_DIRECT_MANAGED, // same, but managed type
|
|
||||||
|
|
||||||
R_INIT_DEF, // look in def_expr for expression
|
private:
|
||||||
|
ZVal init_val;
|
||||||
R_INIT_RECORD, // field requires a new record
|
|
||||||
R_INIT_TABLE, // field requires a new table/set
|
|
||||||
R_INIT_VECTOR, // field requires a new vector
|
|
||||||
} init_type = R_INIT_NONE;
|
|
||||||
|
|
||||||
bool def_coerce = false; // whether coercion's required
|
|
||||||
|
|
||||||
// For R_INIT_DIRECT/R_INIT_DIRECT_MANAGED:
|
|
||||||
ZVal direct_init;
|
|
||||||
|
|
||||||
detail::ExprPtr def_expr;
|
|
||||||
TypePtr def_type;
|
|
||||||
|
|
||||||
RecordTypePtr r_type; // for R_INIT_RECORD
|
|
||||||
TableTypePtr t_type; // for R_INIT_TABLE
|
|
||||||
detail::AttributesPtr attrs; // attributes for R_INIT_TABLE
|
|
||||||
VectorTypePtr v_type; // for R_INIT_VECTOR
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// ... the same, but for a value that needs memory management.
|
||||||
|
class DirectManagedFieldInit final : public FieldInit
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
DirectManagedFieldInit(ZVal _init_val) : init_val(_init_val) { }
|
||||||
|
~DirectManagedFieldInit() { ZVal::DeleteManagedType(init_val); }
|
||||||
|
|
||||||
|
ZVal Generate() const override
|
||||||
|
{
|
||||||
|
zeek::Ref(init_val.ManagedVal());
|
||||||
|
return init_val;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
ZVal init_val;
|
||||||
|
};
|
||||||
|
|
||||||
|
// A record field initialization that's done by evaluating an expression.
|
||||||
|
class ExprFieldInit final : public FieldInit
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
// Initialization requires evaluating the given expression,
|
||||||
|
// yielding the a value of the given type (which might require
|
||||||
|
// coercion for some records).
|
||||||
|
ExprFieldInit(detail::ExprPtr _init_expr, TypePtr _init_type)
|
||||||
|
: init_expr(std::move(_init_expr)), init_type(std::move(_init_type))
|
||||||
|
{
|
||||||
|
if ( init_type->Tag() == TYPE_RECORD && ! same_type(init_expr->GetType(), init_type) )
|
||||||
|
coerce_type = cast_intrusive<RecordType>(init_type);
|
||||||
|
}
|
||||||
|
|
||||||
|
ZVal Generate() const override
|
||||||
|
{
|
||||||
|
auto v = init_expr->Eval(nullptr);
|
||||||
|
if ( ! v )
|
||||||
|
{
|
||||||
|
reporter->Error("failed &default in record creation");
|
||||||
|
return ZVal();
|
||||||
|
}
|
||||||
|
|
||||||
|
if ( coerce_type )
|
||||||
|
v = v->AsRecordVal()->CoerceTo(coerce_type);
|
||||||
|
|
||||||
|
return ZVal(v, init_type);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
detail::ExprPtr init_expr;
|
||||||
|
TypePtr init_type;
|
||||||
|
RecordTypePtr coerce_type; // non-nil iff coercion is required
|
||||||
|
};
|
||||||
|
|
||||||
|
// A record field initialization where the field is initialized to an
|
||||||
|
// empty/default record of the given type.
|
||||||
|
class RecordFieldInit final : public FieldInit
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
RecordFieldInit(RecordTypePtr _init_type) : init_type(std::move(_init_type)) { }
|
||||||
|
|
||||||
|
ZVal Generate() const override { return ZVal(new RecordVal(init_type)); }
|
||||||
|
|
||||||
|
private:
|
||||||
|
RecordTypePtr init_type;
|
||||||
|
};
|
||||||
|
|
||||||
|
// A record field initialization where the field is initialized to an
|
||||||
|
// empty table of the given type.
|
||||||
|
class TableFieldInit final : public FieldInit
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
TableFieldInit(TableTypePtr _init_type, detail::AttributesPtr _attrs)
|
||||||
|
: init_type(std::move(_init_type)), attrs(std::move(_attrs))
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
ZVal Generate() const override { return ZVal(new TableVal(init_type, attrs)); }
|
||||||
|
|
||||||
|
private:
|
||||||
|
TableTypePtr init_type;
|
||||||
|
detail::AttributesPtr attrs;
|
||||||
|
};
|
||||||
|
|
||||||
|
// A record field initialization where the field is initialized to an
|
||||||
|
// empty vector of the given type.
|
||||||
|
class VectorFieldInit final : public FieldInit
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
VectorFieldInit(VectorTypePtr _init_type) : init_type(std::move(_init_type)) { }
|
||||||
|
|
||||||
|
ZVal Generate() const override { return ZVal(new VectorVal(init_type)); }
|
||||||
|
|
||||||
|
private:
|
||||||
|
VectorTypePtr init_type;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace detail
|
||||||
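The surrounding Type.cc changes replace the old tagged FieldInit struct with a small class hierarchy: an abstract Generate() plus one subclass per initialization strategy (direct value, managed value, expression, empty record/table/vector). A hedged standalone sketch of that pattern, with an int standing in for Zeek's ZVal and std::function standing in for a default expression (these stand-ins are assumptions for illustration, not the actual Zeek types):

#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Stand-in for ZVal; each initializer knows how to produce one field value.
using Value = int;

class FieldInit
	{
public:
	virtual ~FieldInit() = default;
	virtual Value Generate() const = 0;
	};

// Field initialized from a fixed value known at parse time (like DirectFieldInit).
class DirectInit final : public FieldInit
	{
public:
	explicit DirectInit(Value v) : v(v) { }
	Value Generate() const override { return v; }

private:
	Value v;
	};

// Field initialized by (re)evaluating an expression at creation time (like ExprFieldInit).
class ExprInit final : public FieldInit
	{
public:
	explicit ExprInit(std::function<Value()> e) : expr(std::move(e)) { }
	Value Generate() const override { return expr(); }

private:
	std::function<Value()> expr;
	};

int main()
	{
	// One initializer per record field, nullptr meaning "leave the field unset".
	std::vector<std::unique_ptr<FieldInit>> inits;
	inits.push_back(std::make_unique<DirectInit>(7));
	inits.push_back(nullptr);
	inits.push_back(std::make_unique<ExprInit>([] { return 40 + 2; }));

	for ( const auto& fi : inits )
		std::cout << (fi ? std::to_string(fi->Generate()) : std::string("<unset>")) << "\n";
	}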
|
|
||||||
RecordType::RecordType(type_decl_list* arg_types) : Type(TYPE_RECORD)
|
RecordType::RecordType(type_decl_list* arg_types) : Type(TYPE_RECORD)
|
||||||
{
|
{
|
||||||
types = arg_types;
|
types = arg_types;
|
||||||
|
@ -1062,65 +1139,52 @@ RecordType::~RecordType()
|
||||||
|
|
||||||
delete types;
|
delete types;
|
||||||
}
|
}
|
||||||
|
|
||||||
for ( auto fi : field_inits )
|
|
||||||
delete fi;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void RecordType::AddField(unsigned int field, const TypeDecl* td)
|
void RecordType::AddField(unsigned int field, const TypeDecl* td)
|
||||||
{
|
{
|
||||||
ASSERT(field == field_inits.size());
|
ASSERT(field == deferred_inits.size());
|
||||||
ASSERT(field == managed_fields.size());
|
ASSERT(field == managed_fields.size());
|
||||||
|
|
||||||
managed_fields.push_back(ZVal::IsManagedType(td->type));
|
managed_fields.push_back(ZVal::IsManagedType(td->type));
|
||||||
|
|
||||||
auto init = new FieldInit();
|
// We defer error-checking until here so that we can keep deferred_inits
|
||||||
init->init_type = FieldInit::R_INIT_NONE;
|
|
||||||
|
|
||||||
init->attrs = td->attrs;
|
|
||||||
|
|
||||||
// We defer error-checking until here so that we can keep field_inits
|
|
||||||
// and managed_fields correctly tracking the associated fields.
|
// and managed_fields correctly tracking the associated fields.
|
||||||
|
|
||||||
if ( field_ids.count(td->id) != 0 )
|
if ( field_ids.count(td->id) != 0 )
|
||||||
{
|
{
|
||||||
reporter->Error("duplicate field '%s' found in record definition", td->id);
|
reporter->Error("duplicate field '%s' found in record definition", td->id);
|
||||||
field_inits.push_back(init);
|
deferred_inits.push_back(nullptr);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
field_ids.insert(std::string(td->id));
|
field_ids.insert(std::string(td->id));
|
||||||
|
|
||||||
auto a = init->attrs;
|
auto a = td->attrs;
|
||||||
|
|
||||||
auto type = td->type;
|
auto type = td->type;
|
||||||
|
|
||||||
auto def_attr = a ? a->Find(detail::ATTR_DEFAULT) : nullptr;
|
auto def_attr = a ? a->Find(detail::ATTR_DEFAULT) : nullptr;
|
||||||
auto def_expr = def_attr ? def_attr->GetExpr() : nullptr;
|
auto def_expr = def_attr ? def_attr->GetExpr() : nullptr;
|
||||||
|
|
||||||
|
std::unique_ptr<detail::FieldInit> init;
|
||||||
|
|
||||||
if ( def_expr && ! IsErrorType(type->Tag()) )
|
if ( def_expr && ! IsErrorType(type->Tag()) )
|
||||||
{
|
{
|
||||||
if ( type->Tag() == TYPE_RECORD && def_expr->GetType()->Tag() == TYPE_RECORD &&
|
|
||||||
! same_type(def_expr->GetType(), type) )
|
|
||||||
init->def_coerce = true;
|
|
||||||
|
|
||||||
if ( def_expr->Tag() == detail::EXPR_CONST )
|
if ( def_expr->Tag() == detail::EXPR_CONST )
|
||||||
{
|
{
|
||||||
auto v = def_expr->Eval(nullptr);
|
auto v = def_expr->Eval(nullptr);
|
||||||
|
auto zv = ZVal(v, type);
|
||||||
|
|
||||||
if ( ZVal::IsManagedType(type) )
|
if ( ZVal::IsManagedType(type) )
|
||||||
init->init_type = FieldInit::R_INIT_DIRECT_MANAGED;
|
init = std::make_unique<detail::DirectManagedFieldInit>(zv);
|
||||||
else
|
else
|
||||||
init->init_type = FieldInit::R_INIT_DIRECT;
|
init = std::make_unique<detail::DirectFieldInit>(zv);
|
||||||
|
|
||||||
init->direct_init = ZVal(v, type);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
init->init_type = FieldInit::R_INIT_DEF;
|
auto efi = std::make_unique<detail::ExprFieldInit>(def_expr, type);
|
||||||
init->def_expr = def_expr;
|
creation_inits.emplace_back(std::make_pair(field, std::move(efi)));
|
||||||
init->def_type = def_expr->GetType();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1129,25 +1193,16 @@ void RecordType::AddField(unsigned int field, const TypeDecl* td)
|
||||||
TypeTag tag = type->Tag();
|
TypeTag tag = type->Tag();
|
||||||
|
|
||||||
if ( tag == TYPE_RECORD )
|
if ( tag == TYPE_RECORD )
|
||||||
{
|
init = std::make_unique<detail::RecordFieldInit>(cast_intrusive<RecordType>(type));
|
||||||
init->init_type = FieldInit::R_INIT_RECORD;
|
|
||||||
init->r_type = cast_intrusive<RecordType>(type);
|
|
||||||
}
|
|
||||||
|
|
||||||
else if ( tag == TYPE_TABLE )
|
else if ( tag == TYPE_TABLE )
|
||||||
{
|
init = std::make_unique<detail::TableFieldInit>(cast_intrusive<TableType>(type), a);
|
||||||
init->init_type = FieldInit::R_INIT_TABLE;
|
|
||||||
init->t_type = cast_intrusive<TableType>(type);
|
|
||||||
}
|
|
||||||
|
|
||||||
else if ( tag == TYPE_VECTOR )
|
else if ( tag == TYPE_VECTOR )
|
||||||
{
|
init = std::make_unique<detail::VectorFieldInit>(cast_intrusive<VectorType>(type));
|
||||||
init->init_type = FieldInit::R_INIT_VECTOR;
|
|
||||||
init->v_type = cast_intrusive<VectorType>(type);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
field_inits.push_back(init);
|
deferred_inits.push_back(std::move(init));
|
||||||
}
|
}
|
||||||
|
|
||||||
bool RecordType::HasField(const char* field) const
|
bool RecordType::HasField(const char* field) const
|
||||||
|
@ -1344,64 +1399,14 @@ void RecordType::AddFieldsDirectly(const type_decl_list& others, bool add_log_at
|
||||||
|
|
||||||
void RecordType::Create(std::vector<std::optional<ZVal>>& r) const
|
void RecordType::Create(std::vector<std::optional<ZVal>>& r) const
|
||||||
{
|
{
|
||||||
int n = NumFields();
|
for ( auto& di : deferred_inits )
|
||||||
|
if ( di )
|
||||||
for ( int i = 0; i < n; ++i )
|
r.push_back(di->Generate());
|
||||||
{
|
|
||||||
auto* init = field_inits[i];
|
|
||||||
|
|
||||||
ZVal r_i;
|
|
||||||
|
|
||||||
switch ( init->init_type )
|
|
||||||
{
|
|
||||||
case FieldInit::R_INIT_NONE:
|
|
||||||
r.push_back(std::nullopt);
|
|
||||||
continue;
|
|
||||||
|
|
||||||
case FieldInit::R_INIT_DIRECT:
|
|
||||||
r_i = init->direct_init;
|
|
||||||
break;
|
|
||||||
|
|
||||||
case FieldInit::R_INIT_DIRECT_MANAGED:
|
|
||||||
r_i = init->direct_init;
|
|
||||||
zeek::Ref(r_i.ManagedVal());
|
|
||||||
break;
|
|
||||||
|
|
||||||
case FieldInit::R_INIT_DEF:
|
|
||||||
{
|
|
||||||
auto v = init->def_expr->Eval(nullptr);
|
|
||||||
if ( v )
|
|
||||||
{
|
|
||||||
const auto& t = init->def_type;
|
|
||||||
|
|
||||||
if ( init->def_coerce )
|
|
||||||
{
|
|
||||||
auto rt = cast_intrusive<RecordType>(t);
|
|
||||||
v = v->AsRecordVal()->CoerceTo(rt);
|
|
||||||
}
|
|
||||||
|
|
||||||
r_i = ZVal(v, t);
|
|
||||||
}
|
|
||||||
else
|
else
|
||||||
reporter->Error("failed &default in record creation");
|
r.push_back(std::nullopt);
|
||||||
}
|
|
||||||
break;
|
|
||||||
|
|
||||||
case FieldInit::R_INIT_RECORD:
|
for ( auto& ci : creation_inits )
|
||||||
r_i = ZVal(new RecordVal(init->r_type));
|
r[ci.first] = ci.second->Generate();
|
||||||
break;
|
|
||||||
|
|
||||||
case FieldInit::R_INIT_TABLE:
|
|
||||||
r_i = ZVal(new TableVal(init->t_type, init->attrs));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case FieldInit::R_INIT_VECTOR:
|
|
||||||
r_i = ZVal(new VectorVal(init->v_type));
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
r.push_back(r_i);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void RecordType::DescribeFields(ODesc* d) const
|
void RecordType::DescribeFields(ODesc* d) const
|
||||||
|
|
49
src/Type.h
49
src/Type.h
|
@ -22,6 +22,7 @@ namespace zeek
|
||||||
class Val;
|
class Val;
|
||||||
union ZVal;
|
union ZVal;
|
||||||
class EnumVal;
|
class EnumVal;
|
||||||
|
class RecordVal;
|
||||||
class TableVal;
|
class TableVal;
|
||||||
using ValPtr = IntrusivePtr<Val>;
|
using ValPtr = IntrusivePtr<Val>;
|
||||||
using EnumValPtr = IntrusivePtr<EnumVal>;
|
using EnumValPtr = IntrusivePtr<EnumVal>;
|
||||||
|
@ -35,6 +36,16 @@ class ListExpr;
|
||||||
class Attributes;
|
class Attributes;
|
||||||
using ListExprPtr = IntrusivePtr<ListExpr>;
|
using ListExprPtr = IntrusivePtr<ListExpr>;
|
||||||
|
|
||||||
|
// The following tracks how to initialize a given record field.
|
||||||
|
class FieldInit
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
virtual ~FieldInit() { }
|
||||||
|
|
||||||
|
// Return the initialization value of the field.
|
||||||
|
virtual ZVal Generate() const = 0;
|
||||||
|
};
|
||||||
|
|
||||||
} // namespace detail
|
} // namespace detail
|
||||||
|
|
||||||
// Zeek types.
|
// Zeek types.
|
||||||
|
@ -599,11 +610,6 @@ public:
|
||||||
|
|
||||||
using type_decl_list = PList<TypeDecl>;
|
using type_decl_list = PList<TypeDecl>;
|
||||||
|
|
||||||
// The following tracks how to initialize a given field. We don't define
|
|
||||||
// it here because it requires pulling in a bunch of low-level headers that
|
|
||||||
// would be nice to avoid.
|
|
||||||
class FieldInit;
|
|
||||||
|
|
||||||
class RecordType final : public Type
|
class RecordType final : public Type
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
|
@ -687,7 +693,9 @@ public:
|
||||||
* Populates a new instance of the record with its initial values.
|
* Populates a new instance of the record with its initial values.
|
||||||
* @param r The record's underlying value vector.
|
* @param r The record's underlying value vector.
|
||||||
*/
|
*/
|
||||||
void Create(std::vector<std::optional<ZVal>>& r) const;
|
[[deprecated("Remove in v6.1. Construct a corresponding RecordVal and build vector from "
|
||||||
|
"GetFieldAs() calls.")]] void
|
||||||
|
Create(std::vector<std::optional<ZVal>>& r) const;
|
||||||
|
|
||||||
void DescribeReST(ODesc* d, bool roles_only = false) const override;
|
void DescribeReST(ODesc* d, bool roles_only = false) const override;
|
||||||
void DescribeFields(ODesc* d) const;
|
void DescribeFields(ODesc* d) const;
|
||||||
|
@ -709,16 +717,29 @@ public:
|
||||||
|
|
||||||
detail::TraversalCode Traverse(detail::TraversalCallback* cb) const override;
|
detail::TraversalCode Traverse(detail::TraversalCallback* cb) const override;
|
||||||
|
|
||||||
protected:
|
private:
|
||||||
RecordType() { types = nullptr; }
|
RecordType() { types = nullptr; }
|
||||||
|
|
||||||
void AddField(unsigned int field, const TypeDecl* td);
|
void AddField(unsigned int field, const TypeDecl* td);
|
||||||
|
|
||||||
void DoDescribe(ODesc* d) const override;
|
void DoDescribe(ODesc* d) const override;
|
||||||
|
|
||||||
// Maps each field to how to initialize it. Uses pointers due to
|
// Field initializations that can be deferred to first access,
|
||||||
// keeping the FieldInit definition private to Type.cc (see above).
|
// beneficial for fields that are separately initialized prior
|
||||||
std::vector<FieldInit*> field_inits;
|
// to first access. Nil pointers mean "skip initializing the field".
|
||||||
|
std::vector<std::unique_ptr<detail::FieldInit>> deferred_inits;
|
||||||
|
|
||||||
|
// Field initializations that need to be done upon record creation,
|
||||||
|
// rather than deferred. These are expressions whose value might
|
||||||
|
// change if computed later.
|
||||||
|
//
|
||||||
|
// Such initializations are uncommon, so we represent them using
|
||||||
|
// <fieldoffset, init> pairs.
|
||||||
|
std::vector<std::pair<int, std::unique_ptr<detail::FieldInit>>> creation_inits;
|
||||||
|
|
||||||
|
friend zeek::RecordVal;
|
||||||
|
const auto& DeferredInits() const { return deferred_inits; }
|
||||||
|
const auto& CreationInits() const { return creation_inits; }
|
||||||
|
|
||||||
// If we were willing to bound the size of records, then we could
|
// If we were willing to bound the size of records, then we could
|
||||||
// use std::bitset here instead.
|
// use std::bitset here instead.
|
||||||
|
@ -1008,7 +1029,7 @@ inline bool IsString(TypeTag t)
|
||||||
return (t == TYPE_STRING);
|
return (t == TYPE_STRING);
|
||||||
}
|
}
|
||||||
|
|
||||||
// True if the given type is a container aggregate.
|
// True if the given type is an aggregate.
|
||||||
inline bool IsAggr(TypeTag tag)
|
inline bool IsAggr(TypeTag tag)
|
||||||
{
|
{
|
||||||
return tag == TYPE_VECTOR || tag == TYPE_TABLE || tag == TYPE_RECORD;
|
return tag == TYPE_VECTOR || tag == TYPE_TABLE || tag == TYPE_RECORD;
|
||||||
|
@ -1022,6 +1043,12 @@ inline bool IsAggr(const TypePtr& t)
|
||||||
return IsAggr(t->Tag());
|
return IsAggr(t->Tag());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// True if the given type is a container.
|
||||||
|
inline bool IsContainer(TypeTag tag)
|
||||||
|
{
|
||||||
|
return tag == TYPE_VECTOR || tag == TYPE_TABLE;
|
||||||
|
}
|
||||||
|
|
||||||
// True if the given type tag corresponds to the error type.
|
// True if the given type tag corresponds to the error type.
|
||||||
inline bool IsErrorType(TypeTag t)
|
inline bool IsErrorType(TypeTag t)
|
||||||
{
|
{
|
||||||
|
|
29 src/Val.cc

@@ -2759,17 +2759,20 @@ RecordVal::RecordVal(RecordTypePtr t, bool init_fields) : Val(t), is_managed(t->

 	int n = rt->NumFields();

-	record_val = new std::vector<std::optional<ZVal>>;
-	record_val->reserve(n);
-
 	if ( run_state::is_parsing )
 		parse_time_records[rt.get()].emplace_back(NewRef{}, this);

+	record_val = new std::vector<std::optional<ZVal>>;
+
 	if ( init_fields )
+		{
+		record_val->resize(n);
+
+		for ( auto& e : rt->CreationInits() )
 			{
 			try
 				{
-				rt->Create(*record_val);
+				(*record_val)[e.first] = e.second->Generate();
 				}
 			catch ( InterpreterException& e )
 				{
@@ -2780,13 +2783,20 @@ RecordVal::RecordVal(RecordTypePtr t, bool init_fields) : Val(t), is_managed(t->
 				}
 			}

+	else
+		record_val->reserve(n);
+	}
+
 RecordVal::~RecordVal()
 	{
 	auto n = record_val->size();

 	for ( unsigned int i = 0; i < n; ++i )
-		if ( HasField(i) && IsManaged(i) )
-			ZVal::DeleteManagedType(*(*record_val)[i]);
+		{
+		auto f_i = (*record_val)[i];
+		if ( f_i && IsManaged(i) )
+			ZVal::DeleteManagedType(*f_i);
+		}

 	delete record_val;
 	}
@@ -2812,12 +2822,13 @@ void RecordVal::Assign(int field, ValPtr new_val)

 void RecordVal::Remove(int field)
 	{
-	if ( HasField(field) )
+	auto& f_i = (*record_val)[field];
+	if ( f_i )
 		{
 		if ( IsManaged(field) )
-			ZVal::DeleteManagedType(*(*record_val)[field]);
+			ZVal::DeleteManagedType(*f_i);

-		(*record_val)[field] = std::nullopt;
+		f_i = std::nullopt;

 		Modified();
 		}
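The RecordVal constructor above now only runs CreationInits() eagerly; the remaining defaults can be produced on demand from the type's per-field initializers. A rough standalone model of that lazy half, with std::optional slots filled from a deferred initializer on first read (the Record and DeferredInit names here are simplified stand-ins, not the actual Zeek classes):

#include <functional>
#include <iostream>
#include <optional>
#include <vector>

using Value = int;
using DeferredInit = std::function<Value()>;

class Record
	{
public:
	explicit Record(std::vector<DeferredInit> inits)
		: deferred(std::move(inits)), fields(deferred.size()) { }

	// A field "exists" if it was assigned or has a deferred default.
	bool HasField(size_t i) const { return fields[i].has_value() || static_cast<bool>(deferred[i]); }

	// First access runs the deferred initializer and caches the result.
	Value GetField(size_t i)
		{
		if ( ! fields[i] )
			fields[i] = deferred[i] ? deferred[i]() : Value{};
		return *fields[i];
		}

	void Assign(size_t i, Value v) { fields[i] = v; }

private:
	std::vector<DeferredInit> deferred;
	std::vector<std::optional<Value>> fields;
	};

int main()
	{
	Record r({[] { std::cout << "init field 0\n"; return 10; }, DeferredInit{}});
	r.Assign(1, 5);
	std::cout << r.GetField(0) << " " << r.GetField(1) << "\n"; // field 0 built lazily
	std::cout << r.GetField(0) << "\n";                         // cached, no re-init
	}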
58
src/Val.h
58
src/Val.h
|
@ -52,9 +52,15 @@ class HashKey;
|
||||||
|
|
||||||
class ValTrace;
|
class ValTrace;
|
||||||
class ZBody;
|
class ZBody;
|
||||||
|
class CPPRuntime;
|
||||||
|
|
||||||
} // namespace detail
|
} // namespace detail
|
||||||
|
|
||||||
|
namespace logging
|
||||||
|
{
|
||||||
|
class Manager;
|
||||||
|
}
|
||||||
|
|
||||||
namespace run_state
|
namespace run_state
|
||||||
{
|
{
|
||||||
|
|
||||||
|
@ -1173,9 +1179,10 @@ public:
|
||||||
|
|
||||||
void Assign(int field, StringVal* new_val)
|
void Assign(int field, StringVal* new_val)
|
||||||
{
|
{
|
||||||
if ( HasField(field) )
|
auto& fv = (*record_val)[field];
|
||||||
ZVal::DeleteManagedType(*(*record_val)[field]);
|
if ( fv )
|
||||||
(*record_val)[field] = ZVal(new_val);
|
ZVal::DeleteManagedType(*fv);
|
||||||
|
fv = ZVal(new_val);
|
||||||
AddedField(field);
|
AddedField(field);
|
||||||
}
|
}
|
||||||
void Assign(int field, const char* new_val) { Assign(field, new StringVal(new_val)); }
|
void Assign(int field, const char* new_val) { Assign(field, new StringVal(new_val)); }
|
||||||
|
@ -1188,7 +1195,7 @@ public:
|
||||||
*/
|
*/
|
||||||
template <class T> void AssignField(const char* field_name, T&& val)
|
template <class T> void AssignField(const char* field_name, T&& val)
|
||||||
{
|
{
|
||||||
int idx = GetType()->AsRecordType()->FieldOffset(field_name);
|
int idx = rt->FieldOffset(field_name);
|
||||||
if ( idx < 0 )
|
if ( idx < 0 )
|
||||||
reporter->InternalError("missing record field: %s", field_name);
|
reporter->InternalError("missing record field: %s", field_name);
|
||||||
Assign(idx, std::forward<T>(val));
|
Assign(idx, std::forward<T>(val));
|
||||||
|
@ -1206,7 +1213,13 @@ public:
|
||||||
* @param field The field index to retrieve.
|
* @param field The field index to retrieve.
|
||||||
* @return Whether there's a value for the given field index.
|
* @return Whether there's a value for the given field index.
|
||||||
*/
|
*/
|
||||||
bool HasField(int field) const { return (*record_val)[field] ? true : false; }
|
bool HasField(int field) const
|
||||||
|
{
|
||||||
|
if ( (*record_val)[field] )
|
||||||
|
return true;
|
||||||
|
|
||||||
|
return rt->DeferredInits()[field] != nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns true if the given field is in the record, false if
|
* Returns true if the given field is in the record, false if
|
||||||
|
@ -1216,7 +1229,7 @@ public:
|
||||||
*/
|
*/
|
||||||
bool HasField(const char* field) const
|
bool HasField(const char* field) const
|
||||||
{
|
{
|
||||||
int idx = GetType()->AsRecordType()->FieldOffset(field);
|
int idx = rt->FieldOffset(field);
|
||||||
return (idx != -1) && HasField(idx);
|
return (idx != -1) && HasField(idx);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1227,10 +1240,17 @@ public:
|
||||||
*/
|
*/
|
||||||
ValPtr GetField(int field) const
|
ValPtr GetField(int field) const
|
||||||
{
|
{
|
||||||
if ( ! HasField(field) )
|
auto& fv = (*record_val)[field];
|
||||||
|
if ( ! fv )
|
||||||
|
{
|
||||||
|
const auto& fi = rt->DeferredInits()[field];
|
||||||
|
if ( ! fi )
|
||||||
return nullptr;
|
return nullptr;
|
||||||
|
|
||||||
return (*record_val)[field]->ToVal(rt->GetFieldType(field));
|
fv = fi->Generate();
|
||||||
|
}
|
||||||
|
|
||||||
|
return fv->ToVal(rt->GetFieldType(field));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -1358,7 +1378,7 @@ public:
|
||||||
|
|
||||||
template <typename T> auto GetFieldAs(const char* field) const
|
template <typename T> auto GetFieldAs(const char* field) const
|
||||||
{
|
{
|
||||||
int idx = GetType()->AsRecordType()->FieldOffset(field);
|
int idx = rt->FieldOffset(field);
|
||||||
|
|
||||||
if ( idx < 0 )
|
if ( idx < 0 )
|
||||||
reporter->InternalError("missing record field: %s", field);
|
reporter->InternalError("missing record field: %s", field);
|
||||||
|
@ -1403,8 +1423,10 @@ public:
|
||||||
static void DoneParsing();
|
static void DoneParsing();
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
friend class zeek::logging::Manager;
|
||||||
friend class zeek::detail::ValTrace;
|
friend class zeek::detail::ValTrace;
|
||||||
friend class zeek::detail::ZBody;
|
friend class zeek::detail::ZBody;
|
||||||
|
friend class zeek::detail::CPPRuntime;
|
||||||
|
|
||||||
RecordValPtr DoCoerceTo(RecordTypePtr other, bool allow_orphaning) const;
|
RecordValPtr DoCoerceTo(RecordTypePtr other, bool allow_orphaning) const;
|
||||||
|
|
||||||
|
@ -1429,7 +1451,18 @@ protected:
|
||||||
// Caller assumes responsibility for memory management. The first
|
// Caller assumes responsibility for memory management. The first
|
||||||
// version allows manipulation of whether the field is present at all.
|
// version allows manipulation of whether the field is present at all.
|
||||||
// The second version ensures that the optional value is present.
|
// The second version ensures that the optional value is present.
|
||||||
std::optional<ZVal>& RawOptField(int field) { return (*record_val)[field]; }
|
std::optional<ZVal>& RawOptField(int field)
|
||||||
|
{
|
||||||
|
auto& f = (*record_val)[field];
|
||||||
|
if ( ! f )
|
||||||
|
{
|
||||||
|
const auto& fi = rt->DeferredInits()[field];
|
||||||
|
if ( fi )
|
||||||
|
f = fi->Generate();
|
||||||
|
}
|
||||||
|
|
||||||
|
return f;
|
||||||
|
}
|
||||||
|
|
||||||
ZVal& RawField(int field)
|
ZVal& RawField(int field)
|
||||||
{
|
{
|
||||||
|
@ -1451,8 +1484,9 @@ protected:
|
||||||
private:
|
private:
|
||||||
void DeleteFieldIfManaged(unsigned int field)
|
void DeleteFieldIfManaged(unsigned int field)
|
||||||
{
|
{
|
||||||
if ( HasField(field) && IsManaged(field) )
|
auto& f = (*record_val)[field];
|
||||||
ZVal::DeleteManagedType(*(*record_val)[field]);
|
if ( f && IsManaged(field) )
|
||||||
|
ZVal::DeleteManagedType(*f);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool IsManaged(unsigned int offset) const { return is_managed[offset]; }
|
bool IsManaged(unsigned int offset) const { return is_managed[offset]; }
|
||||||
|
|
61
src/Var.cc
61
src/Var.cc
|
@ -6,6 +6,7 @@
|
||||||
|
|
||||||
#include <memory>
|
#include <memory>
|
||||||
|
|
||||||
|
#include "zeek/Desc.h"
|
||||||
#include "zeek/EventRegistry.h"
|
#include "zeek/EventRegistry.h"
|
||||||
#include "zeek/Expr.h"
|
#include "zeek/Expr.h"
|
||||||
#include "zeek/Func.h"
|
#include "zeek/Func.h"
|
||||||
|
@ -193,7 +194,11 @@ static void make_var(const IDPtr& id, TypePtr t, InitClass c, ExprPtr init,
|
||||||
{
|
{
|
||||||
// This can happen because the grammar allows any "init_class",
|
// This can happen because the grammar allows any "init_class",
|
||||||
// including none, to be followed by an expression.
|
// including none, to be followed by an expression.
|
||||||
init->Warn("initialization not preceded by =/+=/-= is deprecated");
|
// Remove in v6.1 (make an error)
|
||||||
|
reporter->Deprecation(
|
||||||
|
util::fmt("Remove in v6.1. Initialization not preceded by =/+=/-= is deprecated. (%s)",
|
||||||
|
obj_desc_short(init.get()).c_str()),
|
||||||
|
init->GetLocationInfo());
|
||||||
|
|
||||||
// The historical instances of these, such as the
|
// The historical instances of these, such as the
|
||||||
// language/redef-same-prefixtable-idx.zeek btest, treat
|
// language/redef-same-prefixtable-idx.zeek btest, treat
|
||||||
|
@ -376,7 +381,7 @@ static void make_var(const IDPtr& id, TypePtr t, InitClass c, ExprPtr init,
|
||||||
|
|
||||||
if ( dt == VAR_OPTION )
|
if ( dt == VAR_OPTION )
|
||||||
{
|
{
|
||||||
if ( ! init )
|
if ( ! init && ! IsContainer(t->Tag()) )
|
||||||
id->Error("option variable must be initialized");
|
id->Error("option variable must be initialized");
|
||||||
|
|
||||||
id->SetOption();
|
id->SetOption();
|
||||||
|
@ -536,14 +541,13 @@ static std::optional<FuncType::Prototype> func_type_check(const FuncType* decl,
|
||||||
{
|
{
|
||||||
auto msg = ad->DeprecationMessage();
|
auto msg = ad->DeprecationMessage();
|
||||||
|
|
||||||
if ( msg.empty() )
|
if ( ! msg.empty() )
|
||||||
impl->Warn(
|
msg = ": " + msg;
|
||||||
util::fmt("use of deprecated parameter '%s'", rval->args->FieldName(i)),
|
|
||||||
decl, true);
|
reporter->Deprecation(util::fmt("use of deprecated parameter '%s'%s (%s)",
|
||||||
else
|
rval->args->FieldName(i), msg.data(),
|
||||||
impl->Warn(util::fmt("use of deprecated parameter '%s': %s",
|
obj_desc_short(impl).c_str()),
|
||||||
rval->args->FieldName(i), msg.data()),
|
impl->GetLocationInfo(), decl->GetLocationInfo());
|
||||||
decl, true);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return rval;
|
return rval;
|
||||||
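The calls in this area route deprecation notices through reporter->Deprecation() with both the use site and the declaring prototype's location. A minimal standalone sketch of emitting such a two-location warning (the Location struct and warn_deprecated() helper below are illustrative, not Zeek's reporter API):

#include <cstdio>
#include <string>

// Illustrative source location; Zeek's real Location carries more detail.
struct Location
	{
	std::string filename;
	int first_line = 0;
	};

// Print one deprecation message that names both the place of use and the
// place where the deprecated prototype/parameter was declared.
void warn_deprecated(const std::string& what, const std::string& extra, const Location& use,
                     const Location& decl)
	{
	std::fprintf(stderr, "deprecation warning in %s:%d: %s%s (declared at %s:%d)\n",
	             use.filename.c_str(), use.first_line, what.c_str(), extra.c_str(),
	             decl.filename.c_str(), decl.first_line);
	}

int main()
	{
	warn_deprecated("use of deprecated parameter 'foo'", ": use 'bar' instead",
	                {"site.zeek", 42}, {"api.zeek", 7});
	}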
|
@ -604,13 +608,13 @@ static auto get_prototype(IDPtr id, FuncTypePtr t)
|
||||||
|
|
||||||
if ( prototype->deprecated )
|
if ( prototype->deprecated )
|
||||||
{
|
{
|
||||||
if ( prototype->deprecation_msg.empty() )
|
auto msg = prototype->deprecation_msg;
|
||||||
t->Warn(util::fmt("use of deprecated '%s' prototype", id->Name()),
|
if ( ! msg.empty() )
|
||||||
prototype->args.get(), true);
|
msg = ": " + msg;
|
||||||
else
|
|
||||||
t->Warn(util::fmt("use of deprecated '%s' prototype: %s", id->Name(),
|
reporter->Deprecation(util::fmt("use of deprecated '%s' prototype%s (%s)", id->Name(),
|
||||||
prototype->deprecation_msg.data()),
|
msg.c_str(), obj_desc_short(t.get()).c_str()),
|
||||||
prototype->args.get(), true);
|
t->GetLocationInfo(), prototype->args->GetLocationInfo());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -841,24 +845,25 @@ void end_func(StmtPtr body, const char* module_name, bool free_of_conditionals)
 	oi->num_stmts = Stmt::GetNumStmts();
 	oi->num_exprs = Expr::GetNumExprs();

-	auto ingredients = std::make_unique<function_ingredients>(pop_scope(), std::move(body),
-	                                                          module_name);
-	if ( ! ingredients->id->HasVal() )
+	auto ingredients = std::make_unique<FunctionIngredients>(pop_scope(), std::move(body),
+	                                                         module_name);
+	auto id = ingredients->GetID();
+
+	if ( ! id->HasVal() )
 		{
-		auto f = make_intrusive<ScriptFunc>(ingredients->id);
-		ingredients->id->SetVal(make_intrusive<FuncVal>(std::move(f)));
-		ingredients->id->SetConst();
+		auto f = make_intrusive<ScriptFunc>(id);
+		id->SetVal(make_intrusive<FuncVal>(std::move(f)));
+		id->SetConst();
 		}

-	ingredients->id->GetVal()->AsFunc()->AddBody(ingredients->body, ingredients->inits,
-	                                             ingredients->frame_size, ingredients->priority,
-	                                             ingredients->groups);
+	id->GetVal()->AsFunc()->AddBody(ingredients->Body(), ingredients->Inits(),
+	                                ingredients->FrameSize(), ingredients->Priority(),
+	                                ingredients->Groups());

-	auto func_ptr = cast_intrusive<FuncVal>(ingredients->id->GetVal())->AsFuncPtr();
+	auto func_ptr = cast_intrusive<FuncVal>(id->GetVal())->AsFuncPtr();
 	auto func = cast_intrusive<ScriptFunc>(func_ptr);
-	func->SetScope(ingredients->scope);
+	func->SetScope(ingredients->Scope());

-	for ( const auto& group : ingredients->groups )
+	for ( const auto& group : ingredients->Groups() )
 		group->AddFunc(func);

 	analyze_func(std::move(func));

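end_func() now goes through accessors on the renamed FunctionIngredients class instead of poking at public members of the old function_ingredients struct. The class itself is not part of this excerpt, so the sketch below only captures the accessor surface implied by the call sites above; the member and pointer types are assumptions (Zeek uses its own IntrusivePtr aliases, not std::shared_ptr).

#include <memory>
#include <vector>

// Stand-in types so the sketch is self-contained; the real aliases are
// Zeek's IntrusivePtr-based IDPtr, StmtPtr, ScopePtr, and event group type.
struct ID; struct Stmt; struct Scope; struct EventGroup;
using IDPtr = std::shared_ptr<ID>;
using StmtPtr = std::shared_ptr<Stmt>;
using ScopePtr = std::shared_ptr<Scope>;
using EventGroupPtr = std::shared_ptr<EventGroup>;

// Accessor surface implied by the end_func() call sites above; the actual
// class lives elsewhere in the Zeek tree and may differ in detail.
class FunctionIngredients
	{
public:
	const IDPtr& GetID() const { return id; }
	const StmtPtr& Body() const { return body; }
	const std::vector<IDPtr>& Inits() const { return inits; }  // element type assumed
	size_t FrameSize() const { return frame_size; }
	int Priority() const { return priority; }
	const ScopePtr& Scope() const { return scope; }
	const std::vector<EventGroupPtr>& Groups() const { return groups; }  // container type assumed

private:
	IDPtr id;
	StmtPtr body;
	std::vector<IDPtr> inits;
	size_t frame_size = 0;
	int priority = 0;
	ScopePtr scope;
	std::vector<EventGroupPtr> groups;
	};
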
@@ -2,6 +2,7 @@

 #pragma once

+#include <cstdint>
 #include <string>
 #include <unordered_map>

@@ -10,6 +10,7 @@ namespace zeek
 {

 class AddrVal;
+class EnumVal;
 class File;
 class Func;
 class ListVal;

src/ZeekConfig.cmake.in (new file, 34 lines)
@@ -0,0 +1,34 @@
+@PACKAGE_INIT@
+
+include(CMakeFindDependencyMacro)
+
+# Note: ZeekPluginConfig only exists on the build interface to pull in
+# additional variables and dependencies for building dynamic plugins.
+if ( EXISTS "${CMAKE_CURRENT_LIST_DIR}/ZeekPluginConfig.cmake" )
+  include("${CMAKE_CURRENT_LIST_DIR}/ZeekPluginConfig.cmake")
+endif ()
+
+set(CMAKE_THREAD_PREFER_PTHREAD ON)
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_dependency(Threads REQUIRED)
+
+find_dependency(OpenSSL REQUIRED)
+
+# In our MSVC build, we have these extra dependencies from Conan.
+if ( MSVC )
+  find_dependency(libpcap)
+  find_dependency(ZLIB)
+  find_dependency(c-ares)
+endif ()
+
+if ( NOT "@ZEEK_HAS_STATIC_BROKER@" )
+  # Always force using the package config file since users might still have
+  # a legacy FindBroker.cmake in their system.
+  find_dependency(Broker REQUIRED CONFIG)
+endif ()
+
+foreach ( dep @ZEEK_INSTALL_DEPENDENCIES@ )
+  find_dependency(${dep} REQUIRED)
+endforeach ( )
+
+include("${CMAKE_CURRENT_LIST_DIR}/ZeekTargets.cmake")

src/ZeekPluginBootstrap.cmake.in (new file, 19 lines)
@@ -0,0 +1,19 @@
+# This script is installed alongside ZeekPlugin.cmake for passing variables from
+# the CMake configuration step to plugins.
+
+# Allows scripts such as ZeekPlugin.cmake to locate the Zeek CMake package.
+set(ZEEK_CMAKE_CONFIG_DIR "@ZEEK_CMAKE_CONFIG_DIR@"
+    CACHE PATH "Internal Zeek variable: the CMake package path for Zeek." FORCE)
+
+# Tells ZeekPlugin.cmake where to install plugins.
+set(ZEEK_PLUGIN_DIR "@ZEEK_PLUGIN_DIR@"
+    CACHE PATH "Internal Zeek variable: the directory for installing Zeek plugins." FORCE)
+
+# Allows scripts to locate files in the Zeek install tree.
+set(ZEEK_CMAKE_INSTALL_PREFIX "@CMAKE_INSTALL_PREFIX@"
+    CACHE PATH "Internal Zeek variable: CMAKE_INSTALL_PREFIX of Zeek." FORCE)
+
+# Set ZEEK_PLUGIN_SCRIPTS_PATH for ZeekPlugin.cmake. We install the scripts into
+# the package directory.
+set(ZEEK_PLUGIN_SCRIPTS_PATH "${ZEEK_CMAKE_CONFIG_DIR}"
+    CACHE PATH "Path to utility scripts for building Zeek plugins." FORCE)

src/ZeekPluginConfig.cmake.in (new file, 69 lines)
@@ -0,0 +1,69 @@
+# Note: this config is used for builtin dynamic plugins outside of the source
+# tree of Zeek. Plugins refer back to the Zeek source tree since they need
+# access to some parts of Zeek that we don't install.
+
+# Pull in some path magic that Zeek uses as well.
+include(MacDependencyPaths)
+
+# For finding zeek-plugin-create-package.sh and zeek-plugin-install-package.sh.
+if ( NOT ZEEK_PLUGIN_SCRIPTS_PATH )
+  set(ZEEK_PLUGIN_SCRIPTS_PATH "@ZEEK_PLUGIN_SCRIPTS_PATH@"
+      CACHE PATH "Path to utility shell scripts." FORCE)
+endif ()
+
+# Path for installing plugins.
+if ( NOT ZEEK_PLUGIN_DIR )
+  set(ZEEK_PLUGIN_DIR "@ZEEK_PLUGIN_DIR@"
+      CACHE STRING "Installation path for plugins" FORCE)
+endif ()
+
+# For finding Zeek sources.
+if ( NOT ZEEK_SOURCE_DIR )
+  set(ZEEK_SOURCE_DIR "@ZEEK_SOURCE_DIR@"
+      CACHE PATH "Path to the Zeek source tree." FORCE)
+endif ()
+
+# Provide a hint to ZeekConfig.cmake where to find Broker from the build tree.
+# Note: the straightforward way would be setting `Broker_ROOT` instead, but
+# plugins may still use CMake < 3.12.
+if (NOT "@ZEEK_HAS_EXTERNAL_BROKER@")
+  set(Broker_DIR "@ZEEK_PLUGIN_BROKER_PATH@" CACHE
+      PATH "Directory for finding Broker's package file" FORCE)
+endif ()
+
+# Provide hint to the plugins where to find standard packages by passing along
+# user-defined values.
+set(ZeekOpenSSLHint "@ZeekOpenSSLHint@")
+if (ZeekOpenSSLHint AND NOT OPENSSL_ROOT_DIR)
+  set(OPENSSL_ROOT_DIR "${ZeekOpenSSLHint}" CACHE
+      PATH "Directory hint for finding OpenSSL" FORCE)
+endif ()
+
+# Force PKG_CONFIG_PATH environment variable to reflect what we've used when
+# building Zeek.
+set(ZeekPkgConfigPath "@ZeekPkgConfigPath@")
+if (ZeekPkgConfigPath)
+  set(ENV{PKG_CONFIG_PATH} "${ZeekPkgConfigPath}")
+endif ()
+
+# For having a binpac target available. Guarded to shield against including this
+# file multiple times.
+if (NOT TARGET Zeek::BinPAC)
+  add_executable(Zeek::BinPAC IMPORTED)
+  set_property(TARGET Zeek::BinPAC PROPERTY
+               IMPORTED_LOCATION "@BINPAC_EXE_PATH@")
+endif ()
+
+# For having a bifcl target available. Guarded to shield against including this
+# file multiple times.
+if (NOT TARGET Zeek::BifCl)
+  add_executable(Zeek::BifCl IMPORTED)
+  set_property(TARGET Zeek::BifCl PROPERTY
+               IMPORTED_LOCATION "@BIFCL_EXE_PATH@")
+endif ()
+
+# TODO: using BIFCL_EXE_PATH and BINPAC_EXE_PATH does not play well with
+# multi-configuration generators. We currently hard-code these paths in
+# the main CMakeLists.txt instead of dynamically fetching the right thing.
+# A better solution would be either using find_program here or
+# `file(GENERATE ...)` from the main CMake file.

@@ -535,9 +535,6 @@ TEST_CASE("construction")
 	CHECK_EQ(s7.Len(), 6);
 	CHECK_EQ(s7.Bytes(), text2);

-	// Construct a temporary reporter object for the next two tests
-	zeek::reporter = new zeek::Reporter(false);
-
 	zeek::byte_vec text3 = new u_char[7];
 	memcpy(text3, text.c_str(), 7);
 	zeek::String s8{false, text3, 6};
@@ -549,8 +546,6 @@ TEST_CASE("construction")
 	zeek::String s9{false, text4, 6};
 	CHECK_EQ(std::string(s9.CheckString()), "<string-with-NUL>");

-	delete zeek::reporter;
-
 	zeek::byte_vec text5 = (zeek::byte_vec)malloc(7);
 	memcpy(text5, text.c_str(), 7);
 	zeek::String s10{true, text5, 6};

@@ -386,10 +386,8 @@ void Analyzer::ForwardEndOfData(bool orig)
 bool Analyzer::AddChildAnalyzer(Analyzer* analyzer, bool init)
 	{
 	auto t = analyzer->GetAnalyzerTag();
-	auto it = std::find(prevented.begin(), prevented.end(), t);
-	auto prevent = (it != prevented.end());

-	if ( HasChildAnalyzer(t) || prevent )
+	if ( HasChildAnalyzer(t) || IsPreventedChildAnalyzer(t) )
 		{
 		analyzer->Done();
 		delete analyzer;
@@ -418,9 +416,7 @@ Analyzer* Analyzer::AddChildAnalyzer(const zeek::Tag& analyzer)
 	if ( HasChildAnalyzer(analyzer) )
 		return nullptr;

-	auto it = std::find(prevented.begin(), prevented.end(), analyzer);
-
-	if ( it != prevented.end() )
+	if ( IsPreventedChildAnalyzer(tag) )
 		return nullptr;

 	Analyzer* a = analyzer_mgr->InstantiateAnalyzer(analyzer, conn);
@@ -468,27 +464,35 @@ bool Analyzer::Remove()
 	return removing;
 	}

-void Analyzer::PreventChildren(zeek::Tag tag)
+void Analyzer::PreventChildren(const zeek::Tag& tag)
 	{
-	auto it = std::find(prevented.begin(), prevented.end(), tag);
-
-	if ( it != prevented.end() )
+	if ( IsPreventedChildAnalyzer(tag) )
 		return;

 	prevented.emplace_back(tag);
 	}

-bool Analyzer::HasChildAnalyzer(zeek::Tag tag)
+bool Analyzer::IsPreventedChildAnalyzer(const zeek::Tag& tag) const
+	{
+	return std::find(prevented.begin(), prevented.end(), tag) != prevented.end();
+	}
+
+bool Analyzer::HasChildAnalyzer(const zeek::Tag& tag) const
+	{
+	return GetChildAnalyzer(tag) != nullptr;
+	}
+
+Analyzer* Analyzer::GetChildAnalyzer(const zeek::Tag& tag) const
 	{
 	LOOP_OVER_CHILDREN(i)
 		if ( (*i)->tag == tag && ! ((*i)->removing || (*i)->finished) )
-			return true;
+			return *i;

 	LOOP_OVER_GIVEN_CHILDREN(i, new_children)
 		if ( (*i)->tag == tag && ! ((*i)->removing || (*i)->finished) )
-			return true;
+			return *i;

-	return false;
+	return nullptr;
 	}

 Analyzer* Analyzer::FindChild(ID arg_id)

@@ -434,14 +434,36 @@ public:
 	 *
 	 * @param tag The type of analyzer to prevent.
 	 */
-	void PreventChildren(zeek::Tag tag);
+	void PreventChildren(const zeek::Tag& tag);
+
+	/**
+	 * Returns true if the given analyzer type is prevented from
+	 * being added as a child.
+	 *
+	 * @param tag The type of analyzer to prevent.
+	 *
+	 * @return true if the analyzer type is prevented, else false.
+	 */
+	bool IsPreventedChildAnalyzer(const zeek::Tag& tag) const;

 	/**
 	 * Returns true if analyzer has a direct child of a given type.
 	 *
 	 * @param tag The type of analyzer to check for.
 	 */
-	bool HasChildAnalyzer(zeek::Tag tag);
+	bool HasChildAnalyzer(const zeek::Tag& tag) const;
+
+	/**
+	 * Returns a pointer to a direct child analyzer of the given type.
+	 *
+	 * Note that the returned pointer is owned by the analyzer and may
+	 * be deleted without notification. Do not hold on to it.
+	 *
+	 * @param tag The type of the analyzer to check for.
+	 *
+	 * @return The analyzer, or null if not found.
+	 */
+	Analyzer* GetChildAnalyzer(const zeek::Tag& tag) const;

 	/**
 	 * Recursively searches all (direct or indirect) childs of the

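Taken together, the implementation and the declarations above reduce the child-analyzer bookkeeping to two primitives: a membership test on the prevented list and a lookup that returns the live child, with HasChildAnalyzer() becoming a thin wrapper over that lookup. Below is a condensed, self-contained model of that shape, with tags reduced to integers and the child list to one flat vector; it only illustrates how the helpers relate and is not the real Analyzer class.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Toy model of the refactor: one membership helper for the "prevented" list,
// one lookup primitive, and a boolean check implemented on top of the lookup.
class ParentModel
	{
public:
	using Tag = uint32_t;
	struct Child
		{
		Tag tag;
		bool removing = false;
		bool finished = false;
		};

	bool IsPreventedChildAnalyzer(Tag tag) const
		{
		return std::find(prevented.begin(), prevented.end(), tag) != prevented.end();
		}

	void PreventChildren(Tag tag)
		{
		if ( IsPreventedChildAnalyzer(tag) )
			return;

		prevented.emplace_back(tag);
		}

	// Returns the live child of the given type, skipping ones being torn down.
	const Child* GetChildAnalyzer(Tag tag) const
		{
		for ( const auto& c : children )
			if ( c.tag == tag && ! (c.removing || c.finished) )
				return &c;

		return nullptr;
		}

	bool HasChildAnalyzer(Tag tag) const { return GetChildAnalyzer(tag) != nullptr; }

	std::vector<Child> children;
	std::vector<Tag> prevented;
	};

int main()
	{
	ParentModel p;
	p.children.push_back({42});
	p.PreventChildren(7);

	std::cout << p.HasChildAnalyzer(42) << ' ' << p.IsPreventedChildAnalyzer(7) << '\n';  // 1 1

	if ( const auto* c = p.GetChildAnalyzer(42) )  // fetch the child instead of just testing
		std::cout << c->tag << '\n';
	}
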
@@ -1,20 +1,14 @@
-include(ZeekSubdir)
-
-include_directories(BEFORE
-    ${CMAKE_CURRENT_SOURCE_DIR}
-    ${CMAKE_CURRENT_BINARY_DIR}
-)
-
-add_subdirectory(protocol)
-
-set(analyzer_SRCS
+zeek_add_subdir_library(
+    analyzer
+    INTERNAL_DEPENDENCIES ${BIF_BUILD_TARGET}
+    INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}
+    SOURCES
     Analyzer.cc
-    Manager.cc
     Component.cc
+    Manager.cc
 )

+# Treat BIFs as builtin (alternative mode).
 bif_target(analyzer.bif)

-bro_add_subdir_library(analyzer ${analyzer_SRCS})
-add_dependencies(bro_analyzer generate_outputs)
+add_subdirectory(protocol)

@@ -1,3 +1,5 @@
+include(ZeekPlugin)
+
 add_subdirectory(bittorrent)
 add_subdirectory(conn-size)
 add_subdirectory(dce-rpc)

@@ -1,10 +1,14 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek BitTorrent)
-zeek_plugin_cc(BitTorrent.cc BitTorrentTracker.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_pac(bittorrent.pac bittorrent-analyzer.pac bittorrent-protocol.pac)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    BitTorrent
+    SOURCES
+    BitTorrent.cc
+    BitTorrentTracker.cc
+    Plugin.cc
+    BIFS
+    events.bif
+    PAC
+    bittorrent.pac
+    bittorrent-analyzer.pac
+    bittorrent-protocol.pac
+)

@@ -1,10 +1,10 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek ConnSize)
-zeek_plugin_cc(ConnSize.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_bif(functions.bif)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    ConnSize
+    SOURCES
+    ConnSize.cc
+    Plugin.cc
+    BIFS
+    events.bif
+    functions.bif
+)

@@ -1,12 +1,14 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek DCE_RPC)
-zeek_plugin_cc(DCE_RPC.cc Plugin.cc)
-zeek_plugin_bif(consts.bif types.bif events.bif)
-zeek_plugin_pac(
+zeek_add_plugin(
+    Zeek
+    DCE_RPC
+    SOURCES
+    DCE_RPC.cc
+    Plugin.cc
+    BIFS
+    consts.bif
+    types.bif
+    events.bif
+    PAC
     dce_rpc.pac
     dce_rpc-protocol.pac
     dce_rpc-analyzer.pac
@@ -14,5 +16,3 @@ zeek_plugin_pac(
     endpoint-atsvc.pac
     endpoint-epmapper.pac
 )
-zeek_plugin_end()
-

@@ -1,10 +1,14 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek DHCP)
-zeek_plugin_cc(DHCP.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_bif(types.bif)
-zeek_plugin_pac(dhcp.pac dhcp-protocol.pac dhcp-analyzer.pac dhcp-options.pac)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    DHCP
+    SOURCES
+    DHCP.cc
+    Plugin.cc
+    BIFS
+    events.bif
+    types.bif
+    PAC
+    dhcp.pac
+    dhcp-protocol.pac
+    dhcp-analyzer.pac
+    dhcp-options.pac
+)

@@ -1,10 +1,14 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek DNP3)
-zeek_plugin_cc(DNP3.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_pac(dnp3.pac dnp3-analyzer.pac dnp3-protocol.pac dnp3-objects.pac)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    DNP3
+    SOURCES
+    DNP3.cc
+    Plugin.cc
+    BIFS
+    events.bif
+    PAC
+    dnp3.pac
+    dnp3-analyzer.pac
+    dnp3-protocol.pac
+    dnp3-objects.pac
+)

@@ -1,9 +1,9 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek DNS)
-zeek_plugin_cc(DNS.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    DNS
+    SOURCES
+    DNS.cc
+    Plugin.cc
+    BIFS
+    events.bif
+)

@@ -1,9 +1,9 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek File)
-zeek_plugin_cc(File.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    File
+    SOURCES
+    File.cc
+    Plugin.cc
+    BIFS
+    events.bif
+)

@@ -33,14 +33,6 @@ protected:
 	std::string file_id_resp;
 	};

-class IRC_Data : public File_Analyzer
-	{
-public:
-	explicit IRC_Data(Connection* conn) : File_Analyzer("IRC_Data", conn) { }
-
-	static Analyzer* Instantiate(Connection* conn) { return new IRC_Data(conn); }
-	};
-
 class FTP_Data : public File_Analyzer
 	{
 public:

@@ -15,8 +15,6 @@ public:
 		{
 		AddComponent(
 			new zeek::analyzer::Component("FTP_Data", zeek::analyzer::file::FTP_Data::Instantiate));
-		AddComponent(
-			new zeek::analyzer::Component("IRC_Data", zeek::analyzer::file::IRC_Data::Instantiate));

 		zeek::plugin::Configuration config;
 		config.name = "Zeek::File";

@@ -1,9 +1,9 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek Finger)
-zeek_plugin_cc(Finger.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    Finger
+    SOURCES
+    Finger.cc
+    Plugin.cc
+    BIFS
+    events.bif
+)

@@ -1,10 +1,10 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek FTP)
-zeek_plugin_cc(FTP.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_bif(functions.bif)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    FTP
+    SOURCES
+    FTP.cc
+    Plugin.cc
+    BIFS
+    events.bif
+    functions.bif
+)

@@ -1,9 +1,9 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek Gnutella)
-zeek_plugin_cc(Gnutella.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    Gnutella
+    SOURCES
+    Gnutella.cc
+    Plugin.cc
+    BIFS
+    events.bif
+)

@@ -1,16 +1,14 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek GSSAPI)
-zeek_plugin_cc(GSSAPI.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_pac(
+zeek_add_plugin(
+    Zeek
+    GSSAPI
+    SOURCES
+    GSSAPI.cc
+    Plugin.cc
+    BIFS
+    events.bif
+    PAC
     gssapi.pac
     gssapi-protocol.pac
     gssapi-analyzer.pac
-    ../asn1/asn1.pac
+    ${PROJECT_SOURCE_DIR}/src/analyzer/protocol/asn1/asn1.pac
 )
-zeek_plugin_end()
-

@@ -1,10 +1,10 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek HTTP)
-zeek_plugin_cc(HTTP.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_bif(functions.bif)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    HTTP
+    SOURCES
+    HTTP.cc
+    Plugin.cc
+    BIFS
+    events.bif
+    functions.bif
+)

@@ -1,9 +1,9 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek Ident)
-zeek_plugin_cc(Ident.cc Plugin.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    Ident
+    SOURCES
+    Ident.cc
+    Plugin.cc
+    BIFS
+    events.bif
+)

@@ -1,12 +1,13 @@
-include(ZeekPlugin)
-
-include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-zeek_plugin_begin(Zeek IMAP)
-zeek_plugin_cc(Plugin.cc)
-zeek_plugin_cc(IMAP.cc)
-zeek_plugin_bif(events.bif)
-zeek_plugin_pac(imap.pac imap-analyzer.pac imap-protocol.pac)
-zeek_plugin_end()
+zeek_add_plugin(
+    Zeek
+    IMAP
+    SOURCES
+    Plugin.cc
+    IMAP.cc
+    BIFS
+    events.bif
+    PAC
+    imap.pac
+    imap-analyzer.pac
+    imap-protocol.pac
+)

Some files were not shown because too many files have changed in this diff.