Compare commits

...

29 commits
master ... lts

Author SHA1 Message Date
Tim Wojtulewicz
9458ebdd39 Update CHANGES, VERSION, and NEWS for v8.0.1 2025-08-26 17:58:46 +00:00
Arne Welzel
56e4dc9247 Merge remote-tracking branch 'origin/topic/awelzel/4754-follow-up'
* origin/topic/awelzel/4754-follow-up:
  cluster/serializer/broker: Drop unused include
  cluster/serializer/broker: fixup inconsistent param comment

(cherry picked from commit 9b94e25e67)
2025-08-26 09:48:08 -07:00
Tim Wojtulewicz
2f38ff6c87 Merge remote-tracking branch 'origin/topic/bbannier/issue-3266'
* origin/topic/bbannier/issue-3266:
  Fix installation of symlink with `DESTDIR`

(cherry picked from commit d7db612b0f)
2025-08-26 09:05:29 -07:00
Tim Wojtulewicz
708f914524 Update zeekctl submodule for docs fixes [nomail] [skip ci] 2025-08-26 08:57:51 -07:00
Tim Wojtulewicz
f5d549fe9d Update docs submodule for v8.0.1 [nomail] [skip ci] 2025-08-25 19:42:32 +00:00
Christian Kreibich
9e66cf873b Merge branch 'topic/christian/news-typos'
* topic/christian/news-typos:
  Minor fixes to a few NEWS entries.

(cherry picked from commit 2929f1eb17)
2025-08-25 12:32:14 -07:00
Tim Wojtulewicz
53b88d33b6 Bump zeekctl submodule for MetricsAddr docs 2025-08-25 12:30:33 -07:00
Arne Welzel
c0a80fe610 Merge remote-tracking branch 'origin/topic/awelzel/cluster-event-metadata-fixes-for-8.0'
* origin/topic/awelzel/cluster-event-metadata-fixes-for-8.0:
  cluster/Backend: Fallback to current network time when current event has no timestamp
  cluster/serializer/broker: Do not send empty metadata vectors around

(cherry picked from commit 3e89e6b328)
2025-08-22 09:25:39 -07:00
Arne Welzel
4bfac4a087 Merge remote-tracking branch 'origin/topic/awelzel/4754-double-wrapped-broker-data-records'
* origin/topic/awelzel/4754-double-wrapped-broker-data-records:
  cluster/serializer/broker: Do not special case Broker::Data anymore
  broker/Data: Support unwrapping Broker::Data records

(cherry picked from commit 3d6a064ecc)
2025-08-22 09:24:47 -07:00
Arne Welzel
5de3ea1e2f Merge remote-tracking branch 'origin/topic/awelzel/docker-trixie'
* origin/topic/awelzel/docker-trixie:
  ci: Run zeekctl and builtin tasks with Debian 13, too
  ci: Prepend timestamps to output
  ci: Enable Spicy for arm_debian13
  ci: Add Debian 13.0 (trixie)
  docker: Bump to debian:trixie-slim

(cherry picked from commit 63574b9fd4)
2025-08-22 09:22:04 -07:00
Arne Welzel
abfb6ca156 Merge remote-tracking branch 'amazing-pp/t/psql-login-no-role'
* amazing-pp/t/psql-login-no-role:
  Report PostgreSQL login success only after ReadyForQuery

(cherry picked from commit e04f725523)
2025-08-22 09:19:15 -07:00
Tim Wojtulewicz
1cb6519ba7 Merge remote-tracking branch 'origin/topic/timw/event-constructor-deprecation-message' into release/8.0
* origin/topic/timw/event-constructor-deprecation-message:
  Clarify Event constructor deprecation message
2025-08-18 14:44:33 -07:00
Tim Wojtulewicz
f07a59d32c Updating CHANGES and VERSION. 2025-08-18 18:01:55 +00:00
Tim Wojtulewicz
ef6999ed62 Update docs submodule for 8.0.0 [nomail] [skip ci] 2025-08-18 17:08:25 +00:00
Tim Wojtulewicz
8a6a24cb70 Merge remote-tracking branch 'origin/topic/etyp/update-news-record-vec-deprecation'
* origin/topic/etyp/update-news-record-vec-deprecation:
  Add `record_type_to_vector` deprecation to NEWS

(cherry picked from commit a4da8d3f7b)
2025-08-18 16:42:13 +00:00
Arne Welzel
69dc9209b5 Merge remote-tracking branch 'origin/topic/vern/stmt-line-numbers'
* origin/topic/vern/stmt-line-numbers:
  maintenance updates for ZAM BiF-tracking
  fix line numbers associated with "if" and initialization statements

(cherry picked from commit c0a863cba0)
2025-08-18 16:37:06 +00:00
Tim Wojtulewicz
7fdb266b24 Fix ci/update-zeekygen-docs to agree with ruff-format 2025-08-18 16:36:43 +00:00
Tim Wojtulewicz
4d64c955ac Merge remote-tracking branch 'origin/topic/bbannier/bump-spicy-8.0' into release/8.0
* origin/topic/bbannier/bump-spicy-8.0:
  Bump auxil/spicy to spicy-1.14.0
2025-08-18 08:23:28 -07:00
Benjamin Bannier
26bdaf94d8 Bump auxil/spicy to spicy-1.14.0 2025-08-18 12:57:24 +02:00
Tim Wojtulewicz
b2bd588740 Clarify Event constructor deprecation message 2025-08-17 13:56:48 -07:00
Tim Wojtulewicz
56e55ba3ee Updating CHANGES and VERSION. 2025-08-12 12:43:40 -07:00
Tim Wojtulewicz
04c4d792d1 Merge remote-tracking branch 'origin/topic/bbannier/bump-spicy'
* origin/topic/bbannier/bump-spicy:
  Bump pre-commit hooks
  Bump auxil/spicy to latest development snapshot

(cherry picked from commit cc59bfa5d8)
2025-08-12 12:42:54 -07:00
Tim Wojtulewicz
84d28bc30c Update docs submodule with 8.0.0-rc2 changes [nomail] [skip ci] 2025-08-12 11:07:04 -07:00
Tim Wojtulewicz
fcdfe2aca2 Merge remote-tracking branch 'origin/topic/awelzel/4730-smb-read-response-data-offset'
* origin/topic/awelzel/4730-smb-read-response-data-offset:
  smb2/read: Parse only 1 byte for data_offset, ignore reserved1

(cherry picked from commit 76289a8022)
2025-08-11 11:39:22 -07:00
Arne Welzel
1511ca00df Merge remote-tracking branch 'origin/topic/awelzel/4176-cluster-on-sub-unsub-hooks'
* origin/topic/awelzel/4176-cluster-on-sub-unsub-hooks:
  cluster: Add on_subscribe() and on_unsubscribe() hooks

(cherry picked from commit 13f613eb1d)
2025-08-11 11:36:42 -07:00
Johanna Amann
a76b2148c6 Merge remote-tracking branch 'origin/topic/johanna/analyzer-log-proto'
* origin/topic/johanna/analyzer-log-proto:
  Add proto to analyzer.log

(cherry picked from commit 2f2f328a72)
2025-08-11 11:34:08 -07:00
Tim Wojtulewicz
8e7482de4b Update zeek-aux submodule with c++20 changes 2025-08-07 08:33:44 -07:00
Tim Wojtulewicz
1addeab4fe Updating CHANGES and VERSION. 2025-08-04 09:44:48 -07:00
Christian Kreibich
c1cb1a2e5f Compile contributors for Zeek 8.0 in the NEWS file
(cherry picked from commit 4fdd83f3f5)
2025-08-04 09:39:08 -07:00
71 changed files with 701 additions and 134 deletions

View file

@ -142,6 +142,7 @@ ci_template: &CI_TEMPLATE
env:
CIRRUS_WORKING_DIR: /zeek
CIRRUS_LOG_TIMESTAMP: true
ZEEK_CI_CPUS: *CPUS
ZEEK_CI_BTEST_JOBS: *BTEST_JOBS
ZEEK_CI_BTEST_RETRIES: *BTEST_RETRIES
@ -208,51 +209,49 @@ centosstream9_task:
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
debian12_task:
debian13_task:
container:
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
arm_debian12_task:
arm_debian13_task:
arm_container:
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
env:
ZEEK_CI_CONFIGURE_FLAGS: *NO_SPICY_CONFIG
debian12_static_task:
debian13_static_task:
container:
# Just use a recent/common distro to run a static compile test.
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
env:
ZEEK_CI_CONFIGURE_FLAGS: *STATIC_CONFIG
debian12_binary_task:
debian13_binary_task:
container:
# Just use a recent/common distro to run binary mode compile test.
# As of 2024-03, the used configure flags are equivalent to the flags
# that we use to create binary packages.
# Just use a recent/common distro to run a static compile test.
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
env:
ZEEK_CI_CONFIGURE_FLAGS: *BINARY_CONFIG
debian11_task:
debian12_task:
container:
# Debian 11 EOL: June 2026
dockerfile: ci/debian-11/Dockerfile
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
<< : *SKIP_TASK_ON_PR
@ -797,8 +796,8 @@ zeekctl_debian12_task:
$CIRRUS_BRANCH =~ 'release/.*' )
)
container:
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
<< : *RESOURCES_TEMPLATE
sync_submodules_script: git submodule update --recursive --init
always:
@ -821,8 +820,8 @@ include_plugins_debian12_task:
cpu: *CPUS
memory: *MEMORY
container:
# Debian 12 (bookworm) EOL: TBD
dockerfile: ci/debian-12/Dockerfile
# Debian 13 (trixie) EOL: TBD
dockerfile: ci/debian-13/Dockerfile
<< : *RESOURCES_TEMPLATE
sync_submodules_script: git submodule update --recursive --init
fetch_external_plugins_script:

View file

@ -19,7 +19,7 @@ repos:
files: '^testing/btest/.*$'
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v20.1.7
rev: v20.1.8
hooks:
- id: clang-format
types_or:
@ -28,13 +28,13 @@ repos:
- "json"
- repo: https://github.com/maxwinterstein/shfmt-py
rev: v3.11.0.2
rev: v3.12.0.1
hooks:
- id: shfmt
args: ["-w", "-i", "4", "-ci"]
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.12.1
rev: v0.12.8
hooks:
- id: ruff
args: [--fix]
@ -46,7 +46,7 @@ repos:
- id: cmake-format
- repo: https://github.com/crate-ci/typos
rev: v1.33.1
rev: v1.35.3
hooks:
- id: typos
exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES|scripts/base/protocols/ssl/mozilla-ca-list.zeek)$'

View file

@ -38,6 +38,7 @@ extend-ignore-re = [
"\"BaR\"",
"\"xFoObar\"",
"\"FoO\"",
"Smoot",
]
extend-ignore-identifiers-re = [

209
CHANGES
View file

@ -1,3 +1,212 @@
8.0.1 | 2025-08-26 17:58:37 +0000
* Update CHANGES, VERSION, and NEWS for v8.0.1 (Tim Wojtulewicz, Corelight)
8.0.0-12 | 2025-08-26 09:47:30 -0700
* cluster/serializer/broker: Drop unused include (Arne Welzel, Corelight)
(cherry picked from commit 9b94e25e6707b2abdfe163d8bb3305edf3f9b201)
* cluster/serializer/broker: fixup inconsistent param comment (Arne Welzel, Corelight)
(cherry picked from commit 9b94e25e6707b2abdfe163d8bb3305edf3f9b201)
8.0.0-11 | 2025-08-26 09:04:29 -0700
* Fix installation of symlink with `DESTDIR` (Benjamin Bannier, Corelight)
We install test data which we also make available under an alternative
path for backwards compatibility. The installation of this symlink did
not take `DESTDIR` installs like used by Zeek's packaging into account
which caused installations from packages to behave differently from
installs from source.
This patch fixes the symlink to respect a possible `DESTDIR`.
(cherry picked from commit d7db612b0f1f802316ab745e292c03aa3d69c5bd)
* Update zeekctl submodule for docs fixes [nomail] [skip ci] (Tim Wojtulewicz, Corelight)
* Update docs submodule for v8.0.1 [nomail] [skip ci] (Tim Wojtulewicz, Corelight)
8.0.0-8 | 2025-08-25 12:31:35 -0700
* Merge branch 'topic/christian/news-typos' (Christian Kreibich, Corelight)
* topic/christian/news-typos:
Minor fixes to a few NEWS entries.
(cherry picked from commit 2929f1eb175e86799c8926ac8b0f1eeb5c9eac3d)
* Bump zeekctl submodule for MetricsAddr docs (Tim Wojtulewicz, Corelight)
8.0.0-6 | 2025-08-22 09:25:14 -0700
* cluster/Backend: Fallback to current network time when current event has no timestamp (Arne Welzel, Corelight)
When a WebSocket client sends an event to Zeek without explicit network
timestamp metadata, Zeek would use -1.0 as a timestamp for any events
published while handling this event. Instead, it seems far more sensible
to use the current network time in that scenario.
(cherry picked from commit 3e89e6b3288453b7a0f89fb742384e213cf5cc94)
* cluster/serializer/broker: Do not send empty metadata vectors around (Arne Welzel, Corelight)
Even when there's no metadata attached to an event, we'd still use the
constructor passing an empty metadata vector, resulting in an on-the-wire
representation with an empty trailing vector.
Particularly visible when just snooping events via websocat. There also
seems to be some bug with the timestamp -1 handling.
(cherry picked from commit 3e89e6b3288453b7a0f89fb742384e213cf5cc94)
8.0.0-5 | 2025-08-22 09:24:21 -0700
* cluster/serializer/broker: Do not special case Broker::Data anymore (Arne Welzel, Corelight)
The previous approach ignored the fact that nested / inner values might
also be Broker::Data values. I'm not super sure about the validity of
the test, because it's essentially demonstrating any-nesting, but
it's not leading to extra Broker::Data encoding.
(cherry picked from commit 3d6a064ecce177868ad7f323c63b40524e7e8455)
* broker/Data: Support unwrapping Broker::Data records (Arne Welzel, Corelight)
Calling val_to_data() on a Broker::Data ends up wrapping the
Broker::Data record instead of using the contained broker::value
directly.
Seems this should be the default behavior and wonder if the flag
even makes sense, but for an 8.0 backport that seems more reasonable.
(cherry picked from commit 3d6a064ecce177868ad7f323c63b40524e7e8455)
8.0.0-4 | 2025-08-22 09:21:32 -0700
* ci: Run zeekctl and builtin tasks with Debian 13, too (Arne Welzel, Corelight)
(cherry picked from commit 63574b9fd4645a2f21d7ba3388e16c1b4c55812d)
* ci: Prepend timestamps to output (Arne Welzel, Corelight)
(cherry picked from commit 63574b9fd4645a2f21d7ba3388e16c1b4c55812d)
* ci: Enable Spicy for arm_debian13 (Arne Welzel, Corelight)
(cherry picked from commit 63574b9fd4645a2f21d7ba3388e16c1b4c55812d)
* ci: Add Debian 13.0 (trixie) (Arne Welzel, Corelight)
(cherry picked from commit 63574b9fd4645a2f21d7ba3388e16c1b4c55812d)
* docker: Bump to debian:trixie-slim (Arne Welzel, Corelight)
(cherry picked from commit 63574b9fd4645a2f21d7ba3388e16c1b4c55812d)
8.0.0-3 | 2025-08-22 09:18:56 -0700
* Report PostgreSQL login success only after ReadyForQuery (Fupeng Zhao)
Previously, Zeek treated the receipt of `AuthenticationOk` as a
successful login. However, according to the PostgreSQL
Frontend/Backend Protocol, the startup phase is not complete until
the server sends `ReadyForQuery`. It is still possible for the server
to emit an `ErrorResponse` (e.g. ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION)
after `AuthenticationOk` but before `ReadyForQuery`.
This change updates the PostgreSQL analyzer to defer reporting login
success until `ReadyForQuery` is observed. This prevents false
positives in cases where authentication succeeds but session startup
fails.
(cherry picked from commit e04f725523dc4eaeb4739c8bbfcdce8a9ba06f7b)
8.0.0-2 | 2025-08-18 14:44:27 -0700
* Clarify Event constructor deprecation message (Tim Wojtulewicz, Corelight)
8.0.0 | 2025-08-18 17:08:25 +0000
* Update docs submodule for 8.0.0 [nomail] [skip ci] (Tim Wojtulewicz, Corelight)
8.0.0-rc2.5 | 2025-08-18 16:41:20 +0000
* Add `record_type_to_vector` deprecation to NEWS (Evan Typanski, Corelight)
(cherry picked from commit a4da8d3f7bd455c7158465d12d25eb6030526f3f)
8.0.0-rc2.4 | 2025-08-18 16:36:59 +0000
* maintenance updates for ZAM BiF-tracking (Vern Paxson, Corelight)
(cherry picked from commit c0a863cba0896f860a0c44ce0ff668c21267c239)
* fix line numbers associated with "if" and initialization statements (Vern Paxson, Corelight)
(cherry picked from commit c0a863cba0896f860a0c44ce0ff668c21267c239)
* Fix ci/update-zeekygen-docs to agree with ruff-format (Tim Wojtulewicz, Corelight)
* Bump auxil/spicy to spicy-1.14.0 (Benjamin Bannier, Corelight)
8.0.0-rc2 | 2025-08-12 12:42:54 -0700
* Release 8.0.0-rc2.
8.0.0-rc1.6 | 2025-08-12 12:41:33 -0700
* Bump pre-commit hooks (Benjamin Bannier, Corelight)
(cherry picked from commit cc59bfa5d8a3e9fddc5d65adee68e1937ea5eda7)
* Bump auxil/spicy to latest development snapshot (Benjamin Bannier, Corelight)
(cherry picked from commit cc59bfa5d8a3e9fddc5d65adee68e1937ea5eda7)
* Update docs submodule with 8.0.0-rc2 changes [nomail] [skip ci] (Tim Wojtulewicz, Corelight)
8.0.0-rc1.4 | 2025-08-11 11:38:57 -0700
* smb2/read: Parse only 1 byte for data_offset, ignore reserved1 (Arne Welzel, Corelight)
A user provided a SMB2 pcap with the reserved1 field of a ReadResponse
set to 1 instead of 0. This confused the padding computation due to
including this byte into the offset. Properly split data_offset and
reserved1 into individual byte fields.
(cherry picked from commit 76289a8022d258f94c4cba003dfa657428a247b1)
8.0.0-rc1.3 | 2025-08-11 11:35:42 -0700
* GH-4176: cluster: Add on_subscribe() and on_unsubscribe() hooks (Arne Welzel, Corelight)
(cherry picked from commit 13f613eb1d29895924ae516ad51ca7090acd231f)
8.0.0-rc1.2 | 2025-08-11 11:33:46 -0700
* Add proto to analyzer.log (Johanna Amann, Corelight)
The analyzer.log file was missing the protocol field to distinguish
tcp/udp connections.
(cherry picked from commit 2f2f328a722c38c9d53aa3812e3b35724c7f9e9f)
* Update zeek-aux submodule with c++20 changes (Tim Wojtulewicz, Corelight)
8.0.0-rc1 | 2025-08-04 09:39:08 -0700
* Release 8.0.0-rc1.
8.0.0-dev.828 | 2025-08-04 09:38:55 -0700
* Compile contributors for Zeek 8.0 in the NEWS file (Christian Kreibich, Corelight)
(cherry picked from commit 4fdd83f3f50a0e4631cb8e08ac931cc37f4637a3)
8.0.0-dev.827 | 2025-08-01 17:10:13 +0200
* ci/windows: No ZeroMQ cluster backend (Arne Welzel, Corelight)

51
NEWS
View file

@ -3,10 +3,44 @@ This document summarizes the most important changes in the current Zeek
release. For an exhaustive list of changes, see the ``CHANGES`` file
(note that submodules, such as Broker, come with their own ``CHANGES``.)
Zeek 8.0.1
==========
We would like to thank Fupeng Zhao (@AmazingPP), Mike Dopheide (@dopheide-esnet), and
@DigiAngel for their contributions to this release.
- The official Zeek docker images are now based on Debian 13.0 (trixie).
- Cluster data passed via websockets was previously double-wrapping Broker data records,
leading to decoding issues. This is now resolved.
- Cluster events will no longer pass empty arrays for metadata if there was no metadata
for the event.
- The PostgreSQL analyzer now only reports login success after a ``ReadyForQuery`` message
is received.
- Zeekctl added a new ``MetricsAddr`` address to override the address that the telemetry
uses to communicate to Prometheus. It defaults to ``0.0.0.0`` and the documentation
describes how to override it.
- Zeekctl added documentation for the ``MetricsPort`` option used to control what ports
the telemetry framework listens on to communicate with Prometheus. It describes how
the range is chosen, as well as how to override it.
- The deprecation warning for the ``zeek::Event`` should be more clear as to what action
plugin authors need to take.
Zeek 8.0.0
==========
We would like to thank Bhaskar Bhar (@bhaskarbhar) for their contributions to this
We would like to thank @aidans111, Anthony Verez (@netantho), Baa (@Baa14453),
Bhaskar Bhar (@bhaskarbhar), @dwhitemv25, EdKo (@ephikos), @edoardomich, Fupeng
Zhao (@AmazingPP), hendrik.schwartke@os-s.de (@hendrikschwartke), @i2z1, Jan
Grashöfer (@J-Gras) Jean-Samuel Marier, Justin Azoff (@JustinAzoff), Mario D
(@mari0d), Markus Elfring (@elfring), Peter Cullen (@pbcullen), Sean Donaghy,
Simeon Miteff (@simeonmiteff), Steve Smoot (@stevesmoot), @timo-mue,
@wojciech-graj, and Xiaochuan Ye (@XueSongTap) for their contributions to this
release.
Breaking Changes
@ -282,7 +316,7 @@ New Functionality
- Zeek now supports extracting the PPPoE session ID. The ``PacketAnalyzer::PPPoE::session_id``
BiF can be used to get the session ID of the current packet.
The ``onn/pppoe-session-id-logging.zeek`` policy script adds pppoe session IDs to the
The ``conn/pppoe-session-id-logging.zeek`` policy script adds pppoe session IDs to the
connection log.
The ``get_conn_stats()`` function's return value now includes the number of packets
@ -290,6 +324,10 @@ New Functionality
``get_net_stats()``, it's possible to determine the number of packets that have
been received and accepted by Zeek, but eventually discarded without processing.
- Two new hooks, ``Cluster::on_subscribe()`` and ``Cluster::on_unsubscribe()`` have
been added to allow observing ``Subscribe()`` and ``Unsubscribe()`` calls on
backends by Zeek scripts.
Changed Functionality
---------------------
@ -352,7 +390,7 @@ Changed Functionality
times in X509 certificates as local times.
- The PPPoE parser now respects the size value given in the PPPoE header. Data
beyon the size given in the header will be truncated.
beyond the size given in the header will be truncated.
- Record fields with ``&default`` attributes initializing empty ``vector``, ``table``
or ``set`` instances are now deferred until they are accessed, potentially
@ -419,6 +457,9 @@ Deprecated Functionality
``std::string`` and ``std::string_view`` added ``begins_with`` and ``ends_with`` methods
in C++ 20, and those should be used instead.
- The ``record_type_to_vector`` BIF is deprecated in favor of using the newly ordered
``record_fields`` BIF.
Zeek 7.2.0
==========
@ -741,7 +782,7 @@ New Functionality
some updates to Zeek's internal DNS resolver due to changes in the c-ares
API. At least version v1.28.0 is now required to build Zeek.
- Python 3.9 is now required for Zeek and all of it's associated subprojects.
- Python 3.9 is now required for Zeek and all of its associated subprojects.
- IP-based connections that were previously not logged due to using an unknown
IP protocol (e.g. not TCP, UDP, or ICMP) now appear in conn.log. All conn.log
@ -832,7 +873,7 @@ New Functionality
analyzer used for processing the packet when the event is raised. The
``unknown_protocol.log`` file was extended to include this information.
- The MySQL analyzer now generates a ``mysql_user_change()`` event when the user
- The MySQL analyzer now generates a ``mysql_change_user()`` event when the user
changes mid-session via the ``COM_USER_CHANGE`` command.
- The DNS analyzer was extended to support TKEY RRs (RFC 2390). A corresponding

View file

@ -1 +1 @@
8.0.0-dev.827
8.0.1

@ -1 +1 @@
Subproject commit 140e88c9a8e04eca801bbd891e085cc180eee43f
Subproject commit 63594ca470b215fa4c9f3363a5f337ed97e0e529

@ -1 +1 @@
Subproject commit 6c72725b184cc5fd7d12cea5084f0f51de3e82e3
Subproject commit 6ff49f46d5714b894a1f10f8463941fbda3b9364

@ -1 +1 @@
Subproject commit 93459b37c3deab4bec9e886211672024fa3e4759
Subproject commit 322ff7862667f7c65c91cfb9e532623327a768fb

View file

@ -1,32 +1,36 @@
FROM debian:11
FROM debian:13
ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
# A version field to invalidate Cirrus's build cache when needed, as suggested in
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
ENV DOCKERFILE_VERSION 20241024
ENV DOCKERFILE_VERSION 20250813
RUN apt-get update && apt-get -y install \
bison \
bsdmainutils \
ccache \
cmake \
cppzmq-dev \
curl \
dnsmasq \
flex \
g++ \
gcc \
git \
jq \
libkrb5-dev \
libnats-dev \
libnode-dev \
libpcap-dev \
librdkafka-dev \
libssl-dev \
libuv1-dev \
libzmq3-dev \
make \
python3 \
python3-dev \
python3-pip\
python3-websockets \
sqlite3 \
swig \
wget \
@ -35,4 +39,6 @@ RUN apt-get update && apt-get -y install \
&& apt autoclean \
&& rm -rf /var/lib/apt/lists/*
RUN pip3 install websockets junit2html
# Debian trixie really doesn't like using pip to install system wide stuff, but
# doesn't seem there's a python3-junit2html package, so not sure what we'd break.
RUN pip3 install --break-system-packages junit2html

View file

@ -64,7 +64,7 @@ branch="$(git branch --show-current)"
if [[ "$branch" =~ ^release/.* ]]; then
doc_config_file=$source_dir/doc/conf.py
cat ${doc_config_file} | sed \
-e "s#\(zeek_code_version[[:space:]]*=[[:space:]]*\)[^\n]*#\1'$branch'#g" \
-e "s#\(zeek_code_version[[:space:]]*=[[:space:]]*\)[^\n]*#\1\"$branch\"#g" \
>${doc_config_file}.tmp
mv ${doc_config_file}.tmp ${doc_config_file}
fi

2
doc

@ -1 +1 @@
Subproject commit 1ce37d96e268134100fbc6793c0c64d48e162337
Subproject commit f2f68891551733b6ac78a4350dac6da6cff25171

View file

@ -1,7 +1,7 @@
# See the file "COPYING" in the main distribution directory for copyright.
# Layer to build Zeek.
FROM debian:bookworm-slim
FROM debian:13-slim
# Make the shell split commands in the log so we can determine reasons for
# failures more easily.
@ -16,6 +16,7 @@ RUN echo 'Acquire::https::timeout "180";' >> /etc/apt/apt.conf.d/99-timeouts
# Configure system for build.
RUN apt-get -q update \
&& apt-get upgrade -q -y \
&& apt-get install -q -y --no-install-recommends \
bind9 \
bison \
@ -36,7 +37,7 @@ RUN apt-get -q update \
libz-dev \
make \
python3-minimal \
python3.11-dev \
python3-dev \
swig \
ninja-build \
python3-pip \

View file

@ -1,7 +1,7 @@
# See the file "COPYING" in the main distribution directory for copyright.
# Final layer containing all artifacts.
FROM debian:bookworm-slim
FROM debian:13-slim
# Make the shell split commands in the log so we can determine reasons for
# failures more easily.
@ -15,14 +15,15 @@ RUN echo 'Acquire::http::timeout "180";' > /etc/apt/apt.conf.d/99-timeouts
RUN echo 'Acquire::https::timeout "180";' >> /etc/apt/apt.conf.d/99-timeouts
RUN apt-get -q update \
&& apt-get upgrade -q -y \
&& apt-get install -q -y --no-install-recommends \
ca-certificates \
git \
jq \
libmaxminddb0 \
libnode108 \
libnode115 \
libpcap0.8 \
libpython3.11 \
libpython3.13 \
libssl3 \
libuv1 \
libz1 \

View file

@ -23,8 +23,10 @@ export {
uid: string &log &optional;
## File UID if available.
fuid: string &log &optional;
## Connection identifier if available
## Connection identifier if available.
id: conn_id &log &optional;
## Transport protocol for the violation, if available.
proto: transport_proto &log &optional;
## Failure or violation reason, if available.
failure_reason: string &log;
## Data causing failure or violation if available. Truncated
@ -62,6 +64,7 @@ function log_analyzer_failure(ts: time, atype: AllAnalyzers::Tag, info: Analyzer
{
rec$id = info$c$id;
rec$uid = info$c$uid;
rec$proto = get_port_transport_proto(info$c$id$orig_p);
}
if ( info?$f )

View file

@ -401,6 +401,20 @@ export {
## The value of the X-Application-Name HTTP header, if any.
application_name: string &optional;
};
## A hook invoked for every :zeek:see:`Cluster::subscribe` call.
##
## Breaking from this hook has no effect.
##
## topic: The topic string as given to :zeek:see:`Cluster::subscribe`.
global on_subscribe: hook(topic: string);
## A hook invoked for every :zeek:see:`Cluster::subscribe` call.
##
## Breaking from this hook has no effect.
##
## topic: The topic string as given to :zeek:see:`Cluster::subscribe`.
global on_unsubscribe: hook(topic: string);
}
# Needs declaration of Cluster::Event type.

View file

@ -53,7 +53,7 @@ export {
user: string &optional;
database: string &optional;
application_name: string &optional;
rows: count &default=0;
rows: count &optional;
errors: vector of string;
};
@ -197,8 +197,6 @@ event PostgreSQL::authentication_ok(c: connection) {
c$postgresql$backend = "auth_ok";
c$postgresql$success = T;
emit_log(c);
}
event PostgreSQL::terminate(c: connection) {
@ -224,6 +222,9 @@ event PostgreSQL::simple_query(c: connection, query: string) {
event PostgreSQL::data_row(c: connection, column_values: count) {
hook set_session(c);
if ( ! c$postgresql_state?$rows )
c$postgresql_state$rows = 0;
++c$postgresql_state$rows;
}
@ -236,7 +237,11 @@ event PostgreSQL::ready_for_query(c: connection, transaction_status: string) {
if ( ! c$postgresql?$success )
c$postgresql$success = transaction_status == "I" || transaction_status == "T";
if ( c$postgresql_state?$rows ) {
c$postgresql$rows = c$postgresql_state$rows;
delete c$postgresql_state$rows;
}
emit_log(c);
}

View file

@ -55,7 +55,9 @@ constexpr double NO_TIMESTAMP = -1.0;
class Event final : public Obj {
public:
[[deprecated("Remove in v8.1: Do not instantiate raw events. Use EventMgr::Dispatch() or EventMgr::Enqueue().")]]
[[deprecated(
"Remove in v8.1: The public constructor for Event() is deprecated. Pass arguments directly to "
"EventMgr::Dispatch() or EventMgr::Enqueue() instead.")]]
Event(const EventHandlerPtr& handler, zeek::Args args, util::detail::SourceID src = util::detail::SOURCE_LOCAL,
analyzer::ID aid = 0, Obj* obj = nullptr, double ts = run_state::network_time);

View file

@ -426,10 +426,6 @@ IfStmt::IfStmt(ExprPtr test, StmtPtr arg_s1, StmtPtr arg_s2)
: ExprStmt(STMT_IF, std::move(test)), s1(std::move(arg_s1)), s2(std::move(arg_s2)) {
if ( ! e->IsError() && ! IsBool(e->GetType()->Tag()) )
e->Error("conditional in test must be boolean");
const Location* loc1 = s1->GetLocationInfo();
const Location* loc2 = s2->GetLocationInfo();
SetLocationInfo(loc1, loc2);
}
IfStmt::~IfStmt() = default;
@ -1488,7 +1484,7 @@ InitStmt::InitStmt(std::vector<IDPtr> arg_inits) : Stmt(STMT_INIT) {
inits = std::move(arg_inits);
if ( ! inits.empty() )
SetLocationInfo(inits[0]->GetLocationInfo());
SetLocationInfo(inits.front()->GetLocationInfo(), inits.back()->GetLocationInfo());
}
ValPtr InitStmt::Exec(Frame* f, StmtFlowType& flow) {

View file

@ -93,10 +93,11 @@ type SMB2_read_request(header: SMB2_Header) = record {
type SMB2_read_response(header: SMB2_Header) = record {
structure_size : uint16;
data_offset : uint16;
data_offset : uint8;
reserved1 : uint8;
data_len : uint32;
data_remaining : uint32;
reserved : uint32;
reserved2 : uint32;
pad : padding to data_offset - header.head_length;
data : bytestring &length=data_len;
} &let {

View file

@ -12,6 +12,7 @@
#include "zeek/IntrusivePtr.h"
#include "zeek/RE.h"
#include "zeek/Scope.h"
#include "zeek/Type.h"
#include "zeek/broker/data.bif.h"
#include "zeek/module_util.h"
@ -718,7 +719,7 @@ ValPtr data_to_val(broker::data& d, Type* type) {
return visit(val_converter{type}, d);
}
std::optional<broker::data> val_to_data(const Val* v) {
std::optional<broker::data> val_to_data(const Val* v, bool unwrap_broker_data) {
switch ( v->GetType()->Tag() ) {
case TYPE_BOOL: return {v->AsBool()};
case TYPE_INT: return {v->AsInt()};
@ -804,7 +805,7 @@ std::optional<broker::data> val_to_data(const Val* v) {
composite_key.reserve(vl->Length());
for ( auto k = 0; k < vl->Length(); ++k ) {
auto key_part = val_to_data(vl->Idx(k).get());
auto key_part = val_to_data(vl->Idx(k).get(), unwrap_broker_data);
if ( ! key_part )
return std::nullopt;
@ -822,7 +823,7 @@ std::optional<broker::data> val_to_data(const Val* v) {
if ( is_set )
get<broker::set>(rval).emplace(std::move(key));
else {
auto val = val_to_data(te.value->GetVal().get());
auto val = val_to_data(te.value->GetVal().get(), unwrap_broker_data);
if ( ! val )
return std::nullopt;
@ -846,7 +847,7 @@ std::optional<broker::data> val_to_data(const Val* v) {
return std::nullopt;
}
auto item = val_to_data(item_val.get());
auto item = val_to_data(item_val.get(), unwrap_broker_data);
if ( ! item )
return std::nullopt;
@ -871,7 +872,7 @@ std::optional<broker::data> val_to_data(const Val* v) {
return std::nullopt;
}
auto item = val_to_data(item_val.get());
auto item = val_to_data(item_val.get(), unwrap_broker_data);
if ( ! item )
return std::nullopt;
@ -883,6 +884,21 @@ std::optional<broker::data> val_to_data(const Val* v) {
}
case TYPE_RECORD: {
auto rec = v->AsRecordVal();
// If unwrap_broker_data is set and this record is a Broker::Data record,
// use the contained data field directly.
if ( unwrap_broker_data && rec->GetType() == BifType::Record::Broker::Data ) {
const auto ov = rec->GetField<zeek::OpaqueVal>(0);
// Sanity.
if ( ov->GetType() != opaque_of_data_type ) {
reporter->Error("Broker::Data data field has wrong type: %s",
obj_desc_short(ov->GetType()).c_str());
return std::nullopt;
}
return static_cast<const DataVal*>(ov.get())->data;
}
broker::vector rval;
size_t num_fields = v->GetType()->AsRecordType()->NumFields();
rval.reserve(num_fields);
@ -895,7 +911,7 @@ std::optional<broker::data> val_to_data(const Val* v) {
continue;
}
auto item = val_to_data(item_val.get());
auto item = val_to_data(item_val.get(), unwrap_broker_data);
if ( ! item )
return std::nullopt;

View file

@ -76,9 +76,10 @@ EnumValPtr get_data_type(RecordVal* v, zeek::detail::Frame* frame);
/**
* Convert a Zeek value to a Broker data value.
* @param v a Zeek value.
* @param unwrap_broker_data If v or any nested value is a Broker::Data record, use its data broker::value directly.
* @return a Broker data value if the Zeek value could be converted to one.
*/
std::optional<broker::data> val_to_data(const Val* v);
std::optional<broker::data> val_to_data(const Val* v, bool unwrap_broker_data = false);
/**
* Convert a Broker data value to a Zeek value.

View file

@ -11,6 +11,7 @@
#include "zeek/EventHandler.h"
#include "zeek/EventRegistry.h"
#include "zeek/Func.h"
#include "zeek/ID.h"
#include "zeek/Reporter.h"
#include "zeek/Type.h"
#include "zeek/Val.h"
@ -131,14 +132,43 @@ std::optional<Event> Backend::MakeClusterEvent(FuncValPtr handler, ArgsSpan args
*
* @J-Gras prefers the current behavior. @awelzel wonders if there should
* be an opt-in/opt-out for this behavior. Procrastinating it for now.
*
* In any case, if the current event has no timestamp information
* (detail::NO_TIMESTAMP is -1.0), use the current network time for
* the outgoing event instead as network timestamp metadata.
*/
zeek::detail::EventMetadataVectorPtr meta;
if ( zeek::BifConst::EventMetadata::add_network_timestamp )
meta = zeek::detail::MakeEventMetadataVector(zeek::event_mgr.CurrentEventTime());
if ( zeek::BifConst::EventMetadata::add_network_timestamp ) {
auto ts = zeek::event_mgr.CurrentEventTime();
if ( ts == zeek::detail::NO_TIMESTAMP )
ts = run_state::network_time;
meta = zeek::detail::MakeEventMetadataVector(ts);
}
return Event{eh, std::move(*checked_args), std::move(meta)};
}
// Run the Cluster::on_subscribe hook (when any of its bodies are enabled)
// before handing the actual subscription off to the backend implementation.
bool Backend::Subscribe(const std::string& topic_prefix, SubscribeCallback cb) {
    static const auto hook = zeek::id::find_func("Cluster::on_subscribe");
    assert(hook && hook->Flavor() == FUNC_FLAVOR_HOOK);

    if ( hook ) {
        if ( hook->HasEnabledBodies() )
            hook->Invoke(zeek::make_intrusive<zeek::StringVal>(topic_prefix));
    }

    return DoSubscribe(topic_prefix, std::move(cb));
}
// Run the Cluster::on_unsubscribe hook (when any of its bodies are enabled)
// before handing the actual unsubscription off to the backend implementation.
bool Backend::Unsubscribe(const std::string& topic_prefix) {
    static const auto on_unsubscribe = zeek::id::find_func("Cluster::on_unsubscribe");
    assert(on_unsubscribe && on_unsubscribe->Flavor() == FUNC_FLAVOR_HOOK);

    // Also null-check outside the assert: with NDEBUG the assert is compiled
    // out and a missing hook would be dereferenced. Mirrors the guard in
    // Subscribe() for consistency.
    if ( on_unsubscribe && on_unsubscribe->HasEnabledBodies() )
        on_unsubscribe->Invoke(zeek::make_intrusive<zeek::StringVal>(topic_prefix));

    return DoUnsubscribe(topic_prefix);
}
void Backend::DoReadyToPublishCallback(Backend::ReadyCallback cb) {
Backend::ReadyCallbackInfo info{Backend::CallbackStatus::Success};
cb(info);

View file

@ -59,7 +59,7 @@ public:
bool ProcessEvent(std::string_view topic, cluster::Event e) { return DoProcessEvent(topic, std::move(e)); }
/**
* Method for enquing backend specific events.
* Method for enqueuing backend specific events.
*
* Some backends may raise events destined for the local
* scripting layer. That's usually wanted, but not always.
@ -210,9 +210,7 @@ public:
* @param cb callback invoked when the subscription was processed.
* @return true if it's a new event subscription and it is now registered.
*/
bool Subscribe(const std::string& topic_prefix, SubscribeCallback cb = SubscribeCallback()) {
return DoSubscribe(topic_prefix, std::move(cb));
}
bool Subscribe(const std::string& topic_prefix, SubscribeCallback cb = SubscribeCallback());
/**
* Unregister interest in messages on a certain topic.
@ -220,7 +218,7 @@ public:
* @param topic_prefix a prefix previously supplied to Subscribe()
* @return true if interest in topic prefix is no longer advertised.
*/
bool Unsubscribe(const std::string& topic_prefix) { return DoUnsubscribe(topic_prefix); }
bool Unsubscribe(const std::string& topic_prefix);
/**
* Information passed to a ready callback.

View file

@ -2,6 +2,7 @@
#include "zeek/cluster/serializer/broker/Serializer.h"
#include <cinttypes>
#include <optional>
#include "zeek/DebugLogger.h"
@ -14,7 +15,6 @@
#include "zeek/broker/Data.h"
#include "zeek/cluster/Event.h"
#include "broker/data.bif.h"
#include "broker/data_envelope.hh"
#include "broker/error.hh"
#include "broker/format/json.hh"
@ -59,24 +59,15 @@ std::optional<broker::zeek::Event> detail::to_broker_event(const zeek::cluster::
xs.reserve(ev.Args().size());
for ( const auto& a : ev.Args() ) {
if ( a->GetType() == zeek::BifType::Record::Broker::Data ) {
// When encountering a Broker::Data instance within args, pick out
// the broker::data directly to avoid double encoding, Broker::Data.
const auto& val = a->AsRecordVal()->GetField(0);
auto* data_val = static_cast<zeek::Broker::detail::DataVal*>(val.get());
xs.emplace_back(data_val->data);
}
else if ( auto res = zeek::Broker::detail::val_to_data(a.get()) ) {
if ( auto res = zeek::Broker::detail::val_to_data(a.get(), /*unwrap_broker_data=*/true) )
xs.emplace_back(std::move(res.value()));
}
else {
else
return std::nullopt;
}
}
// Convert metadata from the cluster::detail::Event event to broker's event metadata format.
broker::vector broker_meta;
if ( const auto* meta = ev.Metadata(); meta != nullptr ) {
broker::vector broker_meta;
broker_meta.reserve(meta->size());
for ( const auto& m : *meta ) {
@ -90,9 +81,11 @@ std::optional<broker::zeek::Event> detail::to_broker_event(const zeek::cluster::
obj_desc_short(m.Val()).c_str());
}
}
}
return broker::zeek::Event(ev.HandlerName(), xs, broker_meta);
}
return broker::zeek::Event(ev.HandlerName(), xs);
}
std::optional<zeek::cluster::Event> detail::to_zeek_event(const broker::zeek::Event& ev) {

View file

@ -117,7 +117,7 @@ static std::unordered_map<std::string, unsigned int> func_attrs = {
{"Option::set_change_handler", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"PacketAnalyzer::GTPV1::remove_gtpv1_connection", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"PacketAnalyzer::Geneve::get_options", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"PacketAnalyzer::PPPoE::session_id", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"PacketAnalyzer::PPPoE::session_id", ATTR_NO_ZEEK_SIDE_EFFECTS},
{"PacketAnalyzer::TEREDO::remove_teredo_connection", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"PacketAnalyzer::__disable_analyzer", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"PacketAnalyzer::__enable_analyzer", ATTR_NO_SCRIPT_SIDE_EFFECTS},
@ -162,6 +162,7 @@ static std::unordered_map<std::string, unsigned int> func_attrs = {
{"Storage::Sync::__get", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"Storage::Sync::__open_backend", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"Storage::Sync::__put", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"Storage::is_forced_sync", ATTR_NO_ZEEK_SIDE_EFFECTS},
{"Supervisor::__create", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"Supervisor::__destroy", ATTR_NO_SCRIPT_SIDE_EFFECTS},
{"Supervisor::__is_supervised", ATTR_IDEMPOTENT},
@ -301,6 +302,7 @@ static std::unordered_map<std::string, unsigned int> func_attrs = {
{"get_net_stats", ATTR_NO_ZEEK_SIDE_EFFECTS},
{"get_orig_seq", ATTR_NO_ZEEK_SIDE_EFFECTS},
{"get_package_readme", ATTR_FOLDABLE},
{"get_plugin_components", ATTR_NO_ZEEK_SIDE_EFFECTS},
{"get_port_transport_proto", ATTR_FOLDABLE},
{"get_proc_stats", ATTR_NO_ZEEK_SIDE_EFFECTS},
{"get_reassembler_stats", ATTR_NO_ZEEK_SIDE_EFFECTS},
@ -318,7 +320,6 @@ static std::unordered_map<std::string, unsigned int> func_attrs = {
{"global_ids", ATTR_IDEMPOTENT},
{"global_options", ATTR_IDEMPOTENT},
{"gsub", ATTR_FOLDABLE},
{"get_plugin_components", ATTR_NO_ZEEK_SIDE_EFFECTS},
{"has_event_group", ATTR_NO_ZEEK_SIDE_EFFECTS},
{"has_module_events", ATTR_NO_ZEEK_SIDE_EFFECTS},
{"have_spicy", ATTR_IDEMPOTENT},

View file

@ -14,7 +14,7 @@ install(
CODE "execute_process( \
COMMAND ${CMAKE_COMMAND} -E create_symlink \
${ZEEK_CONFIG_BTEST_TOOLS_DIR}/data \
${CMAKE_INSTALL_PREFIX}/share/zeek/tests \
\$ENV{DESTDIR}/${CMAKE_INSTALL_PREFIX}/share/zeek/tests \
)")
install(DIRECTORY scripts/spicy/ DESTINATION ${ZEEK_CONFIG_BTEST_TOOLS_DIR}/data/Scripts

View file

@ -1,5 +1,5 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
error in <...>/main.zeek, line 677: Already listening on 127.0.0.1:<port> (Cluster::__listen_websocket(ws_opts_x))
error in <...>/main.zeek, line 677: Already listening on 127.0.0.1:<port> (Cluster::__listen_websocket(ws_opts_wss_port))
error in <...>/main.zeek, line 677: Already listening on 127.0.0.1:<port> (Cluster::__listen_websocket(ws_opts_qs))
error in <...>/main.zeek, line 691: Already listening on 127.0.0.1:<port> (Cluster::__listen_websocket(ws_opts_x))
error in <...>/main.zeek, line 691: Already listening on 127.0.0.1:<port> (Cluster::__listen_websocket(ws_opts_wss_port))
error in <...>/main.zeek, line 691: Already listening on 127.0.0.1:<port> (Cluster::__listen_websocket(ws_opts_qs))
received termination signal

View file

@ -1,3 +1,3 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
error in <...>/main.zeek, line 677: Invalid tls_options: No key_file field (Cluster::__listen_websocket(Cluster::options.0))
error in <...>/main.zeek, line 677: Invalid tls_options: No cert_file field (Cluster::__listen_websocket(Cluster::options.3))
error in <...>/main.zeek, line 691: Invalid tls_options: No key_file field (Cluster::__listen_websocket(Cluster::options.0))
error in <...>/main.zeek, line 691: Invalid tls_options: No cert_file field (Cluster::__listen_websocket(Cluster::options.3))

View file

@ -0,0 +1 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.

View file

@ -0,0 +1,6 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
on_subscribe, zeek/supervisor
on_subscribe, /my_topic
on_unsubscribe, /my_topic
on_unsubscribe, /my_topic
on_subscribe, /my_topic2

View file

@ -35,3 +35,10 @@ got pong, 27, with, 4, time (cluster publish), Broker::Data, [data=broker::data{
got pong, 28, with, 4, time (cluster event ), Broker::Data, [data=broker::data{42000000000ns}]
got pong, 29, with, 4, time (cluster publish), Broker::Data, [data=broker::data{42000000000ns}]
got pong, 30, with, 4, time (cluster event ), Broker::Data, [data=broker::data{42000000000ns}]
sending pings, 5, R, [c=42, a=[[c=42, a=hello]]]
got pong, 31, with, 5, R (cluster publish), Broker::Data, [data=broker::data{(42, ((42, hello)))}]
got pong, 32, with, 5, R (cluster event ), Broker::Data, [data=broker::data{(42, ((42, hello)))}]
got pong, 33, with, 5, R (cluster publish), Broker::Data, [data=broker::data{(42, ((42, hello)))}]
got pong, 34, with, 5, R (cluster event ), Broker::Data, [data=broker::data{(42, ((42, hello)))}]
got pong, 35, with, 5, R (cluster publish), Broker::Data, [data=broker::data{(42, ((42, hello)))}]
got pong, 36, with, 5, R (cluster event ), Broker::Data, [data=broker::data{(42, ((42, hello)))}]

View file

@ -14,4 +14,7 @@ got ping, 3, vector of count, Broker::Data, [data=broker::data{(1, 2, 3)}]
got ping, 4, time, Broker::Data, [data=broker::data{42000000000ns}]
got ping, 4, time, Broker::Data, [data=broker::data{42000000000ns}]
got ping, 4, time, Broker::Data, [data=broker::data{42000000000ns}]
got ping, 5, R, Broker::Data, [data=broker::data{(42, ((42, hello)))}]
got ping, 5, R, Broker::Data, [data=broker::data{(42, ((42, hello)))}]
got ping, 5, R, Broker::Data, [data=broker::data{(42, ((42, hello)))}]
got finish!

View file

@ -0,0 +1 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.

View file

@ -0,0 +1,5 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
Connected!
ack {'type': 'ack', 'endpoint': 'endpoint', 'version': 'endpoint'}
ping {'type': 'data-message', 'topic': '/test/pings/', '@data-type': 'vector', 'data': [{'@data-type': 'count', 'data': 1}, {'@data-type': 'count', 'data': 1}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'ping'}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'fourty-two'}, {'@data-type': 'count', 'data': 42}]}, {'@data-type': 'vector', 'data': [{'@data-type': 'vector', 'data': [{'@data-type': 'count', 'data': 1}, {'@data-type': 'timestamp', 'data': '1970-01-01T01:42:42'}]}]}]}]}
pong {'type': 'data-message', 'topic': '/test/pongs/', '@data-type': 'vector', 'data': [{'@data-type': 'count', 'data': 1}, {'@data-type': 'count', 'data': 1}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'pong'}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'fourty-two fourty-two'}, {'@data-type': 'count', 'data': 84}]}, {'@data-type': 'vector', 'data': [{'@data-type': 'vector', 'data': [{'@data-type': 'count', 'data': 1}, {'@data-type': 'timestamp', 'data': '1970-01-01T01:42:42.000'}]}]}]}]}

View file

@ -0,0 +1 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.

View file

@ -0,0 +1,5 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
Connected!
ack {'type': 'ack', 'endpoint': 'endpoint', 'version': 'endpoint'}
ping {'type': 'data-message', 'topic': '/test/pings/', '@data-type': 'vector', 'data': [{'@data-type': 'count', 'data': 1}, {'@data-type': 'count', 'data': 1}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'ping'}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'fourty-two'}, {'@data-type': 'count', 'data': 42}]}]}]}
pong {'type': 'data-message', 'topic': '/test/pongs/', '@data-type': 'vector', 'data': [{'@data-type': 'count', 'data': 1}, {'@data-type': 'count', 'data': 1}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'pong'}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'fourty-two fourty-two'}, {'@data-type': 'count', 'data': 84}]}, {'@data-type': 'vector', 'data': [{'@data-type': 'vector', 'data': [{'@data-type': 'count', 'data': 1}, {'@data-type': 'timestamp', 'data': '1970-01-01T01:18:31.000'}]}]}]}]}

View file

@ -0,0 +1 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.

View file

@ -0,0 +1,5 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
Connected!
ack {'type': 'ack', 'endpoint': 'endpoint', 'version': 'endpoint'}
ping {'type': 'data-message', 'topic': '/test/pings/', '@data-type': 'vector', 'data': [{'@data-type': 'count', 'data': 1}, {'@data-type': 'count', 'data': 1}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'ping'}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'fourty-two'}, {'@data-type': 'count', 'data': 42}]}]}]}
pong {'type': 'data-message', 'topic': '/test/pongs/', '@data-type': 'vector', 'data': [{'@data-type': 'count', 'data': 1}, {'@data-type': 'count', 'data': 1}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'pong'}, {'@data-type': 'vector', 'data': [{'@data-type': 'string', 'data': 'fourty-two fourty-two'}, {'@data-type': 'count', 'data': 84}]}]}]}

View file

@ -0,0 +1,2 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
<params>, line 1: received termination signal

View file

@ -0,0 +1,4 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
Cluster::websocket_client_added, [/test/pongs/, /zeek/wstest/ws1/]
ping: fourty-two, 42 (metadata=[[id=EventMetadata::NETWORK_TIMESTAMP, val=6162.0]]), sending pong...
Cluster::websocket_client_lost

View file

@ -0,0 +1,2 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
<params>, line 1: received termination signal

View file

@ -0,0 +1,4 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
Cluster::websocket_client_added, [/test/pongs/, /zeek/wstest/ws1/]
ping: fourty-two, 42 (metadata=[]), sending pong...
Cluster::websocket_client_lost

View file

@ -0,0 +1,2 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
received termination signal

View file

@ -0,0 +1,4 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
Cluster::websocket_client_added, [/test/pongs/, /zeek/wstest/ws1/]
ping: fourty-two, 42 (metadata=[]), sending pong...
Cluster::websocket_client_lost

View file

@ -5,7 +5,7 @@
#unset_field -
#path analyzer
#open XXXX-XX-XX-XX-XX-XX
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data
#types time string string string string addr port addr port string string
XXXXXXXXXX.XXXXXX packet GTPV1 CHhAvVGS1DHFjwGM9 - 173.86.159.28 2152 213.72.147.186 2152 Truncated GTPv1 -
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p proto failure_reason failure_data
#types time string string string string addr port addr port enum string string
XXXXXXXXXX.XXXXXX packet GTPV1 CHhAvVGS1DHFjwGM9 - 173.86.159.28 2152 213.72.147.186 2152 udp Truncated GTPv1 -
#close XXXX-XX-XX-XX-XX-XX

View file

@ -1,2 +1,2 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
564 seen BiFs, 0 unseen BiFs (), 0 new BiFs ()
566 seen BiFs, 0 unseen BiFs (), 0 new BiFs ()

View file

@ -5,7 +5,7 @@
#unset_field -
#path analyzer
#open XXXX-XX-XX-XX-XX-XX
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data
#types time string string string string addr port addr port string string
XXXXXXXXXX.XXXXXX protocol NTLM CHhAvVGS1DHFjwGM9 - 192.168.0.173 1068 192.168.0.2 4997 NTLM AV Pair loop underflow -
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p proto failure_reason failure_data
#types time string string string string addr port addr port enum string string
XXXXXXXXXX.XXXXXX protocol NTLM CHhAvVGS1DHFjwGM9 - 192.168.0.173 1068 192.168.0.2 4997 tcp NTLM AV Pair loop underflow -
#close XXXX-XX-XX-XX-XX-XX

View file

@ -5,7 +5,7 @@
#unset_field -
#path analyzer
#open XXXX-XX-XX-XX-XX-XX
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data
#types time string string string string addr port addr port string string
XXXXXXXXXX.XXXXXX protocol NTLM CHhAvVGS1DHFjwGM9 - 192.168.0.173 1068 192.168.0.2 4997 NTLM AV Pair loop underflow -
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p proto failure_reason failure_data
#types time string string string string addr port addr port enum string string
XXXXXXXXXX.XXXXXX protocol NTLM CHhAvVGS1DHFjwGM9 - 192.168.0.173 1068 192.168.0.2 4997 tcp NTLM AV Pair loop underflow -
#close XXXX-XX-XX-XX-XX-XX

View file

@ -5,7 +5,7 @@
#unset_field -
#path analyzer
#open XXXX-XX-XX-XX-XX-XX
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data
#types time string string string string addr port addr port string string
XXXXXXXXXX.XXXXXX protocol FTP CHhAvVGS1DHFjwGM9 - 127.0.0.1 51354 127.0.0.1 21 non-numeric reply code 99 PASV invalid
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p proto failure_reason failure_data
#types time string string string string addr port addr port enum string string
XXXXXXXXXX.XXXXXX protocol FTP CHhAvVGS1DHFjwGM9 - 127.0.0.1 51354 127.0.0.1 21 tcp non-numeric reply code 99 PASV invalid
#close XXXX-XX-XX-XX-XX-XX

View file

@ -5,7 +5,7 @@
#unset_field -
#path analyzer
#open XXXX-XX-XX-XX-XX-XX
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data
#types time string string string string addr port addr port string string
XXXXXXXXXX.XXXXXX protocol FTP CHhAvVGS1DHFjwGM9 - 127.0.0.1 51344 127.0.0.1 21 non-numeric reply code SYST not supported
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p proto failure_reason failure_data
#types time string string string string addr port addr port enum string string
XXXXXXXXXX.XXXXXX protocol FTP CHhAvVGS1DHFjwGM9 - 127.0.0.1 51344 127.0.0.1 21 tcp non-numeric reply code SYST not supported
#close XXXX-XX-XX-XX-XX-XX

View file

@ -5,7 +5,7 @@
#unset_field -
#path analyzer
#open XXXX-XX-XX-XX-XX-XX
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data
#types time string string string string addr port addr port string string
XXXXXXXXXX.XXXXXX protocol FTP CHhAvVGS1DHFjwGM9 - 127.0.0.1 51346 127.0.0.1 21 invalid reply line 230_no_space
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p proto failure_reason failure_data
#types time string string string string addr port addr port enum string string
XXXXXXXXXX.XXXXXX protocol FTP CHhAvVGS1DHFjwGM9 - 127.0.0.1 51346 127.0.0.1 21 tcp invalid reply line 230_no_space
#close XXXX-XX-XX-XX-XX-XX

View file

@ -5,7 +5,7 @@
#unset_field -
#path analyzer
#open XXXX-XX-XX-XX-XX-XX
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data
#types time string string string string addr port addr port string string
XXXXXXXXXX.XXXXXX protocol HTTP CHhAvVGS1DHFjwGM9 - 192.168.12.5 51792 192.0.78.212 80 not a http request line -
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p proto failure_reason failure_data
#types time string string string string addr port addr port enum string string
XXXXXXXXXX.XXXXXX protocol HTTP CHhAvVGS1DHFjwGM9 - 192.168.12.5 51792 192.0.78.212 80 tcp not a http request line -
#close XXXX-XX-XX-XX-XX-XX

View file

@ -1,3 +1,3 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data
1673270800.189652 protocol POSTGRESQL CHhAvVGS1DHFjwGM9 - 127.0.0.1 54958 127.0.0.1 5432 error while parsing PostgreSQL: &requires failed: (self.length >= 4) (...) -
ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p proto failure_reason failure_data
1673270800.189652 protocol POSTGRESQL CHhAvVGS1DHFjwGM9 - 127.0.0.1 54958 127.0.0.1 5432 tcp error while parsing PostgreSQL: &requires failed: (self.length >= 4) (...) -

View file

@ -0,0 +1,3 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
ts uid id.orig_h id.orig_p id.resp_h id.resp_p service
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 10.21.179.53 51625 192.168.115.201 5432 postgresql

View file

@ -0,0 +1,4 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
ts uid id.orig_h id.orig_p id.resp_h id.resp_p user database application_name frontend frontend_arg backend backend_arg success rows
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 10.21.179.53 51625 192.168.115.201 5432 - - - ssl_request - ssl_reply N F -
XXXXXXXXXX.XXXXXX CHhAvVGS1DHFjwGM9 10.21.179.53 51625 192.168.115.201 5432 test postgres Navicat startup - auth_ok,error SeverityLocalized=FATAL,Severity=FATAL,Code=28000,Message=role "test" does not exist,File=miscinit.c,Line=694,Routine=InitializeSessionUserId F -

View file

@ -0,0 +1,11 @@
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path files
#open XXXX-XX-XX-XX-XX-XX
#fields ts fuid uid id.orig_h id.orig_p id.resp_h id.resp_p source depth analyzers mime_type filename duration local_orig is_orig seen_bytes total_bytes missing_bytes overflow_bytes timedout parent_fuid
#types time string string addr port addr port string count set[string] string string interval bool bool count count count count bool string
XXXXXXXXXX.XXXXXX FmcSEk2dq4v0hewpM4 CHhAvVGS1DHFjwGM9 172.31.112.17 57829 172.31.112.16 445 SMB 0 (empty) text/plain Test.txt 0.000000 T F 189 189 0 0 F -
#close XXXX-XX-XX-XX-XX-XX

View file

@ -5,7 +5,7 @@
#unset_field -
#path analyzer
#open XXXX-XX-XX-XX-XX-XX
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data packet_segment
#types time string string string string addr port addr port string string string
XXXXXXXXXX.XXXXXX protocol FTP CHhAvVGS1DHFjwGM9 - 2001:470:1f05:17a6:d69a:20ff:fefd:6b88 24316 2001:6a8:a40::21 21 non-numeric reply code SSH-2.0-mod_sftp/0.9.7 \xd4\x9a \xfdk\x88\x00\x80\xc8\xb9\xc2\x06\x86\xdd`\x00\x00\x00\x00t\x067 \x01\x06\xa8\x0a@\x00\x00\x00\x00\x00\x00\x00\x00\x00! \x01\x04p\x1f\x05\x17\xa6\xd6\x9a \xff\xfe\xfdk\x88\x00\x15^\xfc\x1f]\xed\x1b\xa9\x9f`\xf1P\x18\x00\x09~n\x00\x00SSH-2.0-mod_sftp/0.9.7\x0d\x0a\x00\x00\x00D\x08\x01\x00\x00\x00\x0c\x00\x00\x00)Maximum connections for host/user reached\x00\x00\x00\x05en-USI\xf8\xb9C\xae\xcf`\xc4
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p proto failure_reason failure_data packet_segment
#types time string string string string addr port addr port enum string string string
XXXXXXXXXX.XXXXXX protocol FTP CHhAvVGS1DHFjwGM9 - 2001:470:1f05:17a6:d69a:20ff:fefd:6b88 24316 2001:6a8:a40::21 21 tcp non-numeric reply code SSH-2.0-mod_sftp/0.9.7 \xd4\x9a \xfdk\x88\x00\x80\xc8\xb9\xc2\x06\x86\xdd`\x00\x00\x00\x00t\x067 \x01\x06\xa8\x0a@\x00\x00\x00\x00\x00\x00\x00\x00\x00! \x01\x04p\x1f\x05\x17\xa6\xd6\x9a \xff\xfe\xfdk\x88\x00\x15^\xfc\x1f]\xed\x1b\xa9\x9f`\xf1P\x18\x00\x09~n\x00\x00SSH-2.0-mod_sftp/0.9.7\x0d\x0a\x00\x00\x00D\x08\x01\x00\x00\x00\x0c\x00\x00\x00)Maximum connections for host/user reached\x00\x00\x00\x05en-USI\xf8\xb9C\xae\xcf`\xc4
#close XXXX-XX-XX-XX-XX-XX

View file

@ -5,7 +5,7 @@
#unset_field -
#path analyzer
#open XXXX-XX-XX-XX-XX-XX
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p failure_reason failure_data
#types time string string string string addr port addr port string string
XXXXXXXXXX.XXXXXX protocol SPICY_SSH CHhAvVGS1DHFjwGM9 - 192.150.186.169 49244 131.159.14.23 22 failed to match regular expression (<...>/test.spicy:9:15-9:22) SSH-2.0-OpenSSH_3.8.1p1\x0a
#fields ts analyzer_kind analyzer_name uid fuid id.orig_h id.orig_p id.resp_h id.resp_p proto failure_reason failure_data
#types time string string string string addr port addr port enum string string
XXXXXXXXXX.XXXXXX protocol SPICY_SSH CHhAvVGS1DHFjwGM9 - 192.150.186.169 49244 131.159.14.23 22 tcp failed to match regular expression (<...>/test.spicy:9:15-9:22) SSH-2.0-OpenSSH_3.8.1p1\x0a
#close XXXX-XX-XX-XX-XX-XX

View file

@ -53,3 +53,6 @@ Trace Index/Sources:
- ldap/adduser1.pcap ldap/adduser1-ntlm.pcap
Provided by Mohan-Dhawan on #4275
https://github.com/zeek/zeek/issues/4275
- smb_v2_only_non_zero_reserved1.pcap
Provided by @predator89090 on #4730
https://github.com/zeek/zeek/issues/4730

View file

@ -0,0 +1,24 @@
# @TEST-DOC: Cluster::on_subscribe and Cluster::on_unsubscribe hooks
#
# @TEST-EXEC: zeek --parse-only -b %INPUT
# @TEST-EXEC: zeek -b %INPUT
# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr
# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stdout
# Report each topic the node subscribes to, to verify the hook fires.
hook Cluster::on_subscribe(topic: string)
{
print "on_subscribe", topic;
}
# Report each topic the node unsubscribes from. Note that the hook is
# invoked for every Cluster::unsubscribe() call, including a repeated
# unsubscribe of the same topic (exercised below).
hook Cluster::on_unsubscribe(topic: string)
{
print "on_unsubscribe", topic;
}
# Drive the hooks above: subscribe, unsubscribe twice (the second call
# checks that the hook still fires), then subscribe to another topic.
event zeek_init()
{
Cluster::subscribe("/my_topic");
Cluster::unsubscribe("/my_topic");
Cluster::unsubscribe("/my_topic");
Cluster::subscribe("/my_topic2");
}

View file

@ -34,9 +34,14 @@ global pong: event(c: count, what: string, val: any) &is_used;
global i = 0;
global pongs = 0;
type R: record {
c: count;
a: any;
};
event send_any()
{
if ( i > 4 )
if ( i > 5 )
return;
local val: any;
@ -48,8 +53,10 @@ event send_any()
val = 42/tcp;
else if ( i == 3 )
val = vector(1, 2, 3);
else
else if ( i == 4 )
val = double_to_time(42.0);
else
val = R($c=42, $a=vector(R($c=42, $a="hello")));
print "sending pings", i, type_name(val), val;
Cluster::publish_hrw(Cluster::worker_pool, cat(i), ping, i, type_name(val), val);
@ -64,10 +71,10 @@ event pong(c: count, what: string, val: any)
++pongs;
print "got pong", pongs, "with", c, what, type_name(val), val;
# The manager sends 5 types of pings, in 3 different ways. The worker
# answers each ping in two ways, for a total of 30 expected pongs at the
# The manager sends 6 types of pings, in 3 different ways. The worker
# answers each ping in two ways, for a total of 36 expected pongs at the
# manager. Every batch of pings involves 6 pongs.
if ( pongs == 30 )
if ( pongs == 36 )
Cluster::publish(Cluster::worker_topic, finish);
else if ( pongs > 0 && pongs % 6 == 0 )
{

View file

@ -105,7 +105,7 @@ def run(ws_url):
# This should be good ping(string, count)
ws.send(json.dumps(make_ping([{"@data-type": "string", "data": "Hello"}, {"@data-type": "count", "data": 42}])))
pong = json.loads(ws.recv())
name, args, _ = pong["data"][2]["data"]
name, args = pong["data"][2]["data"]
print("pong", name, args)
# This one fails again

View file

@ -0,0 +1,121 @@
# @TEST-DOC: Run a single node cluster (manager) with a websocket server and have a single client connect to check the metadata it receives.
#
# @TEST-REQUIRES: have-zeromq
# @TEST-REQUIRES: python3 -c 'import websockets.sync'
#
# @TEST-GROUP: cluster-zeromq
#
# @TEST-PORT: XPUB_PORT
# @TEST-PORT: XSUB_PORT
# @TEST-PORT: LOG_PULL_PORT
# @TEST-PORT: WEBSOCKET_PORT
#
# @TEST-EXEC: cp $FILES/zeromq/cluster-layout-simple.zeek cluster-layout.zeek
# @TEST-EXEC: cp $FILES/zeromq/test-bootstrap.zeek zeromq-test-bootstrap.zeek
# @TEST-EXEC: cp $FILES/ws/wstest.py .
#
# @TEST-EXEC: zeek -b --parse-only manager.zeek
# @TEST-EXEC: python3 -m py_compile client.py
#
# @TEST-EXEC: btest-bg-run manager-no-metadata "ZEEKPATH=$ZEEKPATH:.. && CLUSTER_NODE=manager zeek -b ../manager.zeek >out"
# @TEST-EXEC: btest-bg-run client-no-metadata "python3 ../client.py >out"
#
# @TEST-EXEC: btest-bg-wait 30
# @TEST-EXEC: btest-diff ./manager-no-metadata/out
# @TEST-EXEC: btest-diff ./manager-no-metadata/.stderr
# @TEST-EXEC: btest-diff ./client-no-metadata/out
# @TEST-EXEC: btest-diff ./client-no-metadata/.stderr
#
# @TEST-EXEC: btest-bg-run manager-metadata "ZEEKPATH=$ZEEKPATH:.. && CLUSTER_NODE=manager zeek -b ../manager.zeek EventMetadata::add_network_timestamp=T >out"
# @TEST-EXEC: btest-bg-run client-metadata "python3 ../client.py >out"
#
# @TEST-EXEC: btest-bg-wait 30
# @TEST-EXEC: btest-diff ./manager-metadata/out
# @TEST-EXEC: btest-diff ./manager-metadata/.stderr
# @TEST-EXEC: btest-diff ./client-metadata/out
# @TEST-EXEC: btest-diff ./client-metadata/.stderr
#
# @TEST-EXEC: btest-bg-run manager-metadata-from-client "ZEEKPATH=$ZEEKPATH:.. && CLUSTER_NODE=manager zeek -b ../manager.zeek EventMetadata::add_network_timestamp=T >out"
# @TEST-EXEC: btest-bg-run client-metadata-from-client "NETWORK_TIMESTAMP=1970-01-01T01:42:42 python3 ../client.py >out"
# @TEST-EXEC: btest-bg-wait 30
# @TEST-EXEC: btest-diff ./manager-metadata-from-client/out
# @TEST-EXEC: btest-diff ./manager-metadata-from-client/.stderr
# @TEST-EXEC: btest-diff ./client-metadata-from-client/out
# @TEST-EXEC: btest-diff ./client-metadata-from-client/.stderr
# @TEST-START-FILE manager.zeek
@load ./zeromq-test-bootstrap
redef exit_only_after_terminate = T;
redef allow_network_time_forward = F;
global ping: event(msg: string, c: count) &is_used;
global pong: event(msg: string, c: count) &is_used;
event zeek_init()
{
set_network_time(double_to_time(4711.0));
Cluster::subscribe("/test/pings/");
Cluster::listen_websocket([$listen_addr=127.0.0.1, $listen_port=to_port(getenv("WEBSOCKET_PORT"))]);
}
event ping(msg: string, n: count) &is_used
{
print fmt("ping: %s, %s (metadata=%s), sending pong...", msg, n, EventMetadata::current_all());
Cluster::publish("/test/pongs/", pong, msg + " " + msg, n + n);
}
event Cluster::websocket_client_added(info: Cluster::EndpointInfo, subscriptions: string_vec)
{
print "Cluster::websocket_client_added", subscriptions;
}
event Cluster::websocket_client_lost(info: Cluster::EndpointInfo, code: count, reason: string)
{
print "Cluster::websocket_client_lost";
terminate();
}
# @TEST-END-FILE
# @TEST-START-FILE client.py
import os
import wstest
def run(ws_url):
    """Connect to the manager via WebSocket, send one ping, await the pong.

    Optionally attaches network-timestamp event metadata to the ping when the
    NETWORK_TIMESTAMP environment variable is set, so the manager-side test
    can verify client-supplied metadata handling. All output is compared
    against a btest baseline, so the printed strings must stay stable.
    """
    with wstest.connect("ws1", ws_url) as c:
        print("Connected!")
        # Handshake: subscribe to the pong topic and sanity-check the ack.
        ack = c.hello_v1(["/test/pongs/"])
        for key in ("type", "endpoint", "version"):
            assert key in ack
        assert ack["type"] == "ack"
        # Normalize fields that vary per run so the baseline diff is stable.
        ack["endpoint"] = "endpoint"
        ack["version"] = "endpoint"
        print("ack", ack)
        ping = wstest.build_event_v1("/test/pings/", "ping", ["fourty-two", 42])
        ts_str = os.environ.get("NETWORK_TIMESTAMP")
        if ts_str:
            # Sneak timestamp metadata into the ping if the env variable is set
            metadata_entry = {
                "@data-type": "vector",
                "data": [
                    {"@data-type": "count", "data": 1},
                    {"@data-type": "timestamp", "data": ts_str},
                ],
            }
            ping["data"][2]["data"].append(
                {"@data-type": "vector", "data": [metadata_entry]}
            )
        print("ping", ping)
        c.send_json(ping)
        pong = c.recv_json()
        print("pong", pong)


if __name__ == "__main__":
    wstest.main(run, wstest.WS4_URL_V1)
# @TEST-END-FILE

View file

@ -150,6 +150,7 @@ global known_BiFs = set(
"Option::set_change_handler",
"PacketAnalyzer::GTPV1::remove_gtpv1_connection",
"PacketAnalyzer::Geneve::get_options",
"PacketAnalyzer::PPPoE::session_id",
"PacketAnalyzer::TEREDO::remove_teredo_connection",
"PacketAnalyzer::__disable_analyzer",
"PacketAnalyzer::__enable_analyzer",
@ -184,7 +185,6 @@ global known_BiFs = set(
"Reporter::warning",
"Spicy::__resource_usage",
"Spicy::__toggle_analyzer",
"Storage::is_open",
"Storage::Async::__close_backend",
"Storage::Async::__erase",
"Storage::Async::__get",
@ -195,6 +195,8 @@ global known_BiFs = set(
"Storage::Sync::__get",
"Storage::Sync::__open_backend",
"Storage::Sync::__put",
"Storage::is_forced_sync",
"Storage::is_open",
"Supervisor::__create",
"Supervisor::__destroy",
"Supervisor::__is_supervised",
@ -337,6 +339,7 @@ global known_BiFs = set(
"get_net_stats",
"get_orig_seq",
"get_package_readme",
"get_plugin_components",
"get_port_transport_proto",
"get_proc_stats",
"get_reassembler_stats",
@ -345,7 +348,6 @@ global known_BiFs = set(
"get_reporter_stats",
"get_resp_seq",
"get_script_comments",
"get_plugin_components",
"get_thread_stats",
"get_timer_stats",
"getenv",

View file

@ -0,0 +1,12 @@
# @TEST-DOC: Test Zeek parsing a trace file through the PostgreSQL analyzer.
#
# @TEST-REQUIRES: ${SCRIPTS}/have-spicy
# @TEST-EXEC: zeek -b -r ${TRACES}/postgresql/psql-login-no-role.pcap %INPUT >output
# @TEST-EXEC: zeek-cut -m ts uid id.orig_h id.orig_p id.resp_h id.resp_p service < conn.log > conn.cut
# @TEST-EXEC: zeek-cut -m < postgresql.log > postgresql.cut
#
# @TEST-EXEC: btest-diff conn.cut
# @TEST-EXEC: btest-diff postgresql.cut
@load base/protocols/conn
@load base/protocols/postgresql

View file

@ -0,0 +1,9 @@
# @TEST-DOC: Regression test for #4730, ReadResponse not parsed properly.
#
# @TEST-EXEC: zeek -b -C -r $TRACES/smb/smb_v2_only_non_zero_reserved1.pcap %INPUT
# @TEST-EXEC: btest-diff files.log
# @TEST-EXEC: test ! -f analyzer.log
# @TEST-EXEC: test ! -f weird.log
@load base/protocols/smb

View file

@ -1 +1 @@
270d4b46fa1ab9f2951c2945937bdf739e864304
6dafc6fd68d9821f33b7f8f4d7d4d877b5827ae3

View file

@ -1 +1 @@
034c859753b435dc2a6368fa46ecf3e92c98d9da
1edbd3ae959471e8573c9edc0374235727970710