Mirror of https://github.com/zeek/zeek.git, synced 2025-10-02 06:38:20 +00:00

commit 649cedb693
Merge remote-tracking branch 'origin/topic/timw/python-upgrade'

* origin/topic/timw/python-upgrade: (65 commits)
  Swap pre-commit yapf for ruff/ruff-format, fix findings
  Upgrade package-manager submodule to get python upgrade
  Upgrade broker submodule to get python upgrade
  Upgrade zeekctl submodule to get python upgrade
  Upgrade zeek-client submodule to get python upgrade
  Upgrade btest submodule to get python upgrade
  Require Python 3.9 in CMakeLists.txt
  CI: Install python 3.9 on ubuntu 20
  btest: Avoid loading ZeroMQ if not compiled in
  btest/coverage: Avoid warnings in test-all-policy-cluster
  Broker::publish: Warn on using Broker::publish() when inactive
  Update doc submodule [nomail] [skip ci]
  Update zeek-testing and zeek-testing-cluster commit hashes
  Update ZAM BiF-tracking
  configure: Add --disable-cluster-backend-zeromq
  CMakeLists: Cluster backends output
  zeromq: Conditionally enable by default
  btest/generic: Add publish_hrw(), publish_rr() and logging tests
  generate-docs: Run on Ubuntu 24.04, add cppzmq
  docker: Add cppzmq/libzmq dependencies
  ...

410 changed files with 4922 additions and 559 deletions

.github/workflows/generate-docs.yml (vendored, 5 lines changed)
@@ -17,7 +17,7 @@ jobs:
 permissions:
 contents: write # for Git to git push
 if: github.repository == 'zeek/zeek'
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04

 steps:
 # We only perform a push if the action was triggered via a schedule
@@ -51,6 +51,7 @@ jobs:
 bsdmainutils \
 ccache \
 cmake \
+cppzmq-dev \
 flex \
 g++ \
 gcc \
@@ -71,7 +72,7 @@ jobs:
 # `python2` so this is a simple workaround until we drop Python 2
 # support and explicitly use `python3` for all invocations.
 sudo ln -sf /usr/bin/python3 /usr/local/bin/python
-sudo pip3 install -r doc/requirements.txt
+sudo pip3 install --break-system-packages -r doc/requirements.txt

 - name: ccache
 uses: hendrikmuhs/ccache-action@v1.2
.gitmodules (vendored, 3 lines changed)
@@ -76,3 +76,6 @@
 [submodule "auxil/prometheus-cpp"]
 path = auxil/prometheus-cpp
 url = https://github.com/zeek/prometheus-cpp
+[submodule "src/cluster/backend/zeromq/auxil/cppzmq"]
+path = src/cluster/backend/zeromq/auxil/cppzmq
+url = https://github.com/zeromq/cppzmq
.pre-commit-config.yaml
@@ -2,8 +2,19 @@
 # See https://pre-commit.com/hooks.html for more hooks
 #
 repos:
+- repo: local
+  hooks:
+  - id: license
+    name: Check for license headers
+    entry: ./ci/license-header.py
+    language: python
+    types_or:
+    - "c"
+    - "c++"
+    exclude: '^(testing/btest/plugins/.*|testing/builtin-plugins/.*)$'
+
 - repo: https://github.com/pre-commit/mirrors-clang-format
-  rev: 'v18.1.8'
+  rev: 'v19.1.4'
   hooks:
   - id: clang-format
     types_or:
@@ -17,10 +28,12 @@ repos:
   - id: shfmt
     args: ["-w", "-i", "4", "-ci"]

-- repo: https://github.com/google/yapf
-  rev: v0.40.2
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  rev: v0.8.1
   hooks:
-  - id: yapf
+  - id: ruff
+    args: [--fix]
+  - id: ruff-format

 - repo: https://github.com/cheshirekow/cmake-format-precommit
   rev: v0.6.13
@@ -28,13 +41,13 @@ repos:
   - id: cmake-format

 - repo: https://github.com/crate-ci/typos
-  rev: v1.16.21
+  rev: v1.28.2
   hooks:
   - id: typos
-    exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES)$'
+    exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES|scripts/base/protocols/ssl/mozilla-ca-list.zeek)$'

 - repo: https://github.com/bbannier/spicy-format
-  rev: v0.20.0
+  rev: v0.22.0
   hooks:
   - id: spicy-format
     # TODO: Reformat existing large analyzers just before 8.0.
.style.yapf (deleted)
@@ -1,2 +0,0 @@
-[style]
-column_limit=100
.typos.toml
@@ -48,6 +48,15 @@ extend-ignore-identifiers-re = [
 "ND_ROUTER_.*",
 "ND_NEIGHBOR_.*",
 ".*_ND_option.*",
+"bck", # Used with same length as `fwd`
+"pn", # Use for `PoolNode` variables
+"ffrom_[ip|port|mac]", # Used in netcontrol.
+"complte_flag", # Existing use in exported record in base.
+"VidP(n|N)", # In SMB.
+"iin", # In DNP3.
+"(ScValidatePnPService|ScSendPnPMessage)", # In DCE-RPC.
+"snet", # Used as shorthand for subnet in base scripts.
+"(e|i)it", # Used as name for some iterators.
 ]

 [default.extend-identifiers]
CHANGES (211 lines changed)
@@ -1,3 +1,214 @@
+7.1.0-dev.775 | 2024-12-11 11:45:31 -0700
+
+  * Swap pre-commit yapf for ruff/ruff-format, fix findings (Tim Wojtulewicz, Corelight)
+
+  * Upgrade package-manager submodule to get python upgrade (Tim Wojtulewicz, Corelight)
+
+  * Upgrade broker submodule to get python upgrade (Tim Wojtulewicz, Corelight)
+
+  * Upgrade zeekctl submodule to get python upgrade (Tim Wojtulewicz, Corelight)
+
+  * Upgrade zeek-client submodule to get python upgrade (Tim Wojtulewicz, Corelight)
+
+  * Upgrade btest submodule to get python upgrade (Tim Wojtulewicz, Corelight)
+
+  * Require Python 3.9 in CMakeLists.txt (Tim Wojtulewicz, Corelight)
+
+  * CI: Install python 3.9 on ubuntu 20 (Tim Wojtulewicz, Corelight)
+
+7.1.0-dev.766 | 2024-12-11 11:07:53 -0700
+
+  * btest: Avoid loading ZeroMQ if not compiled in (Arne Welzel, Corelight)
+
+    ...at the same time, add some `TEST-REQUIRES: have-zeromq` which
+    unfortunately means that developers will usually want libzmq
+    installed on their system.
+
+  * btest/coverage: Avoid warnings in test-all-policy-cluster (Arne Welzel, Corelight)
+
+  * Broker::publish: Warn on using Broker::publish() when inactive (Arne Welzel, Corelight)
+
+    This is mostly for transitioning base scripts to Cluster::publish() and
+    avoid silent surprises why certain things don't work when using ZeroMQ.
+
+  * Update zeek-testing and zeek-testing-cluster commit hashes (Tim Wojtulewicz, Corelight)
+
+7.1.0-dev.760 | 2024-12-11 09:28:04 +0100
+
+  * Update ZAM BiF-tracking (Arne Welzel, Corelight)
+
+  * configure: Add --disable-cluster-backend-zeromq (Arne Welzel, Corelight)
+
+  * CMakeLists: Cluster backends output (Arne Welzel, Corelight)
+
+  * zeromq: Conditionally enable by default (Arne Welzel, Corelight)
+
+    Instead of having ZeroMQ as a new dependency, enable the ZeroMQ backend only
+    if ZeroMQ is available on the system as suggested by Tim.
+
+  * btest/generic: Add publish_hrw(), publish_rr() and logging tests (Arne Welzel, Corelight)
+
+    They currently use zeromq, but technically they should be valid for
+    any other backend, too, even broker.
+
+  * generate-docs: Run on Ubuntu 24.04, add cppzmq (Arne Welzel, Corelight)
+
+  * docker: Add cppzmq/libzmq dependencies (Arne Welzel, Corelight)
+
+  * tsan_suppressions: Add called_from_lib: libzmq (Arne Welzel, Corelight)
+
+  * ci: Add cppzmq and libzmq to most platforms (Arne Welzel, Corelight)
+
+  * cluster/backend/zeromq: Add ZeroMQ based cluster backend (Arne Welzel, Corelight)
+
+    This is a cluster backend implementation using a central XPUB/XSUB proxy
+    that by default runs on the manager node. Logging is implemented leveraging
+    PUSH/PULL sockets between logger and other nodes, rather than going
+    through XPUB/XSUB.
+
+    The test-all-policy-cluster baseline changed: Previously, Broker::peer()
+    would be called from setup-connections.zeek, causing the IO loop to be
+    alive. With the ZeroMQ backend, the IO loop is only alive when
+    Cluster::init() is called, but that doesn't happen anymore.
+
+  * cluster/backend/zeromq: Add cppzmq submodule (Arne Welzel, Corelight)
+
+    Not all supported platforms provide a recent enough cppzmq version,
+    add a fallback as submodule. cppzmq is a header-only library, so there's
+    no build step involved.
+
+  * cluster: Add Cluster::node_id(), allow redef of node_topic(), nodeid_topic() (Arne Welzel, Corelight)
+
+    This provides a way for non-broker cluster backends to override a
+    node's identifier and its own topics that it listens on by default.
+
+  * cluster: Move publish_hrw() and publish_rr() to cluster.bif (Arne Welzel, Corelight)
+
+    From this point on, Cluster::publish_hrw() and Cluster::publish_rr()
+    go through cluster/Backend.cc code.
+
+7.1.0-dev.745 | 2024-12-10 16:15:57 -0700
+
+  * Add btest for unknown_protocols.log (Jan Grashoefer, Corelight)
+
+  * Add protocol_id count to unknown protocol record (Jan Grashoefer, Corelight)
+
+    The count representation is not logged and added for access to the value
+    in log policy hooks without converting the logged hex representation.
+
+7.1.0-dev.742 | 2024-12-10 14:41:10 -0700
+
+  * NEWS tweaks [skip ci] (Christian Kreibich, Corelight)
+
+    - Switch list items back to "-" from "*" -- we hadn't used "*" since
+      2.5.4 but started in 7.1, probably by accident? :-)
+
+    - Fix a typo.
+
+7.1.0-dev.740 | 2024-12-10 20:31:36 +0100
+
+  * Update BiF-tracking, add get_current_packet_ts() (Arne Welzel, Corelight)
+
+    Also, run the ZAM-bif-tracking test in non-ZAM environments so
+    failures are caught immediately. There's nothing overly ZAM specific about
+    running this test.
+
+    I'm not sure I like the fact that any new contributor adding a BiF
+    will need to dig into this... it might be a bit intimidating.
+
+7.1.0-dev.738 | 2024-12-10 18:26:31 +0100
+
+  * Introduce get_packet_lag() (Jan Grashoefer, Corelight)
+
+  * Add btest for get_current_packet_ts() (Jan Grashoefer, Corelight)
+
+  * Introduce get_current_packet_ts to fix packet lag (Jan Grashoefer, Corelight)
+
+    Using network_time to calculate packet lag will produce wrong results
+    when there is no packet available but network time does not (yet) fall
+    back to wall clock.
+
+7.1.0-dev.734 | 2024-12-10 09:56:46 +0100
+
+  * DNS/dns_binds_rr: Fix complte to complete typo, switch to count (Arne Welzel, Corelight)
+
+    From my reading in the docs the complete_flag should only ever be a
+    single byte, so add a weird for when it is longer, but use count
+    as the new type.
+
+7.1.0-dev.732 | 2024-12-09 23:28:30 -0800
+
+  * Support for Broker I/O backpressure overflow policies (Christian Kreibich, Corelight, and Dominik Charousset)
+
+    - Add sleep() BiF
+    - Add backpressure disconnect notification to cluster.log and via telemetry
+    - Remove unneeded @loads from base/misc/version.zeek
+    - Add Cluster::nodeid_to_node() helper function
+    - Support re-peering with Broker peers that fall behind
+    - Add Zeek-level configurability of Broker slow-peer disconnects
+    - Bump Broker to pull in disconnect feature and infinite-loop fix
+    - No need to namespace Cluster:: functions in their own namespace
+
+  * Update doc submodule [nomail] [skip ci] (zeek-bot)
+
+7.1.0-dev.720 | 2024-12-09 12:22:44 -0700
+
+  * Add missing copyright line to headers and cc files (Arne Welzel, Corelight)
+
+  * pre-commit: Add license-header check inspired by Spicy (Arne Welzel, Corelight)
+
+  * Add missing "COPYING" in file comments (Arne Welzel, Corelight)
+
+    This was just done via sed. There's a number of files that don't
+    have a license entry at all.
+
+7.1.0-dev.716 | 2024-12-09 12:15:46 -0700
+
+  * BTest baseline updates for compile-to-C++ (Vern Paxson, Corelight)
+
+  * mark ZAM regression BTests as not suitable for compile-to-C++ (Vern Paxson, Corelight)
+
+  * fix for -O gen-C++ maintenance helper to skip BTest intermediary files (Vern Paxson, Corelight)
+
+  * introduced simplified initialization for non-standalone -O gen-C++ code (Vern Paxson, Corelight)
+    tied -O gen-standalone-C++ to use of --optimize-files
+
+  * streamline generated -O C++ code by relying on per-function profiles rather than aggregate profile (Vern Paxson, Corelight)
+
+  * when reporting available/unavailble C++ script bodies, flag those that are skipped (Vern Paxson, Corelight)
+
+  * modified AST profiling to mark (and fully skip) non-optimizable functions (Vern Paxson, Corelight)
+
+  * modified merge_types() to skip work if given identical types, which (Vern Paxson, Corelight)
+    also preserves type names (useful for -O gen-C++)
+
+7.1.0-dev.707 | 2024-12-09 12:08:21 -0700
+
+  * Move python signatures to a separate file (Tim Wojtulewicz, Corelight)
+
+  * Add signatures for Python bytecode for 3.8-3.14 (Tim Wojtulewicz, Corelight)
+
+  * Bump pre-commit hooks (Benjamin Bannier, Corelight)
+
+  * Bump typos pre-commit hooks (Benjamin Bannier, Corelight)
+
+    This now picks up additional typical misspellings, but also triggers on
+    more identifiers we use. I opted for fixing the obvious misspellings and
+    updated the allowlist for anything else.
+
+  * Bump clang-format pre-commit hooks (Benjamin Bannier, Corelight)
+
+    This brings in slightly better formatting around uniform initialization,
+    and comments after blocks not surrounded by `{ .. }`.
+
+7.1.0-dev.700 | 2024-12-09 14:03:29 +0100
+
+  * ScriptOpt: Fail compilation if known exprs/stmts is outdated (Arne Welzel, Corelight)
+
+  * skip optimization of functions with AST nodes unknown to script optimization (Vern Paxson, Corelight)
+
+  * Disable CTU-SME test under TSAN (Arne Welzel, Corelight)
+
 7.1.0-dev.695 | 2024-12-06 17:33:19 +0100

   * btest/http: Demo StreamEvent analyzer with HTTP::upgrade_analyzers (Arne Welzel, Corelight)
CMakeLists.txt
@@ -772,7 +772,7 @@ if (NOT SED_EXE)
 endif ()
 endif ()

-set(ZEEK_PYTHON_MIN 3.5.0)
+set(ZEEK_PYTHON_MIN 3.9.0)
 set(Python_FIND_UNVERSIONED_NAMES FIRST)
 find_package(Python ${ZEEK_PYTHON_MIN} REQUIRED COMPONENTS Interpreter)
 find_package(FLEX REQUIRED)
@@ -1490,6 +1490,10 @@ message(
 "\n - debugging: ${USE_PERFTOOLS_DEBUG}"
 "\njemalloc: ${ENABLE_JEMALLOC}"
 "\n"
+"\nCluster backends:"
+"\n - Broker: ON"
+"\n - ZeroMQ: ${ENABLE_CLUSTER_BACKEND_ZEROMQ}"
+"\n"
 "\nFuzz Targets: ${ZEEK_ENABLE_FUZZERS}"
 "\nFuzz Engine: ${ZEEK_FUZZING_ENGINE}"
 "${_analyzer_warning}"
NEWS (133 lines changed)
@@ -9,14 +9,14 @@ Zeek 7.1.0
 Breaking Changes
 ----------------

-* The ``OpaqueVal::DoSerialize`` and ``OpaqueVal::DoUnserialize`` methods were
+- The ``OpaqueVal::DoSerialize`` and ``OpaqueVal::DoUnserialize`` methods were
   marked as deprecated in v7.0 and have now been removed as per the Zeek
   deprecation policy. Plugins that were overriding these methods and were not
   updated will fail to compile. Those plugins should be updated to override the
   new ``OpaqueVal::DoSerializeData`` and ``OpaqueVal::DoUnserializeData``
   methods.

-* Certain internal methods on the broker and logging classes have been changed to
+- Certain internal methods on the broker and logging classes have been changed to
   accept std::vector<threading::Value> parameters instead of threading::Value**
   to leverage automatic memory management, reduce the number of allocations
   and use move semantics to express ownership.
@@ -25,12 +25,12 @@ Breaking Changes
   are not affected by this change, so we keep backwards compatibility with
   existing log writers.

-* ``Func::Name()`` was deprecated, use ``Func::GetName()`` instead.
+- ``Func::Name()`` was deprecated, use ``Func::GetName()`` instead.

 New Functionality
 -----------------

-* IP-based connections that were previously not logged due to using an unknown
+- IP-based connections that were previously not logged due to using an unknown
   IP protocol (e.g. not TCP, UDP, or ICMP) now appear in conn.log. All conn.log
   entries have a new ``ip_proto`` column that indicates the numeric IP protocol
   identifier used by the connection. A new policy script at
@@ -39,6 +39,12 @@ New Functionality
   This entire feature can be disabled by loading the new
   ``policy/protocols/conn/disable-unknown-ip-proto-support.zeek`` policy script.

+- New ``Cluster::publish()``, ``Cluster::subscribe()`` and ``Cluster::unsubscribe()``
+  functions have been added. In contrast to their ``Broker`` counterparts, these
+  will operate on whichever cluster backend is enabled. Going forward, in-tree
+  ``Broker::publish()`` usages will be replaced with ``Cluster::publish()`` and
+  script writers should opt to prefer these over the Broker-specific functions.
+
 - Zeek now includes a PostgreSQL protocol analyzer. This analyzer is enabled
   by default. The analyzer's events and its ``postgresql.log`` should be
   considered preliminary and experimental until the arrival of Zeek's next
@@ -55,38 +61,86 @@ New Functionality
   If you observe PostgreSQL traffic in your environment, please provide feedback
   about the analyzer and structure of the new log.

-* The LDAP analyzer now supports handling of non-sealed GSS-API WRAP tokens.
+- Broker's message I/O buffering now operates on per-peering granularity at the
+  sender (it was previously global) and provides configurable overflow handling
+  when a fast sender overwhelms a slow receiver, via the following new tunables
+  in the ``Broker`` module:

-* StartTLS support was added to the LDAP analyzer. The SSL analyzer is enabled
+      const peer_buffer_size = 2048 &redef;
+      const peer_overflow_policy = "disconnect" &redef;
+      const web_socket_buffer_size = 512 &redef;
+      const web_socket_overflow_policy = "disconnect" &redef;
+
+  When a send buffer overflows (i.e., it is full when a node tries to transmit
+  another message), the sender may drop the message and unpeer the slow receiver
+  (policy ``disconnect``, the default), drop the newest message in the buffer
+  (``drop_newest``), or drop the oldest (``drop_oldest``). Buffer sizes are
+  measured in number of messages, not bytes. Note that "sender" and "receiver"
+  are independent of the direction in which Zeek established the peering. After
+  disconnects Zeek automatically tries to re-establish peering with the slow
+  node, in case it recovers.
+
+  Zeek notifies you in two ways of such disconnects:
+
+  * A cluster.log entry for the sending node indicates that a slow peered node
+    has been removed. Here node ``worker01`` has removed a peered ``proxy01``:
+
+      1733468802.626622 worker01 removed due to backpressure overflow: 127.0.0.1:42204/tcp (proxy01)
+
+  * The labeled counter metric ``zeek_broker_backpressure_disconnects_total``
+    in the telemetry framework tracks the number of times such disconnects
+    happen between respective nodes. The following scraped telemetry indicates
+    the same disconnect as above:
+
+      zeek_broker_backpressure_disconnects_total{endpoint="worker01",peer="proxy01"} 1
+
+  To implement custom handling of a backpressure-induced disconnect, add a
+  ``Broker::peer_removed`` event handler, as follows:
+
+      event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string)
+        {
+        if ( "caf::sec::backpressure_overflow" !in msg )
+            return;
+
+        # The local node has disconnected the given endpoint,
+        # add your logic here.
+        }
+
+  These new policies fix a problem in which misbehaving nodes could trigger
+  cascading "lockups" of nodes, each ceasing to transmit any messages.
+
+- The LDAP analyzer now supports handling of non-sealed GSS-API WRAP tokens.
+
+- StartTLS support was added to the LDAP analyzer. The SSL analyzer is enabled
   for connections where client and server negotiate to TLS through the extended
   request/response mechanism.

-* The ``unknown_protocols()`` event now includes the name of all packet
+- The ``unknown_protocols()`` event now includes the name of all packet
   analyzer used for processing the packet when the event is raised. The
   ``unknown_protocol.log`` file was extended to include this information.

-* The MySQL analyzer now generates a ``mysql_user_change()`` event when
+- The MySQL analyzer now generates a ``mysql_user_change()`` event when
   the user changes mid-session via the ``COM_USER_CHANGE`` command.

-* The DNS analyzer was extended to support TKEY RRs (RFC 2390). A corresponding
+- The DNS analyzer was extended to support TKEY RRs (RFC 2390). A corresponding
   ``dns_TKEY`` event was added.

-* The ``signature_match()`` and custom signature events now receive the end of
+- The ``signature_match()`` and custom signature events now receive the end of
   match offset within the ``data`` parameter as an optional parameter named
   ``end_of_match``.

      event signature_match(state: signature_state, msg: string, data: string, end_of_match: count);

-* A we plugin hook ``InitPreExecution()`` has been added to allow introspection
+- A new plugin hook ``InitPreExecution()`` has been added to allow introspection
   of Zeek's AST after ZAM optimizations ran. This hook executes right before
   the ``zeek_init()`` event is enqueued.

-* The SQLite logger now supports setting the value of the SQLite synchronous mode,
+- The SQLite logger now supports setting the value of the SQLite synchronous mode,
   as well as of the journal mode. For example, WAL mode can be enabled by setting:

      redef LogSQLite::journal_mode=LogSQLite::SQLITE_JOURNAL_MODE_WAL;

-* A pseudo protocol analyzer StreamEvent has been added. Attaching this analyzer
+- A pseudo protocol analyzer StreamEvent has been added. Attaching this analyzer
   to TCP connections allows processing the connection's stream data in the
   scripting layer. One example use-case is interactive terminal sessions over
   HTTP connections upgraded to TCP.
@@ -100,46 +154,64 @@ New Functionality
   This comes with performance caveats: For use-cases with high-data rates
   a native protocol analyzer with dedicated events will be far more efficient.

+- Experimental support for pluggable cluster backends has been added. New plugin
+  components have been introduced to support switching Zeek's Broker-based
+  publish-subscribe and remote logging functionality to alternative implementations.
+
+      redef Cluster::backend = Cluster::CLUSTER_BACKEND_ZEROMQ;
+
+  Besides the backend, the serialization format used for events and log-writes
+  has become pluggable as well.
+
+- The Zeek distribution now includes an experimental ZeroMQ based cluster backend.
+  To experiment with it, load the following script on each cluster node.
+
+      @load frameworks/cluster/backend/zeromq/connect
+
+  Note that Broker-dependent scripts or integrations will become non-functional
+  when doing so as Zeek nodes will not listen on Broker ports anymore, nor will
+  they establish a peering to other nodes.
+
 Changed Functionality
 ---------------------

-* Heuristics for parsing SASL encrypted and signed LDAP traffic have been
+- Heuristics for parsing SASL encrypted and signed LDAP traffic have been
   made more strict and predictable. Please provide input if this results in
   less visibility in your environment.

-* The MySQL analyzer has been improved to better support plugin authentication
+- The MySQL analyzer has been improved to better support plugin authentication
   mechanisms, like caching_sha2_password, as well as recognizing MySQL query
   attributes.

-* The ``mysql.log`` for user change commands will contain *just* the username
+- The ``mysql.log`` for user change commands will contain *just* the username
   instead of the remaining parts of the command, including auth plugin data.

-* The POP3 parser has been hardened to avoid unbounded state growth in the
+- The POP3 parser has been hardened to avoid unbounded state growth in the
   face of one-sided traffic capture or when enabled for non-POP3 traffic.
   Concretely, the Redis protocol's AUTH mechanism enables the POP3 analyzer
   for such connections through DPD.

-* Batching and flushing for local log writers can now be controlled via the
+- Batching and flushing for local log writers can now be controlled via the
   options ``Log::flush_interval`` and ``Log::write_buffer_size``. Previously
   the ``Threading::heartbeat_interval`` was used for flushing and the buffer
   size fixed at 1000.

-* Logging of the FTP PASS command in ``ftp.log`` now honors ``FTP::default_capture_password``
+- Logging of the FTP PASS command in ``ftp.log`` now honors ``FTP::default_capture_password``
   and the password is blanked with "<hidden>". Previously, the argument for the PASS
   command would be logged in clear.

-* The ASCII input reader now suppresses warnings for consecutive invalid lines,
+- The ASCII input reader now suppresses warnings for consecutive invalid lines,
   producing a summary of total suppressions once a valid line is encountered.

-* The `Telemetry::sync()` hook is now invoked on demand. Either when the metrics
+- The `Telemetry::sync()` hook is now invoked on demand. Either when the metrics
   of a node are scraped via the Prometheus HTTP endpoint, or one of the collect
   methods is invoked from Zeek script.

-* The community-id-logging.zeek policy script was used to set ``c$conn$community_id``
+- The community-id-logging.zeek policy script was used to set ``c$conn$community_id``
   during ``new_connection()`` rather than ``connection_state_remove()``, allowing
   other scripts to reuse its value early.

-* Calling ``Broker::publish()`` now uses the event time of the currently
+- Calling ``Broker::publish()`` now uses the event time of the currently
   executing event as network time metadata attached to the remote event.
   Previously, ``network_time()`` was used. This matters if ``Broker::publish()``
   is called within scheduled events or called within remote events.
@@ -150,10 +222,13 @@ Removed Functionality
 Deprecated Functionality
 ------------------------

-* The ``Broker::auto_publish()`` function has been deprecated and should
+- The ``Broker::auto_publish()`` function has been deprecated and should
   be replaced with explicit ``Broker::publish()`` invocations that are
   potentially guarded with appropriate ``@if`` or ``@ifdef`` directives.

+- The misspelled ``complte_flag`` in the ``dns_binds_rr`` record has been deprecated.
+  The new ``complete_flag`` uses type ``count`` instead of ``string``.
+
 Zeek 7.0.0
 ==========

@@ -3859,7 +3934,7 @@ Removed Functionality

 - Functionality for writing/reading binary event streams was
   removed. This functionality relied on the old communication code
-  anc was basically untested. The ``-R`` command-line option (replay)
+  and was basically untested. The ``-R`` command-line option (replay)
   as well as the ``capture_events`` function were removed.

 - Removed p0f (passive OS fingerprinting) support. The version of
@@ -4539,14 +4614,14 @@ Bro 2.5.4

 Bro 2.5.4 primarily fixes security issues:

-* Multiple fixes and improvements to BinPAC generated code related to
+- Multiple fixes and improvements to BinPAC generated code related to
   array parsing, with potential impact to all Bro's BinPAC-generated
   analyzers in the form of buffer over-reads or other invalid memory
   accesses depending on whether a particular analyzer incorrectly
   assumed that the evaluated-array-length expression is actually the
   number of elements that were parsed out from the input.

-* The NCP analyzer (not enabled by default and also updated to actually
+- The NCP analyzer (not enabled by default and also updated to actually
   work with newer Bro APIs in the release) performed a memory allocation
   based directly on a field in the input packet and using signed integer
   storage. This could result in a signed integer overflow and memory
@@ -4556,9 +4631,9 @@ Bro 2.5.4 primarily fixes security issues:

 There's also the following bug fixes:

-* A memory leak in the SMBv1 analyzer.
+- A memory leak in the SMBv1 analyzer.

-* The MySQL analyzer was generally not working as intended, for example,
+- The MySQL analyzer was generally not working as intended, for example,
   it now is able to parse responses that contain multiple results/rows.

 Bro 2.5.3
VERSION (2 lines changed)
@@ -1 +1 @@
-7.1.0-dev.695
+7.1.0-dev.775
(submodule)
@@ -1 +1 @@
-Subproject commit 2a6e6201f7b43e213f2bac3863ca571b659e8a16
+Subproject commit 222571c9bcbb84dcd68df5a02c91dec9988646d2

(submodule)
@@ -1 +1 @@
-Subproject commit 989c7513c3b6056a429a5d48dacdc9a2c1b216a7
+Subproject commit 9590947dc1d4e8096af21e344311c6b1d188d197

(submodule)
@@ -1 +1 @@
-Subproject commit bdc15fab95b1ca2bd370fa25d91f7879b5da35fc
+Subproject commit ab6aff89296d11363427beab34f88258c0abd467

(submodule)
@@ -1 +1 @@
-Subproject commit 5bcc14085178ed4ddfa9ad972b441c36e8bc0787
+Subproject commit 1f249e911a1a4f7b90ec99e9aed8c3f7b7fcfb79

(submodule)
@@ -1 +1 @@
-Subproject commit 7e1a8448083ef0013f15e67ce001836e680589a2
+Subproject commit 67ae69914d78d987bffd7a6f22f0eead3772fe72
@@ -2,7 +2,7 @@ FROM alpine:latest

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20230823
+ENV DOCKERFILE_VERSION 20241024

 RUN apk add --no-cache \
 bash \
@@ -10,6 +10,7 @@ RUN apk add --no-cache \
 bsd-compat-headers \
 ccache \
 cmake \
+cppzmq \
 curl \
 diffutils \
 dnsmasq \
@@ -2,7 +2,7 @@ FROM quay.io/centos/centos:stream9

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20230801
+ENV DOCKERFILE_VERSION 20241024

 # dnf config-manager isn't available at first, and
 # we need it to install the CRB repo below.
@@ -22,6 +22,7 @@ RUN dnf -y --nobest install \
 bison \
 ccache \
 cmake \
+cppzmq-devel \
 diffutils \
 flex \
 gcc \
@@ -12,8 +12,8 @@ import argparse
 import copy
 import json
 import logging
-import pathlib
 import os
+import pathlib
 import subprocess
 import sys

@@ -38,14 +38,22 @@ def git_available():

 def git_is_repo(d: pathlib.Path):
     try:
-        git("-C", str(d), "rev-parse", "--is-inside-work-tree", stderr=subprocess.DEVNULL)
+        git(
+            "-C",
+            str(d),
+            "rev-parse",
+            "--is-inside-work-tree",
+            stderr=subprocess.DEVNULL,
+        )
         return True
     except subprocess.CalledProcessError:
         return False


 def git_is_dirty(d: pathlib.Path):
-    return (len(git("-C", str(d), "status", "--untracked=no", "--short").splitlines()) > 0)
+    return (
+        len(git("-C", str(d), "status", "--untracked=no", "--short").splitlines()) > 0
+    )


 def git_generic_info(d: pathlib.Path):
@@ -111,7 +119,9 @@ def collect_git_info(zeek_dir: pathlib.Path):
     info["name"] = "zeek"
     info["version"] = (zeek_dir / "VERSION").read_text().strip()
     info["submodules"] = collect_submodule_info(zeek_dir)
-    info["branch"] = git("-C", str(zeek_dir), "rev-parse", "--abbrev-ref", "HEAD").strip()
+    info["branch"] = git(
+        "-C", str(zeek_dir), "rev-parse", "--abbrev-ref", "HEAD"
+    ).strip()
     info["source"] = "git"

     return info
@@ -156,14 +166,13 @@ def main():
         for p in [p.strip() for p in v.split(";") if p.strip()]:
             yield pathlib.Path(p)

-    parser.add_argument("included_plugin_dirs",
-                        default="",
-                        nargs="?",
-                        type=included_plugin_dir_conv)
+    parser.add_argument(
+        "included_plugin_dirs", default="", nargs="?", type=included_plugin_dir_conv
+    )
     parser.add_argument("--dir", default=".")
-    parser.add_argument("--only-git",
-                        action="store_true",
-                        help="Do not try repo-info.json fallback")
+    parser.add_argument(
+        "--only-git", action="store_true", help="Do not try repo-info.json fallback"
+    )
     args = parser.parse_args()

     logging.basicConfig(format="%(levelname)s: %(message)s")
@@ -210,7 +219,9 @@ def main():

     zkg_provides_info = copy.deepcopy(included_plugins_info)
     # Hardcode the former spicy-plugin so that zkg knows Spicy is available.
-    zkg_provides_info.append({"name": "spicy-plugin", "version": info["version"].split("-")[0]})
+    zkg_provides_info.append(
+        {"name": "spicy-plugin", "version": info["version"].split("-")[0]}
+    )
     info["zkg"] = {"provides": zkg_provides_info}

     json_str = json.dumps(info, indent=2, sort_keys=True)
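For reference, the semicolon-separated plugin-path conversion that the hunk above reformats behaves like this small standalone sketch in Python (the sample input value is invented for illustration):

```python
import pathlib


def included_plugin_dir_conv(v: str):
    # Same logic as the converter in the script above: split on ';',
    # drop empty entries, and yield each remaining path as a pathlib.Path.
    for p in [p.strip() for p in v.split(";") if p.strip()]:
        yield pathlib.Path(p)


# Example input of the kind a build system might pass (made up):
print(list(included_plugin_dir_conv("plugins/a;plugins/b; ;")))
# On POSIX this prints: [PosixPath('plugins/a'), PosixPath('plugins/b')]
```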
@@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20230801
+ENV DOCKERFILE_VERSION 20241024

 RUN apt-get update && apt-get -y install \
 bison \
@@ -22,6 +22,7 @@ RUN apt-get update && apt-get -y install \
 libpcap-dev \
 libssl-dev \
 libuv1-dev \
+libzmq3-dev \
 make \
 python3 \
 python3-dev \
@@ -4,13 +4,14 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20230801
+ENV DOCKERFILE_VERSION 20241024

 RUN apt-get update && apt-get -y install \
 bison \
 bsdmainutils \
 ccache \
 cmake \
+cppzmq-dev \
 curl \
 dnsmasq \
 flex \
|
|||
|
||||
# A version field to invalidate Cirrus's build cache when needed, as suggested in
|
||||
# https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
|
||||
ENV DOCKERFILE_VERSION 20240617
|
||||
ENV DOCKERFILE_VERSION 20241024
|
||||
|
||||
RUN dnf -y install \
|
||||
bison \
|
||||
ccache \
|
||||
cmake \
|
||||
cppzmq-devel \
|
||||
diffutils \
|
||||
dnsmasq \
|
||||
flex \
|
||||
|
|
|
@@ -2,12 +2,13 @@ FROM fedora:41

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20241112
+ENV DOCKERFILE_VERSION 20241115

 RUN dnf -y install \
 bison \
 ccache \
 cmake \
+cppzmq-devel \
 diffutils \
 findutils \
 flex \
@@ -6,7 +6,7 @@ set -e
 set -x

 env ASSUME_ALWAYS_YES=YES pkg bootstrap
-pkg install -y bash git cmake swig bison python3 base64 flex ccache jq dnsmasq
+pkg install -y bash cppzmq git cmake swig bison python3 base64 flex ccache jq dnsmasq
 pkg upgrade -y curl
 pyver=$(python3 -c 'import sys; print(f"py{sys.version_info[0]}{sys.version_info[1]}")')
 pkg install -y $pyver-sqlite3
ci/license-header.py (new executable file, 38 lines)
@@ -0,0 +1,38 @@
#!/usr/bin/env python3

import re
import sys

exit_code = 0

pat1 = re.compile(
    r"See the file \"COPYING\" in the main distribution directory for copyright."
)

# This is the copyright line used within Spicy plugin and popular in
# Spicy analyzers.
pat2 = re.compile(r"Copyright \(c\) 2... by the Zeek Project. See COPYING for details.")


def match_line(line):
    for pat in [pat1, pat2]:
        m = pat.search(line)
        if m is not None:
            return True

    return False


for f in sys.argv[1:]:
    has_license_header = False
    with open(f) as fp:
        for line in fp:
            line = line.strip()
            if has_license_header := match_line(line):
                break

    if not has_license_header:
        print(f"{f}:does not seem to contain a license header", file=sys.stderr)
        exit_code = 1

sys.exit(exit_code)
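For orientation, the two regular expressions above accept Zeek's usual COPYING reference and the Spicy-style copyright line. A minimal standalone sketch of how they classify sample header lines (the sample strings are illustrative, not taken from the commit):

```python
import re

# Same patterns as in ci/license-header.py above.
pat1 = re.compile(r"See the file \"COPYING\" in the main distribution directory for copyright.")
pat2 = re.compile(r"Copyright \(c\) 2... by the Zeek Project. See COPYING for details.")

samples = [
    '// See the file "COPYING" in the main distribution directory for copyright.',
    "# Copyright (c) 2024 by the Zeek Project. See COPYING for details.",
    "// An unrelated first line",
]

for s in samples:
    # A file passes the hook as soon as any line matches either pattern.
    print(bool(pat1.search(s) or pat2.search(s)), s)
# Prints True for the first two samples and False for the last one.
```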
@@ -7,7 +7,7 @@ set -x

 brew update
 brew upgrade cmake
-brew install openssl@3 swig bison flex ccache libmaxminddb dnsmasq
+brew install cppzmq openssl@3 swig bison flex ccache libmaxminddb dnsmasq

 if [ $(sw_vers -productVersion | cut -d '.' -f 1) -lt 14 ]; then
     python3 -m pip install --upgrade pip
@@ -2,7 +2,7 @@ FROM opensuse/leap:15.5

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20230905
+ENV DOCKERFILE_VERSION 20241024

 RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.5:Update/standard/openSUSE:Leap:15.5:Update.repo \
     && zypper refresh \
@@ -10,6 +10,7 @@ RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.5
 bison \
 ccache \
 cmake \
+cppzmq-devel \
 curl \
 flex \
 gcc12 \
@@ -2,7 +2,7 @@ FROM opensuse/leap:15.6

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20230905
+ENV DOCKERFILE_VERSION 20241024

 RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.6:Update/standard/openSUSE:Leap:15.6:Update.repo \
     && zypper refresh \
@@ -10,6 +10,7 @@ RUN zypper addrepo https://download.opensuse.org/repositories/openSUSE:Leap:15.6
 bison \
 ccache \
 cmake \
+cppzmq-devel \
 curl \
 dnsmasq \
 flex \
@@ -2,7 +2,7 @@ FROM opensuse/tumbleweed

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20230801
+ENV DOCKERFILE_VERSION 20241024

 # Remove the repo-openh264 repository, it caused intermittent issues
 # and we should not be needing any packages from it.
@@ -14,6 +14,7 @@ RUN zypper refresh \
 bison \
 ccache \
 cmake \
+cppzmq-devel \
 curl \
 diffutils \
 dnsmasq \
@@ -41,7 +41,7 @@ function banner {

 function run_unit_tests {
     if [[ ${ZEEK_CI_SKIP_UNIT_TESTS} -eq 1 ]]; then
-        printf "Skipping unit tests as requested by task configureation\n\n"
+        printf "Skipping unit tests as requested by task configuration\n\n"
         return 0
     fi
@@ -46,3 +46,16 @@ deadlock:zeek::threading::Queue<zeek::threading::BasicInputMessage*>::LocksForAl
 # This only happens at shutdown. It was supposedly fixed in civetweb, but has cropped
 # up again. See https://github.com/civetweb/civetweb/issues/861 for details.
 race:mg_stop
+
+# Uninstrumented library.
+#
+# We'd need to build zmq with TSAN enabled, without it reports data races
+# as it doesn't see the synchronization done [1], but also there's reports
+# that ZeroMQ uses non-standard synchronization that may be difficult for
+# TSAN to see.
+#
+# [1] https://groups.google.com/g/thread-sanitizer/c/7UZqM02yMYg/m/KlHOv2ckr9sJ
+# [2] https://github.com/zeromq/libzmq/issues/3919
+#
+called_from_lib:libzmq.so.5
+called_from_lib:libzmq.so
@@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20240528
+ENV DOCKERFILE_VERSION 20241024

 RUN apt-get update && apt-get -y install \
 bc \
@@ -23,9 +23,10 @@ RUN apt-get update && apt-get -y install \
 libmaxminddb-dev \
 libpcap-dev \
 libssl-dev \
+libzmq3-dev \
 make \
-python3 \
-python3-dev \
+python3.9 \
+python3.9-dev \
 python3-pip\
 ruby \
 sqlite3 \
@@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20230801
+ENV DOCKERFILE_VERSION 20241024

 RUN apt-get update && apt-get -y install \
 bc \
@@ -23,6 +23,7 @@ RUN apt-get update && apt-get -y install \
 libmaxminddb-dev \
 libpcap-dev \
 libssl-dev \
+libzmq3-dev \
 make \
 python3 \
 python3-dev \
@@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20240807
+ENV DOCKERFILE_VERSION 20241024

 RUN apt-get update && apt-get -y install \
 bc \
@@ -14,6 +14,7 @@ RUN apt-get update && apt-get -y install \
 clang-18 \
 clang++-18 \
 cmake \
+cppzmq-dev \
 curl \
 dnsmasq \
 flex \
@@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"

 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20240807
+ENV DOCKERFILE_VERSION 20241115

 RUN apt-get update && apt-get -y install \
 bc \
@@ -14,6 +14,7 @@ RUN apt-get update && apt-get -y install \
 clang-18 \
 clang++-18 \
 cmake \
+cppzmq-dev \
 curl \
 dnsmasq \
 flex \
configure (vendored, 4 lines changed)
@@ -75,6 +75,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
   --disable-broker-tests don't try to build Broker unit tests
   --disable-btest don't install BTest
   --disable-btest-pcaps don't install Zeek's BTest input pcaps
+  --disable-cluster-backend-zeromq don't build Zeek's ZeroMQ cluster backend
   --disable-cpp-tests don't build Zeek's C++ unit tests
   --disable-javascript don't build Zeek's JavaScript support
   --disable-port-prealloc disable pre-allocating the PortVal array in ValManager
@@ -333,6 +334,9 @@ while [ $# -ne 0 ]; do
     --disable-btest-pcaps)
         append_cache_entry INSTALL_BTEST_PCAPS BOOL false
         ;;
+    --disable-cluster-backend-zeromq)
+        append_cache_entry ENABLE_CLUSTER_BACKEND_ZEROMQ BOOL false
+        ;;
     --disable-cpp-tests)
         append_cache_entry ENABLE_ZEEK_UNIT_TESTS BOOL false
         ;;
doc (submodule, 2 lines changed)
@@ -1 +1 @@
-Subproject commit 9c8b992a55908628f7b6ccc119d7cefb2c2cc0a1
+Subproject commit 240a3d2d8cbbfcfd83d7e6a41c648d26ee4790d0
@@ -21,6 +21,7 @@ RUN apt-get -q update \
 bison \
 ccache \
 cmake \
+cppzmq-dev \
 flex \
 g++ \
 gcc \
@@ -21,13 +21,14 @@ RUN apt-get -q update \
 jq \
 libmaxminddb0 \
 libnode108 \
-libpython3.11 \
 libpcap0.8 \
+libpython3.11 \
 libssl3 \
 libuv1 \
 libz1 \
-python3-minimal \
+libzmq5 \
 python3-git \
+python3-minimal \
 python3-semantic-version \
 python3-websocket \
 && apt-get clean \
ruff.toml (new file, 8 lines)
@@ -0,0 +1,8 @@
target-version = "py39"

# Skip anything in the auxil directory. This includes pysubnetree which
# should be handled separately.
exclude = ["auxil"]

[lint]
select = ["C4", "F", "I", "ISC", "UP"]
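Reading the selection: C4, F, I, ISC and UP correspond to ruff's flake8-comprehensions, Pyflakes, isort, implicit-string-concatenation and pyupgrade rule families. That mapping is my reading of ruff's rule prefixes, not something the commit states. A small, hypothetical before/after of the kind of rewrite the UP rules drive once the target is Python 3.9:

```python
# Hypothetical example of a pyupgrade (UP) rewrite under target-version = "py39".
from typing import List  # flagged: typing.List is unnecessary on 3.9+


def names_before(xs: List[str]) -> List[str]:  # old-style annotations
    return sorted(xs)


def names_after(xs: list[str]) -> list[str]:  # builtin generics, preferred since 3.9
    return sorted(xs)
```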
@@ -1,3 +1,4 @@
 @load ./main
 @load ./store
 @load ./log
+@load ./backpressure
scripts/base/frameworks/broker/backpressure.zeek (new file, 35 lines)
@@ -0,0 +1,35 @@
##! This handles Broker peers that fall so far behind in handling messages that
##! this node sends it that the local Broker endpoint decides to unpeer them.
##! Zeek captures this as follows:
##!
##! - In broker.log, with a regular "peer-removed" entry indicating CAF's reason.
##! - Via eventing through :zeek:see:`Broker::peer_removed` as done in this script.
##!
##! The cluster framework additionally captures the unpeering as follows:
##!
##! - In cluster.log, with a higher-level message indicating the node names involved.
##! - Via telemetry, using a labeled counter.

event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string)
    {
    if ( "caf::sec::backpressure_overflow" !in msg ) {
        return;
    }

    if ( ! endpoint?$network ) {
        Reporter::error(fmt("Missing network info to re-peer with %s", endpoint$id));
        return;
    }

    # Re-establish the peering so Broker's reconnect behavior kicks in once
    # the other endpoint catches up. Broker will periodically re-try
    # connecting as necessary. If the other endpoint originally connected to
    # us, our attempt will fail (since we attempt to connect to the peer's
    # ephemeral port), but in that case the peer will reconnect with us once
    # it recovers.
    #
    # We could do this more cleanly by leveraging information from the
    # cluster framework (since it knows who connects to whom), but that
    # would further entangle Broker into it.
    Broker::peer(endpoint$network$address, endpoint$network$bound_port);
    }
@@ -86,6 +86,24 @@ export {
     ## ZEEK_BROKER_MAX_THREADS environment variable overrides this setting.
     const max_threads = 1 &redef;

+    ## Max number of items we buffer at most per peer. What action to take when
+    ## the buffer reaches its maximum size is determined by
+    ## `peer_overflow_policy`.
+    const peer_buffer_size = 2048 &redef;
+
+    ## Configures how Broker responds to peers that cannot keep up with the
+    ## incoming message rate. Available strategies:
+    ## - disconnect: drop the connection to the unresponsive peer
+    ## - drop_newest: replace the newest message in the buffer
+    ## - drop_oldest: remove the oldest message from the buffer, then append
+    const peer_overflow_policy = "disconnect" &redef;
+
+    ## Same as `peer_buffer_size` but for WebSocket clients.
+    const web_socket_buffer_size = 512 &redef;
+
+    ## Same as `peer_overflow_policy` but for WebSocket clients.
+    const web_socket_overflow_policy = "disconnect" &redef;
+
     ## The CAF scheduling policy to use. Available options are "sharing" and
     ## "stealing". The "sharing" policy uses a single, global work queue along
     ## with mutex and condition variable used for accessing it, which may be
@@ -14,6 +14,9 @@ redef Broker::log_topic = Cluster::rr_log_topic;
 # Add a cluster prefix.
 @prefixes += cluster

+# This should soon condition on loading only when Broker is in use.
+@load ./broker-backpressure
+
 @if ( Supervisor::is_supervised() )
 # When running a supervised cluster, populate Cluster::nodes from the node table
 # the Supervisor provides to new Zeek nodes. The management framework configures
scripts/base/frameworks/cluster/broker-backpressure.zeek (new file, 29 lines)
@@ -0,0 +1,29 @@
# Notifications for Broker-reported backpressure overflow.
# See base/frameworks/broker/backpressure.zeek for context.

@load base/frameworks/telemetry

module Cluster;

global broker_backpressure_disconnects_cf = Telemetry::register_counter_family([
    $prefix="zeek",
    $name="broker-backpressure-disconnects",
    $unit="",
    $label_names=vector("peer"),
    $help_text="Number of Broker peerings dropped due to a neighbor falling behind in message I/O",
]);

event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string)
    {
    if ( ! endpoint?$network || "caf::sec::backpressure_overflow" !in msg )
        return;

    local nn = nodeid_to_node(endpoint$id);

    Cluster::log(fmt("removed due to backpressure overflow: %s%s:%s (%s)",
        nn$name != "" ? "" : "non-cluster peer ",
        endpoint$network$address, endpoint$network$bound_port,
        nn$name != "" ? nn$name : endpoint$id));
    Telemetry::counter_family_inc(broker_backpressure_disconnects_cf,
        vector(nn$name != "" ? nn$name : "unknown"));
    }
@@ -242,6 +242,13 @@ export {
     ## of the cluster that is started up.
     const node = getenv("CLUSTER_NODE") &redef;

+    ## Function returning this node's identifier.
+    ##
+    ## By default this is :zeek:see:`Broker::node_id`, but can be
+    ## redefined by other cluster backends. This identifier should be
+    ## a short lived identifier that resets when a node is restarted.
+    global node_id: function(): string = Broker::node_id &redef;
+
     ## Interval for retrying failed connections between cluster nodes.
     ## If set, the ZEEK_DEFAULT_CONNECT_RETRY (given in number of seconds)
     ## environment variable overrides this option.
@@ -270,7 +277,7 @@ export {
     ##
     ## Returns: a topic string that may used to send a message exclusively to
     ## a given cluster node.
-    global node_topic: function(name: string): string;
+    global node_topic: function(name: string): string &redef;

     ## Retrieve the topic associated with a specific node in the cluster.
     ##
@@ -279,7 +286,16 @@ export {
     ##
     ## Returns: a topic string that may used to send a message exclusively to
     ## a given cluster node.
-    global nodeid_topic: function(id: string): string;
+    global nodeid_topic: function(id: string): string &redef;
+
+    ## Retrieve the cluster-level naming of a node based on its node ID,
+    ## a backend-specific identifier.
+    ##
+    ## id: the node ID of a peer.
+    ##
+    ## Returns: the :zeek:see:`Cluster::NamedNode` for the requested node, if
+    ## known, otherwise a "null" instance with an empty name field.
+    global nodeid_to_node: function(id: string): NamedNode;

     ## Initialize the cluster backend.
     ##
@@ -336,7 +352,7 @@ function nodes_with_type(node_type: NodeType): vector of NamedNode
         { return strcmp(n1$name, n2$name); });
     }

-function Cluster::get_node_count(node_type: NodeType): count
+function get_node_count(node_type: NodeType): count
     {
     local cnt = 0;

@@ -349,7 +365,7 @@ function Cluster::get_node_count(node_type: NodeType): count
     return cnt;
     }

-function Cluster::get_active_node_count(node_type: NodeType): count
+function get_active_node_count(node_type: NodeType): count
     {
     return node_type in active_node_ids ? |active_node_ids[node_type]| : 0;
     }
@@ -394,6 +410,17 @@ function nodeid_topic(id: string): string
     return nodeid_topic_prefix + id + "/";
     }

+function nodeid_to_node(id: string): NamedNode
+    {
+    for ( name, n in nodes )
+        {
+        if ( n?$id && n$id == id )
+            return NamedNode($name=name, $node=n);
+        }
+
+    return NamedNode($name="", $node=[$node_type=NONE, $ip=0.0.0.0]);
+    }
+
 event Cluster::hello(name: string, id: string) &priority=10
     {
     if ( name !in nodes )
@@ -426,7 +453,7 @@ event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=
     if ( ! Cluster::is_enabled() )
         return;

-    local e = Broker::make_event(Cluster::hello, node, Broker::node_id());
+    local e = Broker::make_event(Cluster::hello, node, Cluster::node_id());
     Broker::publish(nodeid_topic(endpoint$id), e);
     }
@@ -94,7 +94,7 @@ event zeek_init() &priority=-10
         return;
         }

-    Cluster::subscribe(nodeid_topic(Broker::node_id()));
+    Cluster::subscribe(nodeid_topic(Cluster::node_id()));
     Cluster::subscribe(node_topic(node));
@@ -7,6 +7,7 @@
 @load-sigs ./java
 @load-sigs ./office
 @load-sigs ./programming
+@load-sigs ./python
 @load-sigs ./video

 @load-sigs ./libmagic
@@ -41,66 +41,3 @@ signature file-elc {
     file-mime "application/x-elc", 10
     file-magic /\x3bELC[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff]/
 }
-
-# Python 1 bytecode
-signature file-pyc-1 {
-    file-magic /^(\xfc\xc4|\x99\x4e)\x0d\x0a/
-    file-mime "application/x-python-bytecode", 80
-}
-
-# Python 2 bytecode
-signature file-pyc-2 {
-    file-magic /^(\x87\xc6|[\x2a\x2d]\xed|[\x3b\x45\x59\x63\x6d\x77\x81\x8b\x8c\x95\x9f\xa9\xb3\xc7\xd1\xdb\xe5\xef\xf9]\xf2|\x03\xf3)\x0d\x0a/
-    file-mime "application/x-python-bytecode", 80
-}
-
-# Python 3.0 bytecode
-signature file-pyc-3-0 {
-    file-magic /^([\xb8\xc2\xcc\xd6\xe0\xea\xf4\xf5\xff]\x0b|[\x09\x13\x1d\x1f\x27\x3b]\x0c)\x0d\x0a/
-    file-mime "application/x-python-bytecode", 80
-}
-
-# Python 3.1 bytecode
-signature file-pyc-3-1 {
-    file-magic /^[\x45\x4f]\x0c\x0d\x0a/
-    file-mime "application/x-python-bytecode", 80
-}
-
-# Python 3.2 bytecode
-signature file-pyc-3-2 {
-    file-magic /^[\x58\x62\x6c]\x0c\x0d\x0a/
-    file-mime "application/x-python-bytecode", 80
-}
-
-# Python 3.3 bytecode
-signature file-pyc-3-3 {
-    file-magic /^[\x76\x80\x94\x9e]\x0c\x0d\x0a/
-    file-mime "application/x-python-bytecode", 80
-}
-
-# Python 3.4 bytecode
-signature file-pyc-3-4 {
-    file-magic /^[\xb2\xcc\xc6\xd0\xda\xe4\xee]\x0c\x0d\x0a/
-    file-mime "application/x-python-bytecode", 80
-}
-
-# Python 3.5 bytecode
-signature file-pyc-3-5 {
-    file-magic /^(\xf8\x0c|[\x02\x0c\x16\x17]\x0d)\x0d\x0a/
-    file-mime "application/x-python-bytecode", 80
-}
-
-# Python 3.6 bytecode
-signature file-pyc-3-6 {
-    file-magic /^[\x20\x21\x2a-\x2d\x2f-\x33]\x0d\x0d\x0a/
-    file-mime "application/x-python-bytecode", 80
-}
-
-# Python 3.7 bytecode
-signature file-pyc-3-7 {
-    file-magic /^[\x3e-\x42]\x0d\x0d\x0a/
-    file-mime "application/x-python-bytecode", 80
-}
111 scripts/base/frameworks/files/magic/python.sig Normal file

@ -0,0 +1,111 @@
# Python magic numbers can be updated/added by looking at the list at
# https://github.com/python/cpython/blob/main/Include/internal/pycore_magic_number.h
# The numbers in the list are converted to little-endian and then to hex for the
# file-magic entries below.
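As a minimal illustration (not part of the signature file), the conversion described above is mechanical: a CPython magic number is packed as a 16-bit little-endian integer and followed by the bytes 0d 0a ("\r\n"), giving the four header bytes the signatures below match. A short Python sketch, using 3531 only as an example value:

# Illustrative helper: derive the .pyc header bytes from a CPython magic number.
import struct

def pyc_header_bytes(magic: int) -> bytes:
    # 16-bit little-endian magic number, then b"\r\n".
    return struct.pack("<H", magic) + b"\r\n"

# 3531 packs to cb 0d 0d 0a, which falls inside the file-pyc-3-12
# range [\xac-\xcb]\x0d\x0d\x0a defined below.
print(pyc_header_bytes(3531).hex(" "))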
# Python 1 bytecode
signature file-pyc-1 {
file-magic /^(\xfc\xc4|\x99\x4e)\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 2 bytecode
signature file-pyc-2 {
file-magic /^(\x87\xc6|[\x2a\x2d]\xed|[\x3b\x45\x59\x63\x6d\x77\x81\x8b\x8c\x95\x9f\xa9\xb3\xc7\xd1\xdb\xe5\xef\xf9]\xf2|\x03\xf3)\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.0 bytecode
signature file-pyc-3-0 {
file-magic /^([\xb8\xc2\xcc\xd6\xe0\xea\xf4\xf5\xff]\x0b|[\x09\x13\x1d\x1f\x27\x3b]\x0c)\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.1 bytecode
signature file-pyc-3-1 {
file-magic /^[\x45\x4f]\x0c\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.2 bytecode
signature file-pyc-3-2 {
file-magic /^[\x58\x62\x6c]\x0c\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.3 bytecode
signature file-pyc-3-3 {
file-magic /^[\x76\x80\x94\x9e]\x0c\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.4 bytecode
signature file-pyc-3-4 {
file-magic /^[\xb2\xcc\xc6\xd0\xda\xe4\xee]\x0c\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.5 bytecode
signature file-pyc-3-5 {
file-magic /^(\xf8\x0c|[\x02\x0c\x16\x17]\x0d)\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.6 bytecode
signature file-pyc-3-6 {
file-magic /^[\x20\x21\x2a-\x2d\x2f-\x33]\x0d\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.7 bytecode
signature file-pyc-3-7 {
file-magic /^[\x3e-\x42]\x0d\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.8 bytecode
signature file-pyc-3-8 {
file-magic /^[\x48\x49\x52-\x55]\x0d\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.9 bytecode
signature file-pyc-3-9 {
file-magic /^[\x5c-\x61]\x0d\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.10 bytecode
signature file-pyc-3-10 {
file-magic /^[\x66-\x6f]\x0d\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.11 bytecode
signature file-pyc-3-11 {
file-magic /^[\x7a-\xa7]\x0d\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.12 bytecode
signature file-pyc-3-12 {
file-magic /^[\xac-\xcb]\x0d\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.13 bytecode
signature file-pyc-3-13 {
file-magic /^[\xde-\xf3]\x0d\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}

# Python 3.14 bytecode
# This is in pre-release at this time, and may need to be updated as new
# versions come out.
signature file-pyc-3-14 {
file-magic /^[\x10-\x19]\x0e\x0d\x0a/
file-mime "application/x-python-bytecode", 80
}
@ -4247,8 +4247,9 @@ type dns_binds_rr: record {
algorithm: count; ##< Algorithm for Public Key.
key_id: count; ##< key tag.
removal_flag: count; ##< rm flag.
complte_flag: string; ##< complete flag.
complte_flag: string &deprecated="Remove in v8.1: Use complete_flag instead."; ##< complete flag.
is_query: count; ##< The RR is a query/Response.
complete_flag: count; ##< complete flag.
};

## A Private RR type LOC record.
@ -2,9 +2,6 @@
##! The most convenient way to access this are the Version::number
##! and Version::info constants.

@load base/frameworks/reporter
@load base/utils/strings

module Version;

export {
@ -1,3 +1,4 @@
##! Time-related functions.

## Given an interval, returns a string representing the minutes and seconds
## in the interval (for example, "3m34s").

@ -6,3 +7,22 @@ function duration_to_mins_secs(dur: interval): string
local dur_count = double_to_count(interval_to_double(dur));
return fmt("%dm%ds", dur_count/60, dur_count%60);
}

## Time value representing the 0 timestamp.
const null_ts = double_to_time(0);

## Calculate the packet lag, i.e. the difference between wall clock and the
## timestamp of the currently processed packet. If Zeek is not processing a
## packet, the function returns a 0 interval value.
function get_packet_lag(): interval
{
# We use get_current_packet_ts() instead of network_time() here, because
# network time does not immediately fall back to wall clock if there is
# no packet. Instead, network time remains set to the last seen packet's
# timestamp for ``packet_source_inactivity_timeout``.
local pkt_ts = get_current_packet_ts();
if (pkt_ts == null_ts)
return 0 sec;

return current_time() - pkt_ts;
}
@ -0,0 +1 @@
@load ./main.zeek
@ -0,0 +1,14 @@
##! Establish ZeroMQ connectivity with the broker.

@load ./main

module Cluster::Backend::ZeroMQ;

event zeek_init() &priority=10
{
if ( run_proxy_thread )
Cluster::Backend::ZeroMQ::spawn_zmq_proxy_thread();

Cluster::init();
}
424 scripts/policy/frameworks/cluster/backend/zeromq/main.zeek Normal file
@ -0,0 +1,424 @@
##! ZeroMQ cluster backend support.
##!
##! For publish-subscribe functionality, one node in the Zeek cluster spawns a
##! thread running a central broker listening on an XPUB and XSUB socket.
##! These sockets are connected via `zmq_proxy() <https://libzmq.readthedocs.io/en/latest/zmq_proxy.html>`_.
##! All other nodes connect to this central broker with their own XSUB and
##! XPUB sockets, establishing a global many-to-many publish-subscribe system
##! where each node sees subscriptions and messages from all other nodes in a
##! Zeek cluster. ZeroMQ's `publish-subscribe pattern <http://api.zeromq.org/4-2:zmq-socket#toc9>`_
##! documentation may be a good starting point. Elsewhere in ZeroMQ's documentation,
##! the central broker is also called `forwarder <http://api.zeromq.org/4-2:zmq-proxy#toc5>`_.
##!
##! For remote logging functionality, the ZeroMQ `pipeline pattern <http://api.zeromq.org/4-2:zmq-socket#toc14>`_
##! is used. All logger nodes listen on a PULL socket. Other nodes connect
##! via PUSH sockets to all of the loggers. Concretely, remote logging
##! functionality is not publish-subscribe, but instead leverages ZeroMQ's
##! built-in load-balancing functionality provided by PUSH and PULL
##! sockets.
##!
##! The ZeroMQ cluster backend technically allows running a non-Zeek central
##! broker (it only needs to offer XPUB and XSUB sockets). Further, it is
##! possible to run non-Zeek logger nodes. All a logger node needs to do is
##! open a ZeroMQ PULL socket and interpret the format used by Zeek nodes
##! to send their log writes.
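To make the central broker's role concrete, here is a minimal sketch of a standalone XPUB/XSUB forwarder written with pyzmq. It is not part of Zeek, and the port assignments are illustrative; a real deployment would have to match the listen/connect endpoints configured below:

# Minimal XPUB/XSUB forwarder sketch (requires the pyzmq package).
import zmq

ctx = zmq.Context.instance()
xpub = ctx.socket(zmq.XPUB)  # cluster nodes connect their XSUB sockets here
xsub = ctx.socket(zmq.XSUB)  # cluster nodes connect their XPUB sockets here
xpub.bind("tcp://127.0.0.1:5556")  # illustrative port
xsub.bind("tcp://127.0.0.1:5555")  # illustrative port

# Forward subscriptions and published messages between all connected nodes.
zmq.proxy(xsub, xpub)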
module Cluster::Backend::ZeroMQ;

export {
## The central broker's XPUB endpoint to connect to.
##
## A node connects with its XSUB socket to the XPUB socket
## of the central broker.
const connect_xpub_endpoint = "tcp://127.0.0.1:5556" &redef;

## The central broker's XSUB endpoint to connect to.
##
## A node connects with its XPUB socket to the XSUB socket
## of the central broker.
const connect_xsub_endpoint = "tcp://127.0.0.1:5555" &redef;

## Vector of ZeroMQ endpoints to connect to for logging.
##
## A node's PUSH socket used for logging connects to each
## of the ZeroMQ endpoints listed in this vector.
const connect_log_endpoints: vector of string &redef;

## Toggle for running a central ZeroMQ XPUB-XSUB broker on this node.
##
## If set to ``T``, :zeek:see:`Cluster::Backend::ZeroMQ::spawn_zmq_proxy_thread`
## is called during :zeek:see:`zeek_init`. The node will listen
## on :zeek:see:`Cluster::Backend::ZeroMQ::listen_xsub_endpoint` and
## :zeek:see:`Cluster::Backend::ZeroMQ::listen_xpub_endpoint` and
## forward subscriptions and messages between nodes.
##
## By default, this is set to ``T`` on the manager and ``F`` elsewhere.
const run_proxy_thread: bool = F &redef;

## XSUB listen endpoint for the central broker.
##
## This setting is used for the XSUB socket of the central broker started
## when :zeek:see:`Cluster::Backend::ZeroMQ::run_proxy_thread` is ``T``.
const listen_xsub_endpoint = "tcp://127.0.0.1:5556" &redef;

## XPUB listen endpoint for the central broker.
##
## This setting is used for the XPUB socket of the central broker started
## when :zeek:see:`Cluster::Backend::ZeroMQ::run_proxy_thread` is ``T``.
const listen_xpub_endpoint = "tcp://127.0.0.1:5555" &redef;

## PULL socket address to listen on for log messages.
##
## If empty, don't listen for log messages, otherwise
## a ZeroMQ address to bind to. E.g., ``tcp://127.0.0.1:5555``.
const listen_log_endpoint = "" &redef;

## Configure the ZeroMQ sockets' linger value.
##
## The default used by libzmq is 30 seconds (30 000) which is very long
## when loggers vanish before workers during a shutdown, so we reduce
## this to 500 milliseconds by default.
##
## A value of ``-1`` configures blocking forever, while ``0`` would
## immediately discard any pending messages.
##
## See ZeroMQ's `ZMQ_LINGER documentation <http://api.zeromq.org/4-2:zmq-setsockopt#toc24>`_
## for more details.
const linger_ms: int = 500 &redef;

## Configure ZeroMQ's immediate setting on PUSH sockets.
##
## Setting this to ``T`` will queue log writes only to completed
## connections. By default, log writes are queued to all potential
## endpoints listed in :zeek:see:`Cluster::Backend::ZeroMQ::connect_log_endpoints`.
##
## See ZeroMQ's `ZMQ_IMMEDIATE documentation <http://api.zeromq.org/4-2:zmq-setsockopt#toc21>`_
## for more details.
const log_immediate: bool = F &redef;

## Send high water mark value for the log PUSH sockets.
##
## If reached, Zeek nodes will block or drop messages.
##
## See ZeroMQ's `ZMQ_SNDHWM documentation <http://api.zeromq.org/4-2:zmq-setsockopt#toc46>`_
## for more details.
##
## TODO: Make action configurable (block vs drop)
const log_sndhwm: int = 1000 &redef;

## Receive high water mark value for the log PULL sockets.
##
## If reached, Zeek workers will block or drop messages.
##
## See ZeroMQ's `ZMQ_RCVHWM documentation <http://api.zeromq.org/4-2:zmq-setsockopt#toc35>`_
## for more details.
##
## TODO: Make action configurable (block vs drop)
const log_rcvhwm: int = 1000 &redef;

## Kernel transmit buffer size for log sockets.
##
## Using -1 will use the kernel's default.
##
## See ZeroMQ's `ZMQ_SNDBUF documentation <http://api.zeromq.org/4-2:zmq-setsockopt#toc45>`_.
const log_sndbuf: int = -1 &redef;

## Kernel receive buffer size for log sockets.
##
## Using -1 will use the kernel's default.
##
## See ZeroMQ's `ZMQ_RCVBUF documentation <http://api.zeromq.org/4-2:zmq-setsockopt#toc34>`_
## for more details.
const log_rcvbuf: int = -1 &redef;

## Do not silently drop messages if high-water-mark is reached.
##
## Whether to configure ``ZMQ_XPUB_NODROP`` on the XPUB socket
## to detect when sending a message fails due to reaching
## the high-water-mark.
##
## See ZeroMQ's `ZMQ_XPUB_NODROP documentation <http://api.zeromq.org/4-2:zmq-setsockopt#toc61>`_
## for more details.
const xpub_nodrop: bool = T &redef;

## Do not silently drop messages if high-water-mark is reached.
##
## Whether to configure ``ZMQ_XPUB_NODROP`` on the XPUB socket
## to detect when sending a message fails due to reaching
## the high-water-mark.
##
## This setting applies to the XPUB/XSUB broker started when
## :zeek:see:`Cluster::Backend::ZeroMQ::run_proxy_thread` is ``T``.
##
## See ZeroMQ's `ZMQ_XPUB_NODROP documentation <http://api.zeromq.org/4-2:zmq-setsockopt#toc61>`_
## for more details.
const listen_xpub_nodrop: bool = T &redef;

## Messages to receive before yielding.
##
## Yield from the receive loop when this many messages have been
## received from one of the used sockets.
const poll_max_messages = 100 &redef;

## Bitmask to enable low-level stderr based debug printing.
##
## poll debugging: 1 (produce verbose zmq::poll() output)
##
## Or values from the above list together and set debug_flags
## to the result. E.g. use 7 to select 4, 2 and 1. Only use this
## in development if something seems off. The thread used internally
## will produce output on stderr.
const debug_flags: count = 0 &redef;

## The node topic prefix to use.
global node_topic_prefix = "zeek.cluster.node" &redef;

## The node_id topic prefix to use.
global nodeid_topic_prefix = "zeek.cluster.nodeid" &redef;

## Low-level event when a subscription is added.
##
## Every node observes all subscriptions from other nodes
## in a cluster through its XPUB socket. Whenever a new
## subscription topic is added, this event is raised with
## the topic.
##
## topic: The topic.
global subscription: event(topic: string);

## Low-level event when a subscription vanishes.
##
## Every node observes all subscriptions from other nodes
## in a cluster through its XPUB socket. Whenever a subscription
## is removed from the local XPUB socket, this event is raised
## with the topic set to the removed subscription.
##
## topic: The topic.
global unsubscription: event(topic: string);

## Low-level event sent to a node in response to their subscription.
##
## name: The sending node's name in :zeek:see:`Cluster::nodes`.
##
## id: The sending node's identifier, as generated by :zeek:see:`Cluster::node_id`.
global hello: event(name: string, id: string);

## Expiration for hello state.
##
## How long to wait before expiring information about
## subscriptions and hello messages from other
## nodes. These expirations trigger reporter warnings.
const hello_expiration: interval = 10sec &redef;
}
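As the module documentation above notes, a non-Zeek process can stand in for a logger node by listening on a PULL socket at an address the other nodes have in connect_log_endpoints. A minimal pyzmq sketch (endpoint illustrative; it only receives the raw multipart messages and does not decode Zeek's log serialization format):

# Minimal non-Zeek log receiver sketch (requires the pyzmq package).
import zmq

ctx = zmq.Context.instance()
pull = ctx.socket(zmq.PULL)
pull.bind("tcp://127.0.0.1:5557")  # illustrative; must match what Zeek nodes connect to

while True:
    frames = pull.recv_multipart()
    # Interpreting the frames requires implementing Zeek's log wire format;
    # here we only report how much data arrived.
    print(f"log batch: {len(frames)} frames, {sum(len(f) for f in frames)} bytes")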
redef Cluster::backend = Cluster::CLUSTER_BACKEND_ZEROMQ;

# By default, let the manager node run the proxy thread.
redef run_proxy_thread = Cluster::local_node_type() == Cluster::MANAGER;

function zeromq_node_topic(name: string): string {
return node_topic_prefix + "." + name;
}

function zeromq_nodeid_topic(id: string): string {
return nodeid_topic_prefix + "." + id;
}

# Unique identifier for this node with some debug information.
const my_node_id = fmt("zeromq_%s_%s_%s_%s", Cluster::node, gethostname(), getpid(), unique_id("N"));

function zeromq_node_id(): string {
return my_node_id;
}

redef Cluster::node_topic = zeromq_node_topic;
redef Cluster::nodeid_topic = zeromq_nodeid_topic;
redef Cluster::node_id = zeromq_node_id;

redef Cluster::logger_topic = "zeek.cluster.logger";
redef Cluster::manager_topic = "zeek.cluster.manager";
redef Cluster::proxy_topic = "zeek.cluster.proxy";
redef Cluster::worker_topic = "zeek.cluster.worker";

redef Cluster::proxy_pool_spec = Cluster::PoolSpec(
$topic = "zeek.cluster.pool.proxy",
$node_type = Cluster::PROXY);

redef Cluster::logger_pool_spec = Cluster::PoolSpec(
$topic = "zeek.cluster.pool.logger",
$node_type = Cluster::LOGGER);

redef Cluster::worker_pool_spec = Cluster::PoolSpec(
$topic = "zeek.cluster.pool.worker",
$node_type = Cluster::WORKER);

# Configure listen_log_endpoint based on port in cluster-layout, if any.
@if ( Cluster::local_node_type() == Cluster::LOGGER || (Cluster::manager_is_logger && Cluster::local_node_type() == Cluster::MANAGER) )
const my_node = Cluster::nodes[Cluster::node];
@if ( my_node?$p )
redef listen_log_endpoint = fmt("tcp://%s:%s", my_node$ip, port_to_count(my_node$p));
@endif
@endif

# Populate connect_log_endpoints based on Cluster::nodes on non-logger nodes.
# If you're experimenting with zero-logger clusters, ignore this code and set
# connect_log_endpoints yourself via redef.
event zeek_init() &priority=100
{
if ( Cluster::local_node_type() == Cluster::LOGGER )
return;

if ( Cluster::manager_is_logger && Cluster::local_node_type() == Cluster::MANAGER )
return;

for ( _, node in Cluster::nodes )
{
local endp: string;
if ( node$node_type == Cluster::LOGGER && node?$p )
{
endp = fmt("tcp://%s:%s", node$ip, port_to_count(node$p));
connect_log_endpoints += endp;
}

if ( Cluster::manager_is_logger && node$node_type == Cluster::MANAGER && node?$p )
{
endp = fmt("tcp://%s:%s", node$ip, port_to_count(node$p));
connect_log_endpoints += endp;
}
}

# If there are no endpoints configured, but more than a single
# node in the cluster layout, log an error as that's probably not
# an intended configuration.
if ( |connect_log_endpoints| == 0 && |Cluster::nodes| > 1 )
Reporter::error("No ZeroMQ connect_log_endpoints configured");
}
function nodeid_subscription_expired(nodeids: set[string], nodeid: string): interval
{
Reporter::warning(fmt("Expired subscription from nodeid %s", nodeid));
return 0.0sec;
}

function nodeid_hello_expired(nodeids: set[string], nodeid: string): interval
{
Reporter::warning(fmt("Expired hello from nodeid %s", nodeid));
return 0.0sec;
}

# State about subscriptions and hellos seen from other nodes.
global nodeid_subscriptions: set[string] &create_expire=hello_expiration &expire_func=nodeid_subscription_expired;
global nodeid_hellos: set[string] &create_expire=hello_expiration &expire_func=nodeid_hello_expired;

# The ZeroMQ plugin notifies script land when a new subscription arrived
# on that node's XPUB socket. If the topic of such a subscription starts with
# the nodeid_topic_prefix for another node A, node B seeing the subscription
# sends ZeroMQ::hello() to the topic, announcing its own presence to node A.
# Conversely, when node A sees the subscription for node B's nodeid topic,
# it also sends ZeroMQ::hello(). In other words, every node says hello to all
# other nodes based on subscriptions they observe on their local XPUB sockets.
#
# Once node B has seen both, the nodeid topic subscription and ZeroMQ::hello()
# event from node A, it raises a Cluster::node_up() event for node A.
#
# See also the Cluster::Backend::ZeroMQ::hello() handler below.
#
# 1) node A subscribes to Cluster::nodeid_topic(Cluster::node_id())
# 2) node B observes subscription for node A's nodeid_topic and replies with ZeroMQ::hello()
# 3) node A receives node B's nodeid_topic subscription, replies with ZeroMQ::hello()
# 4) node B receives node A's ZeroMQ::hello() and raises Cluster::node_up()
#    as it has already seen node A's nodeid_topic subscription.
event Cluster::Backend::ZeroMQ::subscription(topic: string)
{
local prefix = nodeid_topic_prefix + ".";

if ( ! starts_with(topic, prefix) )
return;

local nodeid = topic[|prefix|:];

# Do not say hello to ourselves - we won't see it anyhow.
if ( nodeid == Cluster::node_id() )
return;

Cluster::publish(topic, Cluster::Backend::ZeroMQ::hello, Cluster::node, Cluster::node_id());

# If we saw a ZeroMQ::hello from the other node already, send
# it a Cluster::hello.
if ( nodeid in nodeid_hellos )
{
Cluster::publish(Cluster::nodeid_topic(nodeid), Cluster::hello, Cluster::node, Cluster::node_id());
delete nodeid_hellos[nodeid];
}
else
{
add nodeid_subscriptions[nodeid];
}
}

# Receiving ZeroMQ::hello() from another node: If we received a subscription
# for the node's nodeid_topic, reply with a Cluster::hello. If the node never
# properly went away, log a warning and raise a Cluster::node_down() now.
event Cluster::Backend::ZeroMQ::hello(name: string, id: string)
{
if ( name in Cluster::nodes )
{
local n = Cluster::nodes[name];
if ( n?$id )
{
if ( n$id == id )
{
# Duplicate ZeroMQ::hello(), very strange, ignore it.
Reporter::warning(fmt("node '%s' sends ZeroMQ::hello twice (id:%s)",
name, id));
return;
}

Reporter::warning(fmt("node '%s' never said goodbye (old id:%s new id:%s)",
name, n$id, id));

# We raise node_down() here for the old instance,
# but it's obviously fake and somewhat lying.
event Cluster::node_down(name, n$id);
}
}

# It is possible to publish Cluster::hello() directly if the nodeid_topic
# subscription for the other node was already seen. Otherwise, remember
# that Cluster::hello() has been seen and send Cluster::hello() in
# subscription processing further up.
if ( id in nodeid_subscriptions )
{
Cluster::publish(Cluster::nodeid_topic(id), Cluster::hello, Cluster::node, Cluster::node_id());
delete nodeid_subscriptions[id];
}
else
{
add nodeid_hellos[id];
}
}

# If the unsubscription is for a nodeid prefix, extract the
# nodeid that's gone, find the name of the node from the
# cluster layout and raise Cluster::node_down().
event Cluster::Backend::ZeroMQ::unsubscription(topic: string)
{
local prefix = nodeid_topic_prefix + ".";
if ( ! starts_with(topic, prefix) )
return;

local gone_node_id = topic[|prefix|:];
local name = "";
for ( node_name, n in Cluster::nodes ) {
if ( n?$id && n$id == gone_node_id ) {
name = node_name;
break;
}
}

if ( name != "" )
event Cluster::node_down(name, gone_node_id);
else
Reporter::warning(fmt("unsubscription of unknown node with id '%s'", gone_node_id));
}
@ -2,6 +2,7 @@
@load base/frameworks/notice
@load base/frameworks/telemetry
@load base/utils/time

module Stats;

@ -214,7 +215,7 @@ event check_stats(then: time, last_ns: NetStats, last_cs: ConnStats, last_ps: Pr
if ( reading_live_traffic() )
{
info$pkt_lag = current_time() - nettime;
info$pkt_lag = get_packet_lag();
info$pkts_dropped = ns$pkts_dropped - last_ns$pkts_dropped;
info$pkts_link = ns$pkts_link - last_ns$pkts_link;
@ -19,9 +19,14 @@ export {
## The string name of the analyzer attempting to forward the protocol.
analyzer: string &log;

## The identifier of the protocol being forwarded.
## The identifier of the protocol being forwarded in hex notation.
protocol_id: string &log;

## The identifier of the protocol being forwarded as count.
## Note: The count value is not logged by default. It is provided for
## easy access in log policy hooks.
protocol_id_num: count;

## A certain number of bytes at the start of the unknown protocol's
## header.
first_bytes: string &log;

@ -40,6 +45,7 @@ event unknown_protocol(analyzer_name: string, protocol: count, first_bytes: stri
info$ts = network_time();
info$analyzer = analyzer_name;
info$protocol_id = fmt("0x%x", protocol);
info$protocol_id_num = protocol;
info$first_bytes = bytestring_to_hexstr(first_bytes);
info$analyzer_history = analyzer_history;
@ -6,7 +6,7 @@
module SSL;

redef record SSL::Info += {
## List of cient certificate CAs accepted by the server
## List of client certificate CAs accepted by the server
requested_client_certificate_authorities: vector of string &optional &log;
};
@ -11,6 +11,11 @@
# @load frameworks/control/controllee.zeek
# @load frameworks/control/controller.zeek
@ifdef ( Cluster::CLUSTER_BACKEND_ZEROMQ )
@load frameworks/cluster/backend/zeromq/__load__.zeek
# @load frameworks/cluster/backend/zeromq/connect.zeek
@load frameworks/cluster/backend/zeromq/main.zeek
@endif
@load frameworks/cluster/experimental.zeek
# Loaded via the above through test-all-policy-cluster.test
# when running as a manager, creates cluster.log entries
@ -2,6 +2,9 @@
# Scripts which are commented out in test-all-policy.zeek.
@load protocols/ssl/decryption.zeek
@ifdef ( Cluster::CLUSTER_BACKEND_ZEROMQ )
@load frameworks/cluster/backend/zeromq/connect.zeek
@endif
@load frameworks/cluster/nodes-experimental/manager.zeek
@load frameworks/control/controllee.zeek
@load frameworks/control/controller.zeek

@ -28,6 +31,7 @@ event zeek_init() &priority=1000
# fail when run under zeekygen. For the purpose of zeekygen, we could
# probably disable all modules, too.
disable_module_events("Control");
disable_module_events("Cluster::Backend::ZeroMQ");
disable_module_events("Management::Agent::Runtime");
disable_module_events("Management::Controller::Runtime");
disable_module_events("Management::Node");
@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/Anon.h"

#include <sys/time.h>

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// The prefix-preserving IP address anonymization code is largely
// based on (and sometimes directly copied from) Eddie Kohler's
// ipsumdump-1.20 code, per:

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/Base64.h"

#include "zeek/zeek-config.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#pragma once

#include "zeek/zeek-config.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/DNS_Mapping.h"

#include <ares_nameser.h>

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#pragma once

#include <netdb.h>
@ -318,7 +318,7 @@ static void addrinfo_cb(void* arg, int status, int timeouts, struct ares_addrinf
// Push a null on the end so the addr list has a final point during later parsing.
addrs.push_back(NULL);

struct hostent he {};
struct hostent he{};
he.h_name = util::copy_string(result->name);
he.h_addrtype = AF_INET;
he.h_length = sizeof(in_addr);

@ -333,7 +333,7 @@ static void addrinfo_cb(void* arg, int status, int timeouts, struct ares_addrinf
// Push a null on the end so the addr list has a final point during later parsing.
addrs6.push_back(NULL);

struct hostent he {};
struct hostent he{};
he.h_name = util::copy_string(result->name);
he.h_addrtype = AF_INET6;
he.h_length = sizeof(in6_addr);

@ -370,7 +370,7 @@ static void query_cb(void* arg, ares_status_t status, size_t timeouts, const are
}
}
else {
struct hostent he {};
struct hostent he{};

uint32_t ttl = 0;
size_t rr_cnt = ares_dns_record_rr_cnt(dnsrec, ARES_SECTION_ANSWER);
@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Implementation of breakpoints.

#include "zeek/DbgBreakpoint.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Structures and methods for implementing breakpoints in the Zeek debugger.

#pragma once

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Structures and methods for implementing watches in the Zeek debugger.

#pragma once

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Zeek Debugger Help

#include "zeek/zeek-config.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Implementation of watches

#include "zeek/DbgWatch.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Structures and methods for implementing watches in the Zeek debugger.

#pragma once

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Debugging support for Zeek policy files.

#include "zeek/Debug.h"

@ -417,7 +419,7 @@ int dbg_init_debugger(const char* cmdfile) {
// ### Implement this
debug_msg("Command files not supported. Using interactive mode.\n");

// ### if ( interactive ) (i.e., not reading cmds from a file)
// ### if ( interactive ) (i.e., not reading cmds from a file)
#ifdef HAVE_READLINE
init_readline();
#endif
@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Debugging support for Zeek policy files.

#pragma once

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Support routines to help deal with Zeek debugging commands and
// implementation of most commands.

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Support routines to help deal with Zeek debugging commands and
// implementation of most commands.

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#ifdef DEBUG

#include "zeek/DebugLogger.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// A logger for (selective) debugging output. Only compiled in if DEBUG is
// defined.

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/EventHandler.h"

#include "zeek/Desc.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Capsulates local and remote event handlers.

#pragma once

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/EventLauncher.h"

#include "event.bif.func_def"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#pragma once

#include "zeek/Conn.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/EventRegistry.h"

#include <algorithm>

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Each event raised/handled by Zeek is registered in the EventRegistry.

#pragma once

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Classes for tracing/dumping Zeek events.

#pragma once

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/IntSet.h"

#include "zeek/zeek-config.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// A simple but fast data structure for sets of integers.
// Only supported operations are insert, remove and membership test.
//

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/List.h"

#include "zeek/3rdparty/doctest.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#pragma once

// BaseList.h --

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/PacketFilter.h"

#include "zeek/IP.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

// Provides some very limited but fast packet filter mechanisms

#pragma once

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/PolicyFile.h"

#include "zeek/zeek-config.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#pragma once

// Functions for displaying the contents of policy files.

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/PrefixTable.h"

#include "zeek/Reporter.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#pragma once

extern "C" {

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

/*
Apply various randomness tests to a stream of bytes

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#pragma once

#include "zeek/zeek-config.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/Rule.h"

#include "zeek/zeek-config.h"

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#pragma once

#include <climits>

@ -1,3 +1,5 @@
// See the file "COPYING" in the main distribution directory for copyright.

#include "zeek/RuleAction.h"

#include "zeek/zeek-config.h"
Some files were not shown because too many files have changed in this diff.