Mirror of https://github.com/zeek/zeek.git (synced 2025-10-04 15:48:19 +00:00)
Merge remote-tracking branch 'origin/master' into topic/johanna/spicy-tls
* origin/master: (82 commits)
  Update doc submodule [nomail] [skip ci]
  Update zeekjs submodule
  Update broker submodule
  telemetry: Disable metrics centralization by default
  GSSAPI: basic support for MIC/WRAP tokens
  CI: Force rebuild of tumbleweed VM to pick up newer version of python
  formatters/JSON: Make JSON::NullDoubleWriter use zeek::json::detail version
  formatters/JSON: Remove surrounding_braces
  formatters/JSON: Prepare to remove rapidjson from installed Zeek headers
  Stmt/Assert: Use std::move() for backtrace element
  Update cmake submodule [nomail]
  Update external testing hashes for 6.1 deprecation changes
  Remove leftover baseline files from 6.1 deprecation removal
  Update doc submodule [nomail] [skip ci]
  Update external testing repo hashes to current master commits
  Update dce-rpc constants
  -O gen-C++ refinements for BiF failures, negative vector indices, boolean vector operations
  Update docs submodule [nomail] [skip ci]
  Force -std=c++17 mode for plugin targets, remove use of RequireCXX17.cmake
  Add Spicy updates to NEWS for 6.0.
  ...
Commit 061dc73e8f
305 changed files with 2046 additions and 1720 deletions
@@ -23,3 +23,9 @@ repos:
  hooks:
  - id: cmake-format
    exclude: '^auxil/.*$'

- repo: https://github.com/crate-ci/typos
  rev: v1.15.0
  hooks:
  - id: typos
    exclude: '^(.typos.toml|src/SmithWaterman.cc|testing/.*|auxil/.*|scripts/base/frameworks/files/magic/.*|CHANGES)$'
71 .typos.toml (new file)
@@ -0,0 +1,71 @@
[default]
extend-ignore-re = [
    "#.*MSDN Ref.*\\[ms-oaut\\]",
    # seh too close to she
    "registered SEH to support IDL",
    # ALLO is a valid FTP command
    "\"ALLO\".*200",
    "des-ede3-cbc-Env-OID",
    "Remove in v6.1.*SupressWeird",
    "max_repititions:.*Remove in v6.1",
    # On purpose
    "\"THE NETBIOS NAM\"",
    # NFS stuff.
    "commited: stable_how_t &optional",
    # script_opt uses "ot" a lot, but it's very close to "to", so
    # don't want to add it as identifier. Match some patterns.
    "ASSERT\\(ot == OP_.*",
    "ot->InternalType",
    "ot->Tag\\(\\) == TYPE_.*",
    "auto.* ot =",
    "ot = OP_.*",
    "if \\( ot == OP_.*",
    "ot->Yield\\(\\)->InternalType\\(\\)",
    "switch \\( ot \\)",
    "\\(ZAMOpType ot\\)",

    # News stuff
    "SupressWeirds.*deprecated",
    "\"BaR\"",
    "\"xFoObar\"",
    "\"FoO\"",
]

extend-ignore-identifiers-re = [
    "TLS_.*_EDE.*_.*",
    "SSL.*_EDE.*_.*",
    "_3DES_EDE_CBC_SHA",
    "GOST_R_.*",
    "icmp6_nd_.*",
    "ND_ROUTER_.*",
    "ND_NEIGHBOR_.*",
    ".*_ND_option.*",
]

[default.extend-identifiers]
BuildNDOptionsVal = "BuildNDOptionsVal"
ESC_SER = "ESC_SER"
MCA_OCCURED = "MCA_OCCURED"
MNT3ERR_ACCES = "MNT3ERR_ACCES"
ND_QUEUE_OVERFLOW = "ND_QUEUE_OVERFLOW"
ND_REDIRECT = "ND_REDIRECT"
NED_ACK = "NED_ACK"
NFS3ERR_ACCES = "NFS3ERR_ACCES"
NO_SEH = "NO_SEH"
OP_SWITCHS_VVV = "OP_SWITCHS_VVV"
O_WRONLY = "O_WRONLY"
RPC_NT_CALL_FAILED_DNE = "RPC_NT_CALL_FAILED_DNE"
RpcAddPrintProvidor = "RpcAddPrintProvidor"
RpcDeletePrintProvidor = "RpcDeletePrintProvidor"
THA = "THA"
ar_tha = "ar_tha"
have_2nd = "have_2nd"
ot1 = "ot1"
ot2 = "ot2"
uses_seh = "uses_seh"

[default.extend-words]
caf = "caf"
helo = "helo"
# Seems we use this in the management framework
requestor = "requestor"
305 CHANGES
@ -1,3 +1,308 @@
|
|||
6.1.0-dev.115 | 2023-06-21 17:45:35 -0700
|
||||
|
||||
* formatters/JSON: Make JSON::NullDoubleWriter use zeek::json::detail version (Arne Welzel, Corelight)
|
||||
|
||||
Not using inheritance and preferring composition to avoid including the
|
||||
detail/json.h header do an indirection via a unique_ptr and then just
|
||||
re-use the Double() implementation.
|
||||
|
||||
* formatters/JSON: Remove surrounding_braces (Arne Welzel, Corelight)
|
||||
|
||||
This seems to have become unused 4 years ago with 9b76e8faf44e90c41f33f24b18900a50f0840c5a,
|
||||
remove it.
|
||||
|
||||
* formatters/JSON: Prepare to remove rapidjson from installed Zeek headers (Arne Welzel, Corelight)
|
||||
|
||||
threading/formatters/JSON.h currently includes rapidjson headers for declaring
|
||||
the NullDoubleWriter. This appears mostly an internal detail, but
|
||||
results in the situation that 1) we need to ship rapidjson headers with
|
||||
the Zeek install tree and 2) taking care that external plugins are able
|
||||
to find these headers should they include formatters/JSON.h.
|
||||
|
||||
There are currently no other Zeek headers that include rapidjson, so this
|
||||
seems very unfortunate and self-inflicted given it's not actually required.
|
||||
|
||||
Attempt to hide this implementation detail with the goal to remove the
|
||||
rapidjson includes with v7.1 and then also stop bundling and exposing
|
||||
the include path to external plugins.
|
||||
|
||||
The NullDoubleWriter implementation moves into a new formatters/detail/json.h
|
||||
header which is not installed.
|
||||
|
||||
Closes #3128
|
||||
|
||||
6.1.0-dev.110 | 2023-06-21 15:36:32 -0700
|
||||
|
||||
* Update zeekjs submodule (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Update broker submodule (Tim Wojtulewicz, Corelight)
|
||||
|
||||
6.1.0-dev.108 | 2023-06-21 15:33:50 -0700
|
||||
|
||||
* telemetry: Disable metrics centralization by default (Arne Welzel, Corelight)
|
||||
|
||||
Move the telemetry/cluster.zeek file over into policy/frameworks/telemetry/prometheus.zeek.
|
||||
|
||||
Mention it in local.zeek.
|
||||
|
||||
Relates to zeek/broker#366.
|
||||
|
||||
6.1.0-dev.106 | 2023-06-21 15:33:24 -0700
|
||||
|
||||
* GSSAPI: basic support for MIC/WRAP tokens (Johanna Amann, Corelight)
|
||||
|
||||
When MIC/WRAP tokens are encountered, we now skip the message, instead
|
||||
of raising a parse error. The data in the messages is encrypted - so it
|
||||
does not seem work to raise an event at the moment.
|
||||
|
||||
6.1.0-dev.104 | 2023-06-20 10:15:24 -0700
|
||||
|
||||
* CI: Force rebuild of tumbleweed VM to pick up newer version of python (Tim Wojtulewicz, Corelight)
|
||||
|
||||
The version of python included in the existing VM doesn't have the sqlite
|
||||
module included for some reason. Forcing the VM to rebuild installs python311
|
||||
which does include it, fixing a build failure.
|
||||
|
||||
* Stmt/Assert: Use std::move() for backtrace element (Arne Welzel, Corelight)
|
||||
|
||||
6.1.0-dev.100 | 2023-06-15 21:13:46 -0700
|
||||
|
||||
* Update external testing hashes for 6.1 deprecation changes (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Remove leftover baseline files from 6.1 deprecation removal (Tim Wojtulewicz, Corelight)
|
||||
|
||||
6.1.0-dev.96 | 2023-06-15 16:27:36 -0700
|
||||
|
||||
* Update dce-rpc constants (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Update external testing repo hashes to current master commits (Tim Wojtulewicz, Corelight)
|
||||
|
||||
6.1.0-dev.93 | 2023-06-15 16:25:50 -0700
|
||||
|
||||
* -O gen-C++ refinements for BiF failures, negative vector indices, boolean vector operations (Vern Paxson, Corelight)
|
||||
|
||||
6.1.0-dev.91 | 2023-06-15 14:21:03 -0700
|
||||
|
||||
* ZAM bug fix for incorrect management of global variables (Vern Paxson, Corelight)
|
||||
|
||||
* ZAM bug fix (simplification) for nested inline functions that don't do anything (Vern Paxson, Corelight)
|
||||
|
||||
* ZAM support for negative vector indices (Vern Paxson, Corelight)
|
||||
|
||||
* ZAM ignores new "assert" statement (Vern Paxson, Corelight)
|
||||
|
||||
* fixes for doing "script validation" to check for ZAM compile-ability (Vern Paxson, Corelight)
|
||||
|
||||
* tweak for Broker BTest that needs a delay for ZAM (Vern Paxson, Corelight)
|
||||
|
||||
* BTest to check for invalid negative vector indices (Vern Paxson, Corelight)
|
||||
|
||||
6.1.0-dev.80 | 2023-06-15 12:15:29 -0700
|
||||
|
||||
* Force -std=c++17 mode for plugin targets, remove use of RequireCXX17.cmake (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Add Spicy updates to NEWS for 6.0. (Robin Sommer, Corelight)
|
||||
|
||||
6.1.0-dev.76 | 2023-06-15 18:36:25 +0200
|
||||
|
||||
* plugin/Manager: Warn when plugin with the same name is found (Arne Welzel, Corelight)
|
||||
|
||||
This was brought up on Slack as a potential source of confusion during
|
||||
development as it's not visible when plugin directories are ignored outside
|
||||
of looking into the plugin debug stream. I'd actually prefer to just
|
||||
FatalError() this, but a warning seems reasonably visible for interactive
|
||||
usage.
|
||||
|
||||
* Options: Do not output full usage on --test error (Arne Welzel, Corelight)
|
||||
|
||||
...mostly because the usage output is very long and the actual useful
|
||||
error message scrolls off the screen.
|
||||
|
||||
* GH-3090: CMakeLists: Propagate DOCTEST defines to external plugins (Arne Welzel, Corelight)
|
||||
|
||||
If Zeek has not been built with doctest enabled then it's not easy
|
||||
to run a plugin's tests (and if they really wanted to they could
|
||||
again undef the DOCTEST_CONFIG_DISABLE and provide their own doctest
|
||||
main() implementation.
|
||||
|
||||
* GH-3090: CMakeLists: Add rapidjson/include to zeek_dynamic_plugin_base (Arne Welzel, Corelight)
|
||||
|
||||
threading/formatters/JSON.h has a rapidjson include. Extend the
|
||||
include directories of external plugins so they are setup to find
|
||||
these in Zeek's install tree.
|
||||
|
||||
* GH-3090: ZeekPluginBootstrap: Encode Zeek's CMAKE_BUILD_TYPE (Arne Welzel, Corelight)
|
||||
|
||||
...and bump cmake to have plugin's make use of it.
|
||||
|
||||
6.1.0-dev.66 | 2023-06-14 10:09:46 -0700
|
||||
|
||||
* Change table initialization deprecation to error (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Remove 5.2 deprecation we missed (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Remove Supervisor::NodeConfig (6.1 deprecation) (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Remove LogAscii::logdir (6.1 deprecation) (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Make break/next/continue outside loops an error (6.1 deprecation) (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Remove analyzer_confirmation/analyzer_violation events (6.1 deprecation) (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Remove full scripts marked as 6.1 deprecations (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Remove script functions marked as unused (6.1 deprecations) (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Remove deprecations tagged for v6.1 (Tim Wojtulewicz, Corelight)
|
||||
|
||||
6.1.0-dev.54 | 2023-06-14 18:55:27 +0200
|
||||
|
||||
* docker: Add libnode to enable JavaScript support (Arne Welzel, Corelight)
|
||||
|
||||
* docker: Bump images to Debian 12 (Arne Welzel, Corelight)
|
||||
|
||||
6.1.0-dev.50 | 2023-06-14 09:25:58 -0700
|
||||
|
||||
* Fix usage of realpath on macOS, instead preferring grealpath (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Don't generate minimal tarball anymore (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* NEWS: Fix enumeration in Breaking Changes (Arne Welzel, Corelight)
|
||||
|
||||
6.1.0-dev.46 | 2023-06-14 12:59:41 +0200
|
||||
|
||||
* all: Fix typos identified by typos pre-commit hook (Arne Welzel, Corelight)
|
||||
|
||||
* NEWS typo fixes (Arne Welzel, Corelight)
|
||||
|
||||
* Start with a typos pre-commit file (Arne Welzel, Corelight)
|
||||
|
||||
Inspired by Spicy and me creating typos everywhere. Some of the
|
||||
suggestions are also very reasonable.
|
||||
|
||||
6.1.0-dev.42 | 2023-06-14 12:51:08 +0200
|
||||
|
||||
* NEWS: Small section about assert statement (Arne Welzel, Corelight)
|
||||
|
||||
* Stmt: Rework assertion hooks break semantics (Arne Welzel, Corelight)
|
||||
|
||||
Using break in either of the hooks allows to suppress the default reporter
|
||||
error message rather than suppressing solely based on the existence of an
|
||||
assertion_failure() handler.
|
||||
|
||||
* Stmt: Introduce assert statement and related hooks (Arne Welzel, Corelight)
|
||||
|
||||
including two hooks called assertion_failure() and assertion_result() for
|
||||
customization and tracking of assertion results.
|
||||
|
||||
* ZeekArgs: Helper for empty arguments (Arne Welzel, Corelight)
|
||||
|
||||
* Reporter: Allow AssertStmt to throw InterpreterException (Arne Welzel, Corelight)
|
||||
|
||||
* Lift backtrace() code into Func.{h,cc} (Arne Welzel, Corelight)
|
||||
|
||||
This is to be re-used by the assertion facility.
|
||||
|
||||
6.1.0-dev.32 | 2023-06-13 11:29:36 -0700
|
||||
|
||||
* Clarify subitem relationship in CMake configure summary. (Benjamin Bannier, Corelight)
|
||||
|
||||
6.1.0-dev.30 | 2023-06-13 11:28:26 -0700
|
||||
|
||||
* tests: Do not use archiver_rotation_format_func as postprocessor (Arne Welzel, Corelight)
|
||||
|
||||
This test triggered ubsan by putting a function with the wrong type
|
||||
as a post-processor into the .shadow file. Don't do that.
|
||||
|
||||
Likely Zeek should provide a better error message, but hand-crafting
|
||||
.shadow files isn't what is normally done and this is to fix the
|
||||
master build for now.
|
||||
|
||||
6.1.0-dev.28 | 2023-06-13 17:33:31 +0200
|
||||
|
||||
* GH-3112: cluster/logger: Fix leftover-log-rotation in multi-logger setups (Arne Welzel, Corelight)
|
||||
|
||||
Populating log_metadata during zeek_init() is too late for the
|
||||
leftover-log-rotation functionality, so do it at script parse time.
|
||||
|
||||
Also, prepend archiver_ to the log_metadata table and encoding function
|
||||
due to being in the global namespace and to align with the
|
||||
archiver_rotation_format_func. This hasn't been in a released
|
||||
version yet, so fine to rename still.
|
||||
|
||||
Closes #3112
|
||||
|
||||
* cluster/logger: Fix global var reference (Arne Welzel, Corelight)
|
||||
|
||||
6.1.0-dev.25 | 2023-06-12 15:27:20 -0700
|
||||
|
||||
* Update broker submodule [nomail] (Tim Wojtulewicz, Corelight)
|
||||
|
||||
6.1.0-dev.23 | 2023-06-12 11:06:34 -0700
|
||||
|
||||
* Bump `auxil/spicy`. (Benjamin Bannier, Corelight)
|
||||
|
||||
6.1.0-dev.19 | 2023-06-12 11:04:42 -0700
|
||||
|
||||
* test-all-policy: Do not load nodes-experimental/manager.zeek (Arne Welzel, Corelight)
|
||||
|
||||
Turns out loading this script in non-cluster mode uses Cluster::log()
|
||||
and creates cluster.log in the external baselines saying "cluster
|
||||
started". Do not load it into test-all-policy.zeek and instead rely
|
||||
on the new test-all-policy-cluster.test to load it transitively
|
||||
when running as manager for basic checking.
|
||||
|
||||
* cluster/main: Remove extra @if ( Cluster::is_enabled() ) (Arne Welzel, Corelight)
|
||||
|
||||
These have been discussed in the context of "@if &analyze" [1] and
|
||||
am much in favor for not disabling/removing ~100 lines (more than
|
||||
fits on a single terminal) out from the middle of a file. There's no
|
||||
performance impact for having these handlers enabled unconditionally.
|
||||
Also, any future work on "@if &analyze" will look at them again which
|
||||
we could also skip.
|
||||
|
||||
This also reverts back to the behavior where the Cluster::LOG stream
|
||||
is created even in non cluster setups like in previous Zeek versions.
|
||||
As long as no one writes to it there's essentially no difference. If
|
||||
someone does write to Cluster::LOG, I'd argue not black holing these
|
||||
messages is better. Schema generators using Log::active_streams will
|
||||
continue to discover Cluster::LOG even if they run in non-cluster
|
||||
mode.
|
||||
|
||||
https://github.com/zeek/zeek/pull/3062#discussion_r1200498905
|
||||
|
||||
6.1.0-dev.16 | 2023-06-12 10:56:55 -0700
|
||||
|
||||
* socks/dpd: Add newer auth methods (Arne Welzel, Corelight)
|
||||
|
||||
The IANA has allocated a few more authentication methods, minimally
|
||||
extend the signature to catch those.
|
||||
|
||||
* socks/dpd: Fix socks5_server side signature (Arne Welzel, Corelight)
|
||||
|
||||
The server replies with \x05 and identifier for the chosen method.
|
||||
Not quite sure what the previous signature meant capture.
|
||||
|
||||
See also: https://datatracker.ietf.org/doc/html/rfc1928#section-3
|
||||
|
||||
Closes #3099.
|
||||
|
||||
6.1.0-dev.13 | 2023-06-09 11:03:54 +0200
|
||||
|
||||
* ci/windows: choco --localonly is gone (Arne Welzel, Corelight)
|
||||
|
||||
choco 2.0 is now used after some caching changes on the Cirrus side [1]
|
||||
and the --localonly flag is gone from choco [2], remove its usage.
|
||||
|
||||
[1] https://github.com/cirruslabs/cirrus-ci-docs/issues/1174#issuecomment-1580928673
|
||||
[2] https://docs.chocolatey.org/en-us/guides/upgrading-to-chocolatey-v2-v6#the-list-command-now-lists-local-packages-only-and-the-local-only-and-lo-options-have-been-removed
|
||||
|
||||
6.1.0-dev.8 | 2023-06-05 14:16:53 +0200
|
||||
|
||||
* Update zeekctl for pysubnetree/patricia upgrade (Tim Wojtulewicz, Corelight)
|
||||
|
||||
* Update link to slack in README.md (Tim Wojtulewicz, Corelight)
|
||||
|
||||
6.1.0-dev.4 | 2023-05-31 13:48:49 -0700
|
||||
|
||||
* Update bifcl, binpac, and broker repos for cmake changes (Tim Wojtulewicz)
|
||||
|
|
|
@ -188,7 +188,6 @@ add_library(zeek_internal INTERFACE)
|
|||
add_library(Zeek::Internal ALIAS zeek_internal)
|
||||
set_target_properties(zeek_internal PROPERTIES EXPORT_NAME Internal)
|
||||
install(TARGETS zeek_internal EXPORT ZeekTargets)
|
||||
target_compile_features(zeek_internal INTERFACE ${ZEEK_CXX_STD})
|
||||
|
||||
# Skip "link-time version check" in Plugin.h for plugins that we bake in.
|
||||
target_compile_definitions(zeek_internal INTERFACE ZEEK_PLUGIN_SKIP_VERSION_CHECK)
|
||||
|
@ -200,6 +199,8 @@ add_custom_target(zeek_autogen_files)
|
|||
# reasons and backwards compatibility).
|
||||
if (ZEEK_STANDALONE)
|
||||
add_executable(zeek_exe)
|
||||
target_compile_features(zeek_exe PRIVATE ${ZEEK_CXX_STD})
|
||||
set_target_properties(zeek_exe PROPERTIES CXX_EXTENSIONS OFF)
|
||||
target_link_libraries(zeek_exe PRIVATE $<BUILD_INTERFACE:zeek_internal>)
|
||||
add_dependencies(zeek_exe zeek_autogen_files)
|
||||
set_target_properties(zeek_exe PROPERTIES RUNTIME_OUTPUT_NAME zeek)
|
||||
|
@ -238,6 +239,8 @@ endif ()
|
|||
# bundles all of our object libraries and other dependencies.
|
||||
if (ZEEK_ENABLE_FUZZERS)
|
||||
add_library(zeek_fuzzer_shared SHARED)
|
||||
target_compile_features(zeek_fuzzer_shared PRIVATE ${ZEEK_CXX_STD})
|
||||
set_target_properties(zeek_fuzzer_shared PROPERTIES CXX_EXTENSIONS OFF)
|
||||
target_link_libraries(zeek_fuzzer_shared PUBLIC $<BUILD_INTERFACE:zeek_internal>)
|
||||
# Tell zeek_target_link_libraries to add library dependencies as PUBLIC.
|
||||
set(zeek_fuzzer_shared_access PUBLIC)
|
||||
|
@ -284,7 +287,6 @@ target_include_directories(
|
|||
zeek_dynamic_plugin_base
|
||||
INTERFACE $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>)
|
||||
target_compile_features(zeek_dynamic_plugin_base INTERFACE ${ZEEK_CXX_STD})
|
||||
add_library(Zeek::DynamicPluginBase ALIAS zeek_dynamic_plugin_base)
|
||||
set_target_properties(zeek_dynamic_plugin_base PROPERTIES EXPORT_NAME DynamicPluginBase)
|
||||
install(TARGETS zeek_dynamic_plugin_base EXPORT ZeekTargets)
|
||||
|
@ -313,6 +315,11 @@ add_zeek_dynamic_plugin_build_interface_include_directories(
|
|||
${CMAKE_BINARY_DIR}/auxil/binpac/lib
|
||||
${CMAKE_BINARY_DIR}/auxil/broker/include)
|
||||
|
||||
# threading/formatters/JSON.h includes rapidjson headers and may be used
|
||||
# by external plugins, extend the include path.
|
||||
target_include_directories(zeek_dynamic_plugin_base SYSTEM
|
||||
INTERFACE $<INSTALL_INTERFACE:include/zeek/3rdparty/rapidjson/include>)
|
||||
|
||||
# Convenience function for adding an OBJECT library that feeds directly into the
|
||||
# main target(s).
|
||||
#
|
||||
|
@ -329,6 +336,8 @@ function (zeek_add_subdir_library name)
|
|||
# Create target and add the sources.
|
||||
set(target_name "zeek_${name}_obj")
|
||||
add_library(${target_name} OBJECT ${FN_ARGS_SOURCES})
|
||||
target_compile_features(${target_name} PRIVATE ${ZEEK_CXX_STD})
|
||||
set_target_properties(${target_name} PROPERTIES CXX_EXTENSIONS OFF)
|
||||
target_compile_definitions(${target_name} PRIVATE ZEEK_CONFIG_SKIP_VERSION_H)
|
||||
add_dependencies(${target_name} zeek_autogen_files)
|
||||
target_link_libraries(${target_name} PRIVATE $<BUILD_INTERFACE:zeek_internal>)
|
||||
|
@ -405,8 +414,10 @@ endif ()
|
|||
if (ENABLE_ZEEK_UNIT_TESTS)
|
||||
enable_testing()
|
||||
add_definitions(-DDOCTEST_CONFIG_SUPER_FAST_ASSERTS)
|
||||
target_compile_definitions(zeek_dynamic_plugin_base INTERFACE DOCTEST_CONFIG_SUPER_FAST_ASSERTS)
|
||||
else ()
|
||||
add_definitions(-DDOCTEST_CONFIG_DISABLE)
|
||||
target_compile_definitions(zeek_dynamic_plugin_base INTERFACE DOCTEST_CONFIG_DISABLE)
|
||||
endif ()
|
||||
|
||||
if (ENABLE_CCACHE)
|
||||
|
@ -1049,7 +1060,6 @@ include(PCAPTests)
|
|||
include(OpenSSLTests)
|
||||
include(CheckNameserCompat)
|
||||
include(GetArchitecture)
|
||||
include(RequireCXX17)
|
||||
include(FindKqueue)
|
||||
include(FindCAres)
|
||||
include_directories(BEFORE "auxil/out_ptr/include")
|
||||
|
@ -1411,8 +1421,8 @@ message(
|
|||
"\nlibmaxminddb: ${USE_GEOIP}"
|
||||
"\nKerberos: ${USE_KRB5}"
|
||||
"\ngperftools found: ${HAVE_PERFTOOLS}"
|
||||
"\n tcmalloc: ${USE_PERFTOOLS_TCMALLOC}"
|
||||
"\n debugging: ${USE_PERFTOOLS_DEBUG}"
|
||||
"\n - tcmalloc: ${USE_PERFTOOLS_TCMALLOC}"
|
||||
"\n - debugging: ${USE_PERFTOOLS_DEBUG}"
|
||||
"\njemalloc: ${ENABLE_JEMALLOC}"
|
||||
"\n"
|
||||
"\nFuzz Targets: ${ZEEK_ENABLE_FUZZERS}"
|
||||
|
|
12 Makefile
@ -8,8 +8,8 @@
|
|||
BUILD=build
|
||||
REPO=$$(cd $(CURDIR) && basename $$(git config --get remote.origin.url | sed 's/^[^:]*://g'))
|
||||
VERSION_FULL=$(REPO)-$$(cd $(CURDIR) && cat VERSION)
|
||||
VERSION_MIN=$(REPO)-$$(cd $(CURDIR) && cat VERSION)-minimal
|
||||
GITDIR=$$(test -f .git && echo $$(cut -d" " -f2 .git) || echo .git)
|
||||
REALPATH=$$($$(realpath --relative-to=$(pwd) . >/dev/null 2>&1) && echo 'realpath' || echo 'grealpath')
|
||||
|
||||
all: configured
|
||||
$(MAKE) -C $(BUILD) $@
|
||||
|
@ -39,8 +39,8 @@ livehtml:
|
|||
dist:
|
||||
@test -e ../$(VERSION_FULL) && rm -ri ../$(VERSION_FULL) || true
|
||||
@cp -R . ../$(VERSION_FULL)
|
||||
@for i in . $$(git submodule foreach -q --recursive realpath --relative-to=$$(pwd) .); do ((cd ../$(VERSION_FULL)/$$i && test -f .git && cp -R $(GITDIR) .gitnew && rm -f .git && mv .gitnew .git && sed -i.bak -e 's#[[:space:]]*worktree[[:space:]]*=[[:space:]]*.*##g' .git/config) || true); done
|
||||
@for i in . $$(git submodule foreach -q --recursive realpath --relative-to=$$(pwd) .); do (cd ../$(VERSION_FULL)/$$i && git reset -q --hard && git clean -ffdxq); done
|
||||
@for i in . $$(git submodule foreach -q --recursive $(REALPATH) --relative-to=$$(pwd) .); do ((cd ../$(VERSION_FULL)/$$i && test -f .git && cp -R $(GITDIR) .gitnew && rm -f .git && mv .gitnew .git && sed -i.bak -e 's#[[:space:]]*worktree[[:space:]]*=[[:space:]]*.*##g' .git/config) || true); done
|
||||
@for i in . $$(git submodule foreach -q --recursive $(REALPATH) --relative-to=$$(pwd) .); do (cd ../$(VERSION_FULL)/$$i && git reset -q --hard && git clean -ffdxq); done
|
||||
@(cd ../$(VERSION_FULL) && find . -name \.git\* | xargs rm -rf)
|
||||
@(cd ../$(VERSION_FULL) && find . -name \.idea -type d | xargs rm -rf)
|
||||
@(cd ../$(VERSION_FULL) && find . -maxdepth 1 -name build\* | xargs rm -rf)
|
||||
|
@ -48,11 +48,7 @@ dist:
|
|||
@mv ../$(VERSION_FULL) .
|
||||
@COPYFILE_DISABLE=true tar -czf $(VERSION_FULL).tar.gz $(VERSION_FULL)
|
||||
@echo Package: $(VERSION_FULL).tar.gz
|
||||
@mv $(VERSION_FULL) $(VERSION_MIN)
|
||||
@(cd $(VERSION_MIN) && for i in auxil/*; do rm -rf $$i/*; done)
|
||||
@COPYFILE_DISABLE=true tar -czf $(VERSION_MIN).tar.gz $(VERSION_MIN)
|
||||
@echo Package: $(VERSION_MIN).tar.gz
|
||||
@rm -rf $(VERSION_MIN)
|
||||
@rm -rf $(VERSION_FULL)
|
||||
|
||||
distclean:
|
||||
rm -rf $(BUILD)
|
||||
|
|
59 NEWS
@@ -9,9 +9,28 @@ Zeek 6.1.0
Breaking Changes
----------------

- ``assert`` is now a reserved keyword for the new ``assert`` statement.

New Functionality
-----------------

- Added a new ``assert`` statement for assertion based testing and asserting
  runtime state.

      assert <expr: bool>[, <message: string>];

  This statement comes with two hooks. First, ``assertion_failure()`` that
  is invoked for every failing assert statement. Second, ``assertion_result()``
  which is invoked for every assert statement and its outcome. The latter allows
  to construct a summary of failing and passing assert statements. Both hooks
  receive the location and call stack for the ``assert`` statement via a
  ``Backtrace`` vector.

  A failing assert will abort execution of the current event handler similar
  to scripting errors. By default, a reporter error message is logged. Using
  the break statement within ``assertion_failure()`` or ``assertion_result()``
  allows to suppress the default message.
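
  As a rough, hypothetical sketch (only ``assert``, ``assertion_failure`` and
  ``assertion_result`` come from this release; everything else is made up for
  illustration), a small test script could look like:

      global failures = 0;

      # Track the outcome of every assert statement.
      hook assertion_result(result: bool, cond: string, msg: string, bt: Backtrace)
          {
          if ( ! result )
              ++failures;
          }

      # Report failures ourselves and suppress the default reporter error.
      hook assertion_failure(cond: string, msg: string, bt: Backtrace)
          {
          print fmt("assertion failed: %s (%s)", cond, msg);
          break;
          }

      event zeek_init()
          {
          assert 2 + 2 == 4;
          assert to_count("42") == 42, "to_count roundtrip";
          }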

Changed Functionality
---------------------
@@ -46,6 +65,22 @@ Breaking Changes
  come in handy for example when working with tests that compare results against
  log baselines that have not yet been updated.

- Telemetry centralization and Prometheus exposition is not enabled by default
  anymore. Previously, the manager node would open port 9911/tcp by default and
  import all metrics from other nodes. For large clusters, the current implementation
  introduces significant processing overhead on the manager even if the Prometheus
  functionality is not used. While inconvenient, disable this functionality
  (assumed to be used by few as of now) by default to preserve resources.

  The script to enable centralization and the Prometheus endpoint is now
  located in the ``policy/`` folder. Re-enable the old functionality with:

      @load frameworks/telemetry/prometheus

  You may experiment with increasing ``Broker::metrics_export_interval``
  (default 1s) to reduce the extra overhead and communication at the expense
  of stale metrics.
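
  For example, a hypothetical ``local.zeek`` tweak (the 5-second value is
  purely illustrative) would be:

      redef Broker::metrics_export_interval = 5sec;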

- Custom source tarballs require a ``repo-info.json`` file.

  Note, should you be using official Zeek release tarballs only, or build
@@ -109,6 +144,19 @@ New Functionality

  To disable this functionality, pass ``--disable-javascript`` to configure.

- Zeek now comes with Spicy support built in, meaning it can now
  leverage any analyzers written in Spicy out of the box. While the
  interface layer connecting Zeek and Spicy used to be implemented
  through an external Zeek plugin, that code has now moved into the
  Zeek code base itself. We also added infrastructure to Zeek that
  enables its built-in standard analyzers to use Spicy instead of
  Binpac. As initial (simple) examples, Zeek's Syslog and Finger
  analyzers are now implemented in Spicy. While their legacy versions
  remain available as fallbacks for now in case Spicy gets explicitly
  disabled at build time, their use is deprecated and their code won't
  be maintained any further. (Some of these Spicy updates were part of
  Zeek 5.2 already, but hadn't been included in its NEWS section.)

- Zeek events now hold network timestamps. For scheduled events, the timestamp
  represents the network time for which the event was scheduled for, otherwise
  it is the network time at event creation. A new bif ``current_event_time()``

@@ -260,7 +308,7 @@ New Functionality
  recognize CCMP-encrypted packets. These encrypted packets are currently
  dropped to Zeek's inability to do anything with them.

- Add packet analzyers for LLC, SNAP, and Novell 802.3, called from the Ethernet
- Add packet analyzers for LLC, SNAP, and Novell 802.3, called from the Ethernet
  and VLAN analyzers by default.

- Environment variables for the execution of log rotation postprocessors can

@@ -290,6 +338,9 @@ New Functionality
Changed Functionality
---------------------

- The base distribution of the Zeek container images has been upgraded to
  Debian 12 "bookworm" and JavaScript support was enabled.

- When ``get_file_handle()`` is invoked for an analyzer that did not register
  an appropriate callback function, log a warning and return a generic handle
  value based on the analyzer and connection information.

@@ -2381,7 +2432,7 @@ Changed Functionality
- The IOSource API changed fairly wildly. The ``GetFds()`` and
  ``NextTimestamp`` methods no longer exist. If you had previously
  implemented a custom IOSource, you will need to look at the new API
  and make changes to your code to accomodate it. This does not include
  and make changes to your code to accommodate it. This does not include
  packet sources, which should remain functional with little to no
  changes, since the entirety of the changes should be in ``PktSrc``.

@@ -4033,7 +4084,7 @@ Changed Functionality

- HTTP

    Removed 'filename' field (which was seldomly used).
    Removed 'filename' field (which was seldom used).

    New 'orig_filenames' and 'resp_filenames' fields which each
    contain a vector of filenames seen in entities transferred.

@@ -5019,7 +5070,7 @@ New Functionality
  Instead of adding a separate worker entry in node.cfg for each Bro
  worker process on each worker host, it is now possible to just
  specify the number of worker processes on each host and BroControl
  configures everything correctly (including any necessary enviroment
  configures everything correctly (including any necessary environment
  variables for the balancers).

  This change adds three new keywords to the node.cfg file (to be used
2 VERSION
@@ -1 +1 @@
-6.1.0-dev.4
+6.1.0-dev.115
@@ -1 +1 @@
-Subproject commit 3df48de38ef75a5d274c2fa59ad3f798a62c6bfc
+Subproject commit 2a1d3232b75b530a0cd7df4b376ca3608fe8b9df

@@ -1 +1 @@
-Subproject commit ec87b43037dba50648cb93be8940a4db23658905
+Subproject commit 6f4375867083805513a70feb32a626df40039256

@@ -1 +1 @@
-Subproject commit 01c54f8b385c42ac82553fc8e18b28b22f7be62a
+Subproject commit 42341843df09dd7867b8b4ce3059ebd3ebd67278

@@ -1 +1 @@
-Subproject commit e77634d5f92db96e66de0c36ddc4d44893306fa7
+Subproject commit c1cb44eb709e15ef16844c6a8648ed35017409e1
@@ -2,7 +2,7 @@ FROM opensuse/tumbleweed
 
 # A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
-ENV DOCKERFILE_VERSION 20230523
+ENV DOCKERFILE_VERSION 20230620
 
 # Remove the repo-openh264 repository, it caused intermittent issues
 # and we should not be needing any packages from it.
@@ -2,7 +2,7 @@ FROM ubuntu:22.10
 
 ENV DEBIAN_FRONTEND="noninteractive" TZ="America/Los_Angeles"
 
-# A version field to invalide Cirrus's build cache when needed, as suggested in
+# A version field to invalidate Cirrus's build cache when needed, as suggested in
 # https://github.com/cirruslabs/cirrus-ci-docs/issues/544#issuecomment-566066822
 ENV DOCKERFILE_VERSION 20230413
 
@@ -4,4 +4,4 @@ echo %ZEEK_CI_CPUS%
 wmic cpu get NumberOfCores, NumberOfLogicalProcessors/Format:List
 systeminfo
 dir C:
-choco list --localonly
+choco list
2 cmake
@@ -1 +1 @@
-Subproject commit 4e41cdd77f0aa617c23f37b4776a1ba5c4ea4ea3
+Subproject commit afa62ecbe399c3dac41f6ebcdb622f409569edd6
2 doc
@@ -1 +1 @@
-Subproject commit 8a0873c71095136ef1f611a01bf936f7a2805aed
+Subproject commit e479f28d2263ae3c452567a52ef613f144191f08
|
@ -1,7 +1,7 @@
|
|||
# See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
# Layer to build Zeek.
|
||||
FROM debian:bullseye-slim
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
RUN echo 'Acquire::Retries "3";' > /etc/apt/apt.conf.d/80-retries
|
||||
|
||||
|
@ -18,15 +18,20 @@ RUN apt-get -q update \
|
|||
git \
|
||||
libfl2 \
|
||||
libfl-dev \
|
||||
libnode-dev \
|
||||
libmaxminddb-dev \
|
||||
libpcap-dev \
|
||||
libssl-dev \
|
||||
libuv1-dev \
|
||||
libz-dev \
|
||||
make \
|
||||
python3-minimal \
|
||||
python3.9-dev \
|
||||
python3.11-dev \
|
||||
swig \
|
||||
ninja-build \
|
||||
python3-pip \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Tell git all the repositories are safe.
|
||||
RUN git config --global --add safe.directory '*'
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
# Final layer containing all artifacts.
|
||||
FROM debian:bullseye-slim
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
RUN echo 'Acquire::Retries "3";' > /etc/apt/apt.conf.d/80-retries
|
||||
|
||||
|
@ -10,9 +10,11 @@ RUN apt-get -q update \
|
|||
ca-certificates \
|
||||
git \
|
||||
libmaxminddb0 \
|
||||
libpython3.9 \
|
||||
libnode108 \
|
||||
libpython3.11 \
|
||||
libpcap0.8 \
|
||||
libssl1.1 \
|
||||
libssl3 \
|
||||
libuv1 \
|
||||
libz1 \
|
||||
python3-minimal \
|
||||
python3-git \
|
||||
|
|
|
@ -211,12 +211,6 @@ export {
|
|||
## Returns: The :zeek:type:`Cluster::NodeType` the calling node acts as.
|
||||
global local_node_type: function(): NodeType;
|
||||
|
||||
## This gives the value for the number of workers currently connected to,
|
||||
## and it's maintained internally by the cluster framework. It's
|
||||
## primarily intended for use by managers to find out how many workers
|
||||
## should be responding to requests.
|
||||
global worker_count: count = 0 &deprecated="Remove in v6.1. Active worker count can be obtained via get_active_node_count(Cluster::WORKER)";
|
||||
|
||||
## The cluster layout definition. This should be placed into a filter
|
||||
## named cluster-layout.zeek somewhere in the ZEEKPATH. It will be
|
||||
## automatically loaded if the CLUSTER_NODE environment variable is set.
|
||||
|
@ -347,8 +341,6 @@ function nodeid_topic(id: string): string
|
|||
return nodeid_topic_prefix + id + "/";
|
||||
}
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
|
||||
event Cluster::hello(name: string, id: string) &priority=10
|
||||
{
|
||||
if ( name !in nodes )
|
||||
|
@ -374,11 +366,6 @@ event Cluster::hello(name: string, id: string) &priority=10
|
|||
if ( n$node_type !in active_node_ids )
|
||||
active_node_ids[n$node_type] = set();
|
||||
add active_node_ids[n$node_type][id];
|
||||
|
||||
@pragma push ignore-deprecations
|
||||
if ( n$node_type == WORKER )
|
||||
worker_count = get_active_node_count(WORKER);
|
||||
@pragma pop ignore-deprecations
|
||||
}
|
||||
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10
|
||||
|
@ -400,11 +387,6 @@ event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) &priority=1
|
|||
delete n$id;
|
||||
delete active_node_ids[n$node_type][endpoint$id];
|
||||
|
||||
@pragma push ignore-deprecations
|
||||
if ( n$node_type == WORKER )
|
||||
worker_count = get_active_node_count(WORKER);
|
||||
@pragma pop ignore-deprecations
|
||||
|
||||
event Cluster::node_down(node_name, endpoint$id);
|
||||
break;
|
||||
}
|
||||
|
@ -423,8 +405,6 @@ event zeek_init() &priority=5
|
|||
Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster", $policy=log_policy]);
|
||||
}
|
||||
|
||||
@endif
|
||||
|
||||
function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo
|
||||
{
|
||||
local info = stores[name];
|
||||
|
|
|
@ -22,18 +22,26 @@ redef Log::default_rotation_interval = 1 hrs;
|
|||
## Alarm summary mail interval.
|
||||
redef Log::default_mail_alarms_interval = 24 hrs;
|
||||
|
||||
## Generic log metadata rendered into the filename that zeek-archiver may interpret.
|
||||
## This is populated with a log_suffix entry within zeek_init() when multiple
|
||||
## logger nodes are defined in cluster-layout.zeek.
|
||||
global log_metadata: table[string] of string;
|
||||
## Generic log metadata rendered into filename that zeek-archiver may interpret.
|
||||
global archiver_log_metadata: table[string] of string &redef;
|
||||
|
||||
# Populate archiver_log_metadata with a "log_suffix" entry when multiple
|
||||
# loggers are configured in Cluster::nodes. Need to evaluate at script
|
||||
# loading time as leftover-log-rotation functionality is invoking
|
||||
# archiver_rotation_format_func early on during InitPostScript().
|
||||
@if ( Cluster::get_node_count(Cluster::LOGGER) > 1 )
|
||||
redef archiver_log_metadata += {
|
||||
["log_suffix"] = Cluster::node,
|
||||
};
|
||||
@endif
|
||||
|
||||
## Encode the given table as zeek-archiver understood metadata part.
|
||||
function encode_log_metadata(tbl: table[string] of string): string
|
||||
function archiver_encode_log_metadata(tbl: table[string] of string): string
|
||||
{
|
||||
local metadata_vec: vector of string;
|
||||
for ( k, v in log_metadata )
|
||||
for ( k, v in tbl )
|
||||
{
|
||||
if ( |v| == 0 ) # Assume concious decision to skip this entry.
|
||||
if ( |v| == 0 ) # Assume conscious decision to skip this entry.
|
||||
next;
|
||||
|
||||
if ( /[,=]/ in k || /[,=]/ in v )
|
||||
|
@ -57,8 +65,8 @@ function archiver_rotation_format_func(ri: Log::RotationFmtInfo): Log::RotationP
|
|||
local close_str = strftime(Log::default_rotation_date_format, ri$close);
|
||||
local base = fmt("%s__%s__%s__", ri$path, open_str, close_str);
|
||||
|
||||
if ( |log_metadata| > 0 )
|
||||
base = fmt("%s%s__", base, encode_log_metadata(log_metadata));
|
||||
if ( |archiver_log_metadata| > 0 )
|
||||
base = fmt("%s%s__", base, archiver_encode_log_metadata(archiver_log_metadata));
|
||||
|
||||
local rval = Log::RotationPath($file_basename=base);
|
||||
return rval;
|
||||
|
@ -71,15 +79,6 @@ redef Log::default_rotation_dir = "log-queue";
|
|||
redef Log::rotation_format_func = archiver_rotation_format_func;
|
||||
|
||||
redef LogAscii::enable_leftover_log_rotation = T;
|
||||
|
||||
event zeek_init()
|
||||
{
|
||||
if ( "log_suffix" in log_metadata )
|
||||
return;
|
||||
|
||||
if ( Cluster::get_node_count(Cluster::LOGGER) > 1 )
|
||||
log_metadata["log_suffix"] = Cluster::node;
|
||||
}
|
||||
@else
|
||||
|
||||
## Use the cluster's archive logging script.
|
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
@deprecated "Remove in v6.1 - now loaded automatically";
|
||||
|
||||
@load base/frameworks/analyzer
|
|
@ -54,14 +54,6 @@ export {
|
|||
## This option is also available as a per-filter ``$config`` option.
|
||||
const gzip_file_extension = "gz" &redef;
|
||||
|
||||
## Define the default logging directory. If empty, logs are written
|
||||
## to the current working directory.
|
||||
##
|
||||
## This setting is superseded by :zeek:see:`Log::default_logdir`. The
|
||||
## latter applies to all file writers and also interacts better with
|
||||
## log rotation.
|
||||
const logdir = "" &redef &deprecated="Remove in v6.1. Use 'Log::default_logdir'.";
|
||||
|
||||
## Format of timestamps when writing out JSON. By default, the JSON
|
||||
## formatter will use double values for timestamps which represent the
|
||||
## number of seconds from the UNIX epoch.
|
||||
|
|
|
@ -167,7 +167,7 @@ export {
|
|||
##
|
||||
## For example, a search for 192.168.17.0/8 will reveal a rule that exists for
|
||||
## 192.168.0.0/16, since this rule affects the subnet. However, it will not reveal
|
||||
## a more specific rule for 192.168.17.1/32, which does not directy affect the whole
|
||||
## a more specific rule for 192.168.17.1/32, which does not directly affect the whole
|
||||
## subnet.
|
||||
##
|
||||
## This function works on both the manager and workers of a cluster. Note that on
|
||||
|
|
|
@ -584,15 +584,6 @@ function is_being_suppressed(n: Notice::Info): bool
|
|||
return F;
|
||||
}
|
||||
|
||||
# Executes a script with all of the notice fields put into the
|
||||
# new process' environment as "ZEEK_ARG_<field>" variables.
|
||||
function execute_with_notice(cmd: string, n: Notice::Info) &deprecated="Remove in v6.1. Usage testing indicates this function is unused."
|
||||
{
|
||||
# TODO: fix system calls
|
||||
#local tgs = tags(n);
|
||||
#system_env(cmd, tags);
|
||||
}
|
||||
|
||||
function create_file_info(f: fa_file): Notice::FileInfo
|
||||
{
|
||||
local fi: Notice::FileInfo = Notice::FileInfo($fuid = f$id,
|
||||
|
|
|
@ -148,13 +148,6 @@ event zeek_init() &priority=5
|
|||
Log::create_stream(Signatures::LOG, [$columns=Info, $ev=log_signature, $path="signatures", $policy=log_policy]);
|
||||
}
|
||||
|
||||
# Returns true if the given signature has already been triggered for the given
|
||||
# [orig, resp] pair.
|
||||
function has_signature_matched(id: string, orig: addr, resp: addr): bool &deprecated="Remove in v6.1. Usage testing indicates this function is unused."
|
||||
{
|
||||
return [orig, resp] in vert_table ? id in vert_table[orig, resp] : F;
|
||||
}
|
||||
|
||||
event sig_summary(orig: addr, id: string, msg: string)
|
||||
{
|
||||
NOTICE([$note=Signature_Summary, $src=orig,
|
||||
|
|
|
@ -454,11 +454,6 @@ function cmp_versions(v1: Version, v2: Version): int
|
|||
return 0;
|
||||
}
|
||||
|
||||
function software_endpoint_name(id: conn_id, host: addr): string &deprecated="Remove in v6.1. Usage testing indicates this function is unused."
|
||||
{
|
||||
return fmt("%s %s", host, (host == id$orig_h ? "client" : "server"));
|
||||
}
|
||||
|
||||
# Convert a version into a string "a.b.c-x". Marked "&is_used" because
|
||||
# while the base scripts don't call it, the optional policy/ scripts do.
|
||||
function software_fmt_version(v: Version): string &is_used
|
||||
|
|
|
@ -71,18 +71,6 @@ function data_added(ss: SumStat, key: Key, result: Result)
|
|||
threshold_crossed(ss, key, result);
|
||||
}
|
||||
|
||||
function request(ss_name: string): ResultTable &deprecated="Remove in v6.1. Usage testing indicates this function is unused."
|
||||
{
|
||||
# This only needs to be implemented this way for cluster compatibility.
|
||||
return when [ss_name] ( T )
|
||||
{
|
||||
if ( ss_name in result_store )
|
||||
return result_store[ss_name];
|
||||
else
|
||||
return table();
|
||||
}
|
||||
}
|
||||
|
||||
function request_key(ss_name: string, key: Key): Result
|
||||
{
|
||||
# This only needs to be implemented this way for cluster compatibility.
|
||||
|
|
|
@ -26,7 +26,7 @@ export {
|
|||
}
|
||||
|
||||
redef record ResultVal += {
|
||||
# Internal use only. This is not meant to be publically available
|
||||
# Internal use only. This is not meant to be publicly available
|
||||
# because probabilistic data structures have to be examined using
|
||||
# specialized bifs.
|
||||
card: opaque of cardinality &optional;
|
||||
|
|
|
@ -26,7 +26,7 @@ export {
|
|||
}
|
||||
|
||||
redef record ResultVal += {
|
||||
# Internal use only. This is not meant to be publically available
|
||||
# Internal use only. This is not meant to be publicly available
|
||||
# and just a copy of num_samples from the Reducer. Needed for
|
||||
# availability in the compose hook.
|
||||
num_samples: count &default=0;
|
||||
|
|
|
@ -23,11 +23,6 @@ function calc_std_dev(rv: ResultVal)
|
|||
rv$std_dev = sqrt(rv$variance);
|
||||
}
|
||||
|
||||
hook std_dev_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal) &deprecated="Remove in v6.1. Usage testing indicates this function is unused."
|
||||
{
|
||||
calc_std_dev(rv);
|
||||
}
|
||||
|
||||
hook register_observe_plugins() &priority=-10
|
||||
{
|
||||
register_observe_plugin(STD_DEV, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
|
||||
|
|
|
@ -28,7 +28,7 @@ redef record ResultVal += {
|
|||
# set in the reducer.
|
||||
unique_max: count &optional;
|
||||
|
||||
# Internal use only. This is not meant to be publically available
|
||||
# Internal use only. This is not meant to be publicly available
|
||||
# because we don't want to trust that we can inspect the values
|
||||
# since we will likely move to a probabilistic data structure in the future.
|
||||
# TODO: in the future this will optionally be a hyperloglog structure
|
||||
|
|
|
@ -56,9 +56,6 @@ export {
|
|||
## Additional script filenames/paths that the node should load
|
||||
## after any user-specified scripts.
|
||||
addl_user_scripts: vector of string &default = vector();
|
||||
## The former name of addl_user_scripts.
|
||||
scripts: vector of string &default = vector()
|
||||
&deprecated="Remove in 6.1. Use the addl_user_scripts field instead.";
|
||||
## Environment variables to define in the supervised node.
|
||||
env: table[string] of string &default=table();
|
||||
## A cpu/core number to which the node will try to pin itself.
|
||||
|
|
|
@ -1,7 +1,3 @@
|
|||
@load ./main
|
||||
|
||||
@load base/frameworks/cluster
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
@load ./cluster
|
||||
@endif
|
||||
|
|
|
@ -623,10 +623,6 @@ type fa_metadata: record {
|
|||
inferred: bool &default=T;
|
||||
};
|
||||
|
||||
## Same as :zeek:see:`Analyzer::disabling_analyzer`, but deprecated due
|
||||
## to living in the global namespace.
|
||||
type disabling_analyzer: hook(c: connection, atype: AllAnalyzers::Tag, aid: count) &redef &deprecated="Remove in v6.1. Use Analyzer::disabling_analyzer() instead.";
|
||||
|
||||
module Analyzer;
|
||||
export {
|
||||
## A hook taking a connection, analyzer tag and analyzer id that can be
|
||||
|
@ -942,6 +938,45 @@ type BacktraceElement: record {
|
|||
## .. zeek:see:: backtrace print_backtrace
|
||||
type Backtrace: vector of BacktraceElement;
|
||||
|
||||
## A hook that is invoked when an assert statement fails.
|
||||
##
|
||||
## By default, a reporter error message is logged describing the failing
|
||||
## assert similarly to how scripting errors are reported after invoking
|
||||
## this hook. Using the :zeek:see:`break` statement in an assertion_failure
|
||||
## hook handler allows to suppress this message.
|
||||
##
|
||||
## cond: The string representation of the condition.
|
||||
##
|
||||
## msg: Evaluated message as string given to the assert statement.
|
||||
##
|
||||
## bt: Backtrace of the assertion error. The top element will contain
|
||||
## the location of the assert statement that failed.
|
||||
##
|
||||
## .. zeek:see:: assertion_result
|
||||
type assertion_failure: hook(cond: string, msg: string, bt: Backtrace);
|
||||
|
||||
## A hook that is invoked with the result of every assert statement.
|
||||
##
|
||||
## This is a potentially expensive hook meant to be used by testing
|
||||
## frameworks to summarize assert results. In a production setup,
|
||||
## this hook is likely detrimental to performance.
|
||||
##
|
||||
## Using the :zeek:see:`break` statement within an assertion_failure hook
|
||||
## handler allows to suppress the reporter error message generated for
|
||||
## failing assert statements.
|
||||
##
|
||||
## result: The result of evaluating **cond**.
|
||||
##
|
||||
## cond: The string representation of the condition.
|
||||
##
|
||||
## msg: Evaluated message as string given to the assert statement.
|
||||
##
|
||||
## bt: Backtrace of the assertion error. The top element will contain
|
||||
## the location of the assert statement that failed.
|
||||
##
|
||||
## .. zeek:see:: assertion_failure
|
||||
type assertion_result: hook(result: bool, cond: string, msg: string, bt: Backtrace);
|
||||
|
||||
# todo:: Do we still need these here? Can they move into the packet filter
|
||||
# framework?
|
||||
#
|
||||
|
@ -4741,7 +4776,6 @@ export {
|
|||
type SNMP::BulkPDU: record {
|
||||
request_id: int;
|
||||
non_repeaters: count;
|
||||
max_repititions: count &deprecated="Remove in v6.1. Use max_repetitions instead";
|
||||
max_repetitions: count;
|
||||
bindings: SNMP::Bindings;
|
||||
};
|
||||
|
@ -5162,31 +5196,16 @@ export {
|
|||
## Setting this to zero will disable all types of tunnel decapsulation.
|
||||
const max_depth: count = 2 &redef;
|
||||
|
||||
## Toggle whether to do IPv{4,6}-in-IPv{4,6} decapsulation.
|
||||
const enable_ip = T &redef &deprecated="Remove in v6.1. Tunnel analyzers can be toggled with enable_analyzer()/disable_analyzer() or disabled through Analyzer::disabled_analyzers";
|
||||
|
||||
## Toggle whether to do IPv{4,6}-in-AYIYA decapsulation.
|
||||
const enable_ayiya = T &redef &deprecated="Remove in v6.1. Tunnel analyzers can be toggled with enable_analyzer()/disable_analyzer() or disabled through Analyzer::disabled_analyzers";
|
||||
|
||||
## Toggle whether to do IPv6-in-Teredo decapsulation.
|
||||
const enable_teredo = T &redef &deprecated="Remove in v6.1. Tunnel analyzers can be toggled with enable_analyzer()/disable_analyzer() or disabled through Analyzer::disabled_analyzers";
|
||||
|
||||
## Toggle whether to do GTPv1 decapsulation.
|
||||
const enable_gtpv1 = T &redef &deprecated="Remove in v6.1. Tunnel analyzers can be toggled with enable_analyzer()/disable_analyzer() or disabled through Analyzer::disabled_analyzers";
|
||||
|
||||
## Toggle whether to do GRE decapsulation.
|
||||
const enable_gre = T &redef &deprecated="Remove in v6.1. Tunnel analyzers can be toggled with enable_analyzer()/disable_analyzer() or disabled through Analyzer::disabled_analyzers";
|
||||
|
||||
## With this set, the Teredo analyzer waits until it sees both sides
|
||||
## of a connection using a valid Teredo encapsulation before issuing
|
||||
## a :zeek:see:`analyzer_confirmation`. If it's false, the first
|
||||
## a :zeek:see:`analyzer_confirmation_info`. If it's false, the first
|
||||
## occurrence of a packet with valid Teredo encapsulation causes a
|
||||
## confirmation.
|
||||
const delay_teredo_confirmation = T &redef;
|
||||
|
||||
## With this set, the GTP analyzer waits until the most-recent upflow
|
||||
## and downflow packets are a valid GTPv1 encapsulation before
|
||||
## issuing :zeek:see:`analyzer_confirmation`. If it's false, the
|
||||
## issuing :zeek:see:`analyzer_confirmation_info`. If it's false, the
|
||||
## first occurrence of a packet with valid GTPv1 encapsulation causes
|
||||
## confirmation. Since the same inner connection can be carried
|
||||
## differing outer upflow/downflow connections, setting to false
|
||||
|
|
|
@ -283,6 +283,8 @@ export {
|
|||
["86d35949-83c9-4044-b424-db363231fd0c",0x0f] = "SchRpcScheduledRuntimes",
|
||||
["86d35949-83c9-4044-b424-db363231fd0c",0x10] = "SchRpcGetLastRunInfo",
|
||||
["86d35949-83c9-4044-b424-db363231fd0c",0x11] = "SchRpcGetTaskInfo",
|
||||
["86d35949-83c9-4044-b424-db363231fd0c",0x12] = "SchRpcGetNumberOfMissedRuns",
|
||||
["86d35949-83c9-4044-b424-db363231fd0c",0x13] = "SchRpcEnableTask",
|
||||
|
||||
# IObjectExporter
|
||||
["99fcfec4-5260-101b-bbcb-00aa0021347a",0x00] = "ResolveOxid",
|
||||
|
@ -306,6 +308,16 @@ export {
|
|||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x07] = "NspiDNToEph",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x08] = "NspiGetPropList",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x09] = "NspiGetProps",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x0a] = "NspiCompareMIds",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x0b] = "NspiModProps",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x0c] = "NspiGetSpecialTable",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x0d] = "NspiGetTemplateInfo",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x0e] = "NspiModLinkAtt",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x10] = "NspiQueryColumns",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x11] = "NspiGetNamesFromIDs",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x12] = "NspiGetIDsFromNames",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x13] = "NspiResolveNames",
|
||||
["f5cc5a18-4264-101a-8c59-08002b2f8426",0x14] = "NspiResolveNamesW",
|
||||
|
||||
# IWbemServices
|
||||
["9556dc99-828c-11cf-a37e-00aa003240c7",0x03] = "OpenNamespace",
|
||||
|
@ -611,6 +623,12 @@ export {
|
|||
["12345678-1234-abcd-ef00-0123456789ab",0x66] = "RpcCorePrinterDriverInstalled",
|
||||
["12345678-1234-abcd-ef00-0123456789ab",0x67] = "RpcGetPrinterDriverPackagePath",
|
||||
["12345678-1234-abcd-ef00-0123456789ab",0x68] = "RpcReportJobProcessingProgress",
|
||||
["12345678-1234-abcd-ef00-0123456789ab",0x6e] = "RpcGetJobNamedPropertyValue",
|
||||
["12345678-1234-abcd-ef00-0123456789ab",0x6f] = "RpcSetJobNamedProperty",
|
||||
["12345678-1234-abcd-ef00-0123456789ab",0x70] = "RpcDeleteJobNamedProperty",
|
||||
["12345678-1234-abcd-ef00-0123456789ab",0x71] = "RpcEnumJobNamedProperties",
|
||||
["12345678-1234-abcd-ef00-0123456789ab",0x74] = "RpcLogJobInfoForBranchOffice",
|
||||
|
||||
|
||||
["4b324fc8-1670-01d3-1278-5a47bf6ee188",0x00] = "NetrCharDevEnum",
|
||||
["4b324fc8-1670-01d3-1278-5a47bf6ee188",0x01] = "NetrCharDevGetInfo",
|
||||
|
@ -740,6 +758,7 @@ export {
|
|||
["12345778-1234-abcd-ef00-0123456789ac",0x43] = "SamrValidatePassword",
|
||||
["12345778-1234-abcd-ef00-0123456789ac",0x44] = "SamrQueryLocalizableAccountsInDomain",
|
||||
["12345778-1234-abcd-ef00-0123456789ac",0x45] = "SamrPerformGenericOperation",
|
||||
["12345778-1234-abcd-ef00-0123456789ac",0x49] = "SamrUnicodeChangePasswordUser4",
|
||||
|
||||
["338cd001-2244-31f1-aaaa-900038001003",0x00] = "OpenClassesRoot",
|
||||
["338cd001-2244-31f1-aaaa-900038001003",0x01] = "OpenCurrentUser",
|
||||
|
@ -847,6 +866,9 @@ export {
|
|||
["367abb81-9844-35f1-ad32-98f038001003",0x34] = "ScSendPnPMessage",
|
||||
["367abb81-9844-35f1-ad32-98f038001003",0x35] = "ScValidatePnPService",
|
||||
["367abb81-9844-35f1-ad32-98f038001003",0x36] = "ScOpenServiceStatusHandle",
|
||||
["367abb81-9844-35f1-ad32-98f038001003",0x38] = "QueryServiceConfigEx",
|
||||
["367abb81-9844-35f1-ad32-98f038001003",0x3c] = "CreateWowService",
|
||||
["367abb81-9844-35f1-ad32-98f038001003",0x40] = "OpenSCManager2",
|
||||
|
||||
# browser
|
||||
["6bffd098-a112-3610-9833-012892020162",0x00] = "BrowserrServerEnum",
|
||||
|
@ -1132,6 +1154,8 @@ export {
|
|||
["82273fdc-e32a-18c3-3f78-827929dc23ea",0x16] = "ElfrGetLogInformation",
|
||||
["82273fdc-e32a-18c3-3f78-827929dc23ea",0x17] = "ElfrFlushEL",
|
||||
["82273fdc-e32a-18c3-3f78-827929dc23ea",0x18] = "ElfrReportEventAndSourceW",
|
||||
["82273fdc-e32a-18c3-3f78-827929dc23ea",0x19] = "ElfrReportEventExW",
|
||||
["82273fdc-e32a-18c3-3f78-827929dc23ea",0x1a] = "ElfrReportEventExA",
|
||||
|
||||
# ISeclogon
|
||||
["12b81e99-f207-4a4c-85d3-77b42f76fd14",0x00] = "SeclCreateProcessWithLogonW",
|
||||
|
@ -1265,7 +1289,9 @@ export {
|
|||
["12345778-1234-abcd-ef00-0123456789ab",0x5c] = "CredReadByTokenHandle",
|
||||
["12345778-1234-abcd-ef00-0123456789ab",0x5d] = "CredrRestoreCredentials",
|
||||
["12345778-1234-abcd-ef00-0123456789ab",0x5e] = "CredrBackupCredentials",
|
||||
|
||||
["12345778-1234-abcd-ef00-0123456789ab",0x81] = "LsarCreateTrustedDomainEx3",
|
||||
["12345778-1234-abcd-ef00-0123456789ab",0x82] = "LsarOpenPolicy3",
|
||||
["12345778-1234-abcd-ef00-0123456789ab",0x85] = "LsarSetForestTrustInformation2",
|
||||
# msgsvc
|
||||
["17fdd703-1827-4e34-79d4-24a55c53bb37",0x00] = "NetrMessageNameAdd",
|
||||
["17fdd703-1827-4e34-79d4-24a55c53bb37",0x01] = "NetrMessageNameEnum",
|
||||
|
@ -1363,6 +1389,15 @@ export {
|
|||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x07] = "DnssrvComplexOperation2",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x08] = "DnssrvEnumRecords2",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x09] = "DnssrvUpdateRecord2",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x0a] = "DnssrvUpdateRecord3",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x0b] = "DnssrvEnumRecords3",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x0c] = "DnssrvOperation3",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x0d] = "DnssrvQuery3",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x0e] = "DnssrvComplexOperation3",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x0f] = "DnssrvOperation4",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x10] = "DnssrvQuery4",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x11] = "DnssrvUpdateRecord4",
|
||||
["50abc2a4-574d-40b3-9d66-ee4fd5fba076",0x12] = "DnssrvEnumRecords4",
|
||||
|
||||
# lls_license
|
||||
["57674cd0-5200-11ce-a897-08002b2e9c6d",0x00] = "LlsrLicenseRequestW",
|
||||
|
@ -1487,6 +1522,9 @@ export {
|
|||
["4fc742e0-4a10-11cf-8273-00aa004ae673",0x14] = "NetrDfsRemove2",
|
||||
["4fc742e0-4a10-11cf-8273-00aa004ae673",0x15] = "NetrDfsEnumEx",
|
||||
["4fc742e0-4a10-11cf-8273-00aa004ae673",0x16] = "NetrDfsSetInfo2",
|
||||
["4fc742e0-4a10-11cf-8273-00aa004ae673",0x17] = "NetrDfsAddRootTarget",
|
||||
["4fc742e0-4a10-11cf-8273-00aa004ae673",0x18] = "NetrDfsRemoveRootTarget",
|
||||
["4fc742e0-4a10-11cf-8273-00aa004ae673",0x19] = "NetrDfsGetSupportedNamespaceVersion",
|
||||
|
||||
# sfcapi
|
||||
["83da7c00-e84f-11d2-9807-00c04f8ec850",0x00] = "SfcSrv_GetNextProtectedFile",
|
||||
|
@ -1609,6 +1647,12 @@ export {
|
|||
["e3514235-4b06-11d1-ab04-00c04fc2dcd2",0x16] = "DRSReplicaVerifyObjects",
|
||||
["e3514235-4b06-11d1-ab04-00c04fc2dcd2",0x17] = "DRSGetObjectExistence",
|
||||
["e3514235-4b06-11d1-ab04-00c04fc2dcd2",0x18] = "DRSQuerySitesByCost",
|
||||
["e3514235-4b06-11d1-ab04-00c04fc2dcd2",0x19] = "IDL_DRSInitDemotion",
|
||||
["e3514235-4b06-11d1-ab04-00c04fc2dcd2",0x1a] = "IDL_DRSReplicaDemotion",
|
||||
["e3514235-4b06-11d1-ab04-00c04fc2dcd2",0x1b] = "IDL_DRSFinishDemotion",
|
||||
["e3514235-4b06-11d1-ab04-00c04fc2dcd2",0x1c] = "IDL_DRSAddCloneDC",
|
||||
["e3514235-4b06-11d1-ab04-00c04fc2dcd2",0x1d] = "IDL_DRSWriteNgcKey",
|
||||
["e3514235-4b06-11d1-ab04-00c04fc2dcd2",0x1e] = "IDL_DRSReadNgcKey",
|
||||
|
||||
# winspipe
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x00] = "R_WinsRecordAction",
|
||||
|
@ -1628,9 +1672,12 @@ export {
|
|||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x0e] = "R_WinsGetBrowserNames_Old",
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x0f] = "R_WinsDeleteWins",
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x10] = "R_WinsSetFlags",
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x11] = "R_WinsGetDbRecsByName",
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x12] = "R_WinsStatusWHdl",
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x13] = "R_WinsDoScavengingNew",
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x11] = "R_WinsGetBrowserNames",
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x12] = "R_WinsGetDbRecsByName",
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x13] = "R_WinsStatusNew",
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x14] = "R_WinsStatusWHdl",
|
||||
["45f52c28-7f9f-101a-b52b-08002b2efabe",0x15] = "R_WinsDoScavengingNew",
|
||||
|
||||
|
||||
# mgmt
|
||||
["afa8bd80-7d8a-11c9-bef4-08002b102989",0x00] = "inq_if_ids",
|
||||
|
@ -1761,6 +1808,64 @@ export {
|
|||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x6A] = "ApiClusterControl",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x6B] = "ApiUnblockGetNotifyCall",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x6C] = "ApiSetServiceAccountPassword",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x6D] = "ApiSetResourceDependencyExpression",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x6E] = "ApiGetResourceDependencyExpression",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x70] = "ApiGetResourceNetworkName",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x71] = "ApiExecuteBatch",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x72] = "ApiCreateBatchPort",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x73] = "ApiGetBatchNotification",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x74] = "ApiCloseBatchPort",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x75] = "ApiOpenClusterEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x76] = "ApiOpenNodeEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x77] = "ApiOpenGroupEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x78] = "ApiOpenResourceEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x79] = "ApiOpenNetworkEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x7A] = "ApiOpenNetInterfaceEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x7B] = "ApiChangeCsvState",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x7C] = "ApiCreateNodeEnumEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x7D] = "ApiCreateEnumEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x7E] = "ApiPauseNodeEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x7F] = "ApiPauseNodeWithDrainTarget",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x80] = "ApiResumeNodeEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x81] = "ApiCreateGroupEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x82] = "ApiOnlineGroupEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x83] = "ApiOfflineGroupEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x84] = "ApiMoveGroupEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x85] = "ApiMoveGroupToNodeEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x86] = "ApiCancelClusterGroupOperation",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x87] = "ApiOnlineResourceEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x88] = "ApiOfflineResourceEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x89] = "ApiCreateNotifyV2",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x8A] = "ApiAddNotifyV2",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x8B] = "ApiGetNotifyV2",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x8F] = "ApiCreateGroupEnum",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x90] = "ApiCreateResourceEnum",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x91] = "ApiExecuteReadBatch",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x92] = "ApiRestartResource",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x93] = "ApiGetNotifyAsync",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x9B] = "ApiAddNotifyResourceTypeV2",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0x9D] = "ApiExecuteReadBatchEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xA3] = "ApiCreateGroupSet",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xA4] = "ApiOpenGroupSet",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xA5] = "ApiCloseGroupSet",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xA6] = "ApiDeleteGroupSet",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xA7] = "ApiAddGroupToGroupSet",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xA8] = "ApiRemoveGroupFromGroupSet",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xA9] = "ApiMoveGroupToGroupSet",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xAB] = "ApiAddGroupSetDependency",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xAC] = "ApiAddGroupToGroupSetDependency",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xAD] = "ApiNodeGroupSetControl",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xAE] = "ApiGroupSetControl",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xAF] = "ApiSetGroupDependencyExpression",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xB0] = "ApiRemoveClusterGroupDependency",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xB1] = "ApiSetGroupSetDependencyExpression",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xB2] = "ApiRemoveGroupSetDependency",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xB3] = "ApiRemoveClusterGroupToGroupSetDependency",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xB4] = "ApiCreateGroupSetEnum",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xB5] = "ApiCreateNetInterfaceEnum",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xB6] = "ApiChangeCsvStateEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xB7] = "ApiAddGroupToGroupSetEx",
|
||||
["b97db8b2-4c63-11cf-bff6-08002be23f2f",0xB8] = "ApiChangeResourceGroupEx",
|
||||
|
||||
# dhcpsrv - MSDN Ref: DHCP Server Mgmt Protocol [ms-dhcpm]
|
||||
["6bffd098-a112-3610-9833-46c3f874532d",0x00] = "R_DhcpCreateSubnet",
|
||||
|
@ -2219,7 +2324,7 @@ export {
|
|||
["00020401-0000-0000-c000-000000000046",0x12] = "GetContainingTypeLib",
|
||||
|
||||
# IDMNotify - MSDN Ref: Disk Mgmt Remote Protocol [ms-dmrp]
|
||||
["d2d79df7-3400-11d0-b40b-00aa005ff586",0x00] = "ObjectsChanged",
|
||||
["d2d79df7-3400-11d0-b40b-00aa005ff586",0x03] = "ObjectsChanged",
|
||||
|
||||
# IDMRemoteServer - MSDN Ref: Disk Mgmt Remote Protocol [ms-dmrp]
|
||||
["3a410f21-553f-11d1-8e5e-00a0c92c9d5d",0x03] = "CreateRemoteObject",
|
||||
|
|
|
@ -135,10 +135,3 @@ function remove_pending_cmd(pc: PendingCmds, ca: CmdArg): bool
else
return F;
}

function pop_pending_cmd(pc: PendingCmds, reply_code: count, reply_msg: string): CmdArg &deprecated="Remove in v6.1. Usage testing indicates this function is unused."
{
local ca = get_pending_cmd(pc, reply_code, reply_msg);
remove_pending_cmd(pc, ca);
return ca;
}

@ -182,7 +182,7 @@ const attr_types: table[count] of string = {
[171] = "Delegated-IPv6-Prefix-Pool",
[172] = "Stateful-IPv6-Address-Pool",
[173] = "IPv6-6rd-Configuration"
} &default=function(i: count): string { return fmt("unknown-%d", i); } &deprecated="Remove in v6.1. Usage testing indicates this function is unused.";
} &default=function(i: count): string { return fmt("unknown-%d", i); } &is_used;

const nas_port_types: table[count] of string = {
[0] = "Async",

@ -205,7 +205,7 @@ const nas_port_types: table[count] of string = {
[17] = "Cable",
[18] = "Wireless - Other",
[19] = "Wireless - IEEE 802.11"
} &default=function(i: count): string { return fmt("unknown-%d", i); } &deprecated="Remove in v6.1. Usage testing indicates this function is unused.";
} &default=function(i: count): string { return fmt("unknown-%d", i); } &is_used;

const service_types: table[count] of string = {
[1] = "Login",

@ -219,7 +219,7 @@ const service_types: table[count] of string = {
[9] = "Callback NAS Prompt",
[10] = "Call Check",
[11] = "Callback Administrative",
} &default=function(i: count): string { return fmt("unknown-%d", i); } &deprecated="Remove in v6.1. Usage testing indicates this function is unused.";
} &default=function(i: count): string { return fmt("unknown-%d", i); } &is_used;

const framed_protocol_types: table[count] of string = {
[1] = "PPP",

@ -228,4 +228,4 @@ const framed_protocol_types: table[count] of string = {
[4] = "Gandalf proprietary SingleLink/MultiLink protocol",
[5] = "Xylogics proprietary IPX/SLIP",
[6] = "X.75 Synchronous"
} &default=function(i: count): string { return fmt("unknown-%d", i); } &deprecated="Remove in v6.1. Usage testing indicates this function is unused.";
} &default=function(i: count): string { return fmt("unknown-%d", i); } &is_used;
|
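The change above swaps a blanket &deprecated on these tables for &is_used, which keeps the otherwise-unreferenced constants (and their &default functions) from triggering unused-identifier warnings. A minimal sketch of the pattern with a made-up table, not one of the RADIUS constants:

const my_types: table[count] of string = {
    [1] = "known",
} &default=function(i: count): string { return fmt("unknown-%d", i); } &is_used;

event zeek_init()
    {
    print my_types[1];   # "known"
    print my_types[42];  # missing index falls through to &default: "unknown-42"
    }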
@ -274,50 +274,3 @@ event smb1_write_andx_request(c: connection, hdr: SMB1::Header, file_id: count,
|
|||
|
||||
c$smb_state$pipe_map[file_id] = c$smb_state$current_file$uuid;
|
||||
}
|
||||
|
||||
event smb_pipe_bind_ack_response(c: connection, hdr: SMB1::Header) &deprecated="Remove in v6.1. Usage testing indicates this function is unused."
|
||||
{
|
||||
if ( ! c$smb_state?$current_file || ! c$smb_state$current_file?$uuid )
|
||||
{
|
||||
# TODO: figure out why the uuid isn't getting set sometimes.
|
||||
return;
|
||||
}
|
||||
|
||||
c$smb_state$current_cmd$sub_command = "RPC_BIND_ACK";
|
||||
c$smb_state$current_cmd$argument = SMB::rpc_uuids[c$smb_state$current_file$uuid];
|
||||
}
|
||||
|
||||
event smb_pipe_bind_request(c: connection, hdr: SMB1::Header, uuid: string, version: string) &deprecated="Remove in v6.1. Usage testing indicates this function is unused."
|
||||
{
|
||||
if ( ! c$smb_state?$current_file || ! c$smb_state$current_file?$uuid )
|
||||
{
|
||||
# TODO: figure out why the current_file isn't getting set sometimes.
|
||||
return;
|
||||
}
|
||||
|
||||
c$smb_state$current_cmd$sub_command = "RPC_BIND";
|
||||
c$smb_state$current_file$uuid = uuid;
|
||||
c$smb_state$current_cmd$argument = fmt("%s v%s", SMB::rpc_uuids[uuid], version);
|
||||
}
|
||||
|
||||
event smb_pipe_request(c: connection, hdr: SMB1::Header, op_num: count) &deprecated="Remove in v6.1. Usage testing indicates this function is unused."
|
||||
{
|
||||
if ( ! c$smb_state?$current_file )
|
||||
{
|
||||
# TODO: figure out why the current file isn't being set sometimes.
|
||||
return;
|
||||
}
|
||||
|
||||
local f = c$smb_state$current_file;
|
||||
if ( ! f?$uuid )
|
||||
{
|
||||
# TODO: figure out why this is happening.
|
||||
Reporter::conn_weird("smb_pipe_request_missing_uuid", c, "");
|
||||
return;
|
||||
}
|
||||
local arg = fmt("%s: %s",
|
||||
SMB::rpc_uuids[f$uuid],
|
||||
SMB::rpc_sub_cmds[f$uuid][op_num]);
|
||||
|
||||
c$smb_state$current_cmd$argument = arg;
|
||||
}
|
||||
|
|
|
@ -31,7 +31,7 @@ signature dpd_socks4_reverse_server {
signature dpd_socks5_client {
ip-proto == tcp
# Watch for a few authentication methods to reduce false positives.
payload /^\x05.[\x00\x01\x02]/
payload /^\x05.[\x00\x01\x02\x03\x05\x06\x07\x08\x09]/
tcp-state originator
}

@ -40,9 +40,23 @@ signature dpd_socks5_server {
requires-reverse-signature dpd_socks5_client
# Watch for a single authentication method to be chosen by the server or
# the server to indicate the no authentication is required.
payload /^\x05(\x00|\x01[\x00\x01\x02])/
# From wikipedia:
# 0x00: No authentication
# 0x01: GSSAPI (RFC 1961)
# 0x02: Username/password (RFC 1929)
# 0x03–0x7F: methods assigned by IANA[11]
# 0x03: Challenge-Handshake Authentication Protocol
# 0x04: Unassigned
# 0x05: Challenge-Response Authentication Method
# 0x06: Secure Sockets Layer
# 0x07: NDS Authentication
# 0x08: Multi-Authentication Framework
# 0x09: JSON Parameter Block
# 0x0A–0x7F: Unassigned
# 0x80–0xFE: methods reserved for private use
#
# Keep in sync with dpd_socks5_client, 0xff is "no acceptable methods"
payload /^\x05[\x00\x01\x02\x03\x05\x06\x07\x08\x09\xff]/
tcp-state responder
enable "socks"
}

@ -167,7 +167,7 @@ export {
global get_emails: function(a: addr): string;
}

# Please ignore, this is an interally used variable.
# Please ignore, this is an internally used variable.
global local_dns_suffix_regex: pattern = /MATCH_NOTHING/;
global local_dns_neighbor_suffix_regex: pattern = /MATCH_NOTHING/;

@ -1,64 +0,0 @@
|
|||
##! This script can be used to add back the fields ``tx_hosts``, ``rx_hosts``
|
||||
##! and ``conn_uids`` to the :zeek:see:`Files::Info` record and thereby also
|
||||
##! back into the ``files.log``. These fields have been removed in Zeek 5.1
|
||||
##! and replaced with the more commonly used ``uid`` and ``id`` fields.
|
||||
##!
|
||||
##! It's only purpose is to provide an easy way to add back the fields such that
|
||||
##! existing downstream processes continue to work without the need to adapt them.
|
||||
##! This script will be removed with Zeek 6.1 at which point downstream processes
|
||||
##! hopefully have switched over to use ``uid`` and ``id`` instead.
|
||||
|
||||
# Remove in v6.1.
|
||||
|
||||
@load base/frameworks/files
|
||||
|
||||
module Files;
|
||||
|
||||
# Add back the fields to Files::Info.
|
||||
redef record Info += {
|
||||
## If this file was transferred over a network
|
||||
## connection this should show the host or hosts that
|
||||
## the data sourced from.
|
||||
tx_hosts: set[addr] &default=addr_set() &log;
|
||||
|
||||
## If this file was transferred over a network
|
||||
## connection this should show the host or hosts that
|
||||
## the data traveled to.
|
||||
rx_hosts: set[addr] &default=addr_set() &log;
|
||||
|
||||
## Connection UIDs over which the file was transferred.
|
||||
conn_uids: set[string] &default=string_set() &log;
|
||||
};
|
||||
|
||||
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=9
|
||||
{
|
||||
local cid = c$id;
|
||||
add f$info$conn_uids[c$uid];
|
||||
add f$info$tx_hosts[f$is_orig ? cid$orig_h : cid$resp_h];
|
||||
add f$info$rx_hosts[f$is_orig ? cid$resp_h : cid$orig_h];
|
||||
}
|
||||
|
||||
# For every log write to files.log, ensure tx_hosts, rx_hosts and conn_uids
|
||||
# hold just a single value. Use a high priority for this handler to ensure
|
||||
# this happens before any user defined hooks.
|
||||
hook Log::log_stream_policy(rec: any, id: Log::ID) &priority=100
|
||||
{
|
||||
if ( id != Files::LOG )
|
||||
return;
|
||||
|
||||
local info = rec as Files::Info;
|
||||
|
||||
# In the common case of a single connection (or the less common case
|
||||
# of no connection), there's nothing to do in this hook.
|
||||
if ( |info$conn_uids| == 1 || ! info?$id )
|
||||
return;
|
||||
|
||||
# Make singular tx_hosts, rx_hosts and conn_uids fields based on
|
||||
# the active uid. Note, this currently assumes that Files::Info$is_orig
|
||||
# is the same for all connections. This seems reasonable given that
|
||||
# all connections will use the same protocol.
|
||||
local cid = info$id;
|
||||
info$conn_uids = set(info$uid);
|
||||
info$tx_hosts = set(info$is_orig ? cid$orig_h : cid$resp_h);
|
||||
info$rx_hosts = set(info$is_orig ? cid$resp_h : cid$orig_h);
|
||||
}
|
|
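The file removed above only existed to recreate tx_hosts/rx_hosts/conn_uids from the newer uid/id fields; a rough sketch of how a downstream script can derive the same endpoints itself (the event choice and guards are illustrative, the field names come from the removed script):

event file_state_remove(f: fa_file)
    {
    if ( ! f?$info || ! f$info?$id || ! f$info?$is_orig )
        return;

    # tx is the connection originator when is_orig is set, otherwise the responder.
    local tx = f$info$is_orig ? f$info$id$orig_h : f$info$id$resp_h;
    local rx = f$info$is_orig ? f$info$id$resp_h : f$info$id$orig_h;
    print fmt("file %s: %s -> %s", f$id, tx, rx);
    }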
@ -1,4 +1,4 @@
##! Logs Spicy-related resource usage continously for debugging purposes.
##! Logs Spicy-related resource usage continuously for debugging purposes.

module Spicy;

@ -1,13 +1,18 @@
##! In a cluster configuration, open port 9911 on the manager for
##! Prometheus exposition and import all metrics from
##! Prometheus exposition and import all metrics from the
##! `zeek/cluster/metrics/...` topic.
##!
##! For customization or disabling, redef the involved Broker options again.
##! Specifically, to disable listening on port 9911, set
##! :zeek:see:`Broker::metrics_port` to `0/unknown` again.

##!
##! Note that in large clusters, metrics import may cause significant
##! communication overhead as well as load on the manager.
##!
@load base/frameworks/cluster

@if ( Cluster::is_enabled() )

# Use Cluster::node as "endpoint" label
redef Broker::metrics_export_endpoint_name = Cluster::node;

@ -19,3 +24,5 @@ redef Broker::metrics_import_topics = vector("zeek/cluster/metrics/");
@else
redef Broker::metrics_export_topic = "zeek/cluster/metrics/";
@endif

@endif
|
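For reference, a minimal local.zeek sketch building on the knobs described above (the alternative port is only an example value):

# Re-enable metrics centralization on the manager (now off by default).
@load frameworks/telemetry/prometheus

# Either move the Prometheus exposition port somewhere else ...
redef Broker::metrics_port = 9912/tcp;

# ... or turn the listener off again entirely:
# redef Broker::metrics_port = 0/unknown;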
@ -1,184 +0,0 @@
|
|||
##! TCP Scan detection.
|
||||
|
||||
# ..Authors: Sheharbano Khattak
|
||||
# Seth Hall
|
||||
# All the authors of the old scan.bro
|
||||
|
||||
@deprecated "Remove in v6.1. Use the external github.com/ncsa/bro-simple-scan package instead (e.g., by installing it via `zkg install ncsa/bro-simple-scan`). The misc/scan.zeek script hasn't been maintained since 2013. Further, the external bro-simple-scan package from NCSA (Justin Azoff) has become the recommended alternative for TCP scan detection."
|
||||
|
||||
@load base/frameworks/notice
|
||||
@load base/frameworks/sumstats
|
||||
|
||||
@load base/utils/time
|
||||
|
||||
module Scan;
|
||||
|
||||
export {
|
||||
redef enum Notice::Type += {
|
||||
## Address scans detect that a host appears to be scanning some
|
||||
## number of destinations on a single port. This notice is
|
||||
## generated when more than :zeek:id:`Scan::addr_scan_threshold`
|
||||
## unique hosts are seen over the previous
|
||||
## :zeek:id:`Scan::addr_scan_interval` time range.
|
||||
Address_Scan,
|
||||
|
||||
## Port scans detect that an attacking host appears to be
|
||||
## scanning a single victim host on several ports. This notice
|
||||
## is generated when an attacking host attempts to connect to
|
||||
## :zeek:id:`Scan::port_scan_threshold`
|
||||
## unique ports on a single host over the previous
|
||||
## :zeek:id:`Scan::port_scan_interval` time range.
|
||||
Port_Scan,
|
||||
};
|
||||
|
||||
## Failed connection attempts are tracked over this time interval for
|
||||
## the address scan detection. A higher interval will detect slower
|
||||
## scanners, but may also yield more false positives.
|
||||
const addr_scan_interval = 5min &redef;
|
||||
|
||||
## Failed connection attempts are tracked over this time interval for
|
||||
## the port scan detection. A higher interval will detect slower
|
||||
## scanners, but may also yield more false positives.
|
||||
const port_scan_interval = 5min &redef;
|
||||
|
||||
## The threshold of the unique number of hosts a scanning host has to
|
||||
## have failed connections with on a single port.
|
||||
const addr_scan_threshold = 25.0 &redef;
|
||||
|
||||
## The threshold of the number of unique ports a scanning host has to
|
||||
## have failed connections with on a single victim host.
|
||||
const port_scan_threshold = 15.0 &redef;
|
||||
|
||||
global Scan::addr_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
|
||||
global Scan::port_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
|
||||
}
|
||||
|
||||
event zeek_init() &priority=5
|
||||
{
|
||||
local r1: SumStats::Reducer = [$stream="scan.addr.fail", $apply=set(SumStats::UNIQUE), $unique_max=double_to_count(addr_scan_threshold+2)];
|
||||
SumStats::create([$name="addr-scan",
|
||||
$epoch=addr_scan_interval,
|
||||
$reducers=set(r1),
|
||||
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
|
||||
{
|
||||
return result["scan.addr.fail"]$unique+0.0;
|
||||
},
|
||||
#$threshold_func=check_addr_scan_threshold,
|
||||
$threshold=addr_scan_threshold,
|
||||
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
|
||||
{
|
||||
local r = result["scan.addr.fail"];
|
||||
local side = Site::is_local_addr(key$host) ? "local" : "remote";
|
||||
local dur = duration_to_mins_secs(r$end-r$begin);
|
||||
local message=fmt("%s scanned at least %d unique hosts on port %s in %s", key$host, r$unique, key$str, dur);
|
||||
NOTICE([$note=Address_Scan,
|
||||
$src=key$host,
|
||||
$p=to_port(key$str),
|
||||
$sub=side,
|
||||
$msg=message,
|
||||
$identifier=cat(key$host)]);
|
||||
}]);
|
||||
|
||||
# Note: port scans are tracked similar to: table[src_ip, dst_ip] of set(port);
|
||||
local r2: SumStats::Reducer = [$stream="scan.port.fail", $apply=set(SumStats::UNIQUE), $unique_max=double_to_count(port_scan_threshold+2)];
|
||||
SumStats::create([$name="port-scan",
|
||||
$epoch=port_scan_interval,
|
||||
$reducers=set(r2),
|
||||
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
|
||||
{
|
||||
return result["scan.port.fail"]$unique+0.0;
|
||||
},
|
||||
$threshold=port_scan_threshold,
|
||||
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
|
||||
{
|
||||
local r = result["scan.port.fail"];
|
||||
local side = Site::is_local_addr(key$host) ? "local" : "remote";
|
||||
local dur = duration_to_mins_secs(r$end-r$begin);
|
||||
local message = fmt("%s scanned at least %d unique ports of host %s in %s", key$host, r$unique, key$str, dur);
|
||||
NOTICE([$note=Port_Scan,
|
||||
$src=key$host,
|
||||
$dst=to_addr(key$str),
|
||||
$sub=side,
|
||||
$msg=message,
|
||||
$identifier=cat(key$host)]);
|
||||
}]);
|
||||
}
|
||||
|
||||
function add_sumstats(id: conn_id, reverse: bool)
|
||||
{
|
||||
local scanner = id$orig_h;
|
||||
local victim = id$resp_h;
|
||||
local scanned_port = id$resp_p;
|
||||
|
||||
if ( reverse )
|
||||
{
|
||||
scanner = id$resp_h;
|
||||
victim = id$orig_h;
|
||||
scanned_port = id$orig_p;
|
||||
}
|
||||
|
||||
if ( hook Scan::addr_scan_policy(scanner, victim, scanned_port) )
|
||||
SumStats::observe("scan.addr.fail", [$host=scanner, $str=cat(scanned_port)], [$str=cat(victim)]);
|
||||
|
||||
if ( hook Scan::port_scan_policy(scanner, victim, scanned_port) )
|
||||
SumStats::observe("scan.port.fail", [$host=scanner, $str=cat(victim)], [$str=cat(scanned_port)]);
|
||||
}
|
||||
|
||||
function is_failed_conn(c: connection): bool
|
||||
{
|
||||
# Sr || ( (hR || ShR) && (data not sent in any direction) )
|
||||
if ( (c$orig$state == TCP_SYN_SENT && c$resp$state == TCP_RESET) ||
|
||||
(((c$orig$state == TCP_RESET && c$resp$state == TCP_SYN_ACK_SENT) ||
|
||||
(c$orig$state == TCP_RESET && c$resp$state == TCP_ESTABLISHED && "S" in c$history )
|
||||
) && /[Dd]/ !in c$history )
|
||||
)
|
||||
return T;
|
||||
return F;
|
||||
}
|
||||
|
||||
function is_reverse_failed_conn(c: connection): bool
|
||||
{
|
||||
# reverse scan i.e. conn dest is the scanner
|
||||
# sR || ( (Hr || sHr) && (data not sent in any direction) )
|
||||
if ( (c$resp$state == TCP_SYN_SENT && c$orig$state == TCP_RESET) ||
|
||||
(((c$resp$state == TCP_RESET && c$orig$state == TCP_SYN_ACK_SENT) ||
|
||||
(c$resp$state == TCP_RESET && c$orig$state == TCP_ESTABLISHED && "s" in c$history )
|
||||
) && /[Dd]/ !in c$history )
|
||||
)
|
||||
return T;
|
||||
return F;
|
||||
}
|
||||
|
||||
event connection_attempt(c: connection)
|
||||
{
|
||||
local is_reverse_scan = F;
|
||||
if ( "H" in c$history )
|
||||
is_reverse_scan = T;
|
||||
|
||||
add_sumstats(c$id, is_reverse_scan);
|
||||
}
|
||||
|
||||
event connection_rejected(c: connection)
|
||||
{
|
||||
local is_reverse_scan = F;
|
||||
if ( "s" in c$history )
|
||||
is_reverse_scan = T;
|
||||
|
||||
add_sumstats(c$id, is_reverse_scan);
|
||||
}
|
||||
|
||||
event connection_reset(c: connection)
|
||||
{
|
||||
if ( is_failed_conn(c) )
|
||||
add_sumstats(c$id, F);
|
||||
else if ( is_reverse_failed_conn(c) )
|
||||
add_sumstats(c$id, T);
|
||||
}
|
||||
|
||||
event connection_pending(c: connection)
|
||||
{
|
||||
if ( is_failed_conn(c) )
|
||||
add_sumstats(c$id, F);
|
||||
else if ( is_reverse_failed_conn(c) )
|
||||
add_sumstats(c$id, T);
|
||||
}
|
|
@ -1,4 +0,0 @@
|
|||
@deprecated "Remove in v6.1. The MQTT scripts have been moved out of policy/ into base and are loaded by default"
|
||||
|
||||
# For those running bare-mode and loading protocols/mqtt from policy.
|
||||
@load base/protocols/mqtt
|
|
@ -97,6 +97,10 @@ redef digest_salt = "Please change this value.";
|
|||
# telemetry_histogram.log.
|
||||
@load frameworks/telemetry/log
|
||||
|
||||
# Enable metrics centralization on the manager. This opens port 9911/tcp
|
||||
# on the manager node that can be readily scraped by Prometheus.
|
||||
# @load frameworks/telemetry/prometheus
|
||||
|
||||
# Uncomment the following line to enable detection of the heartbleed attack. Enabling
|
||||
# this might impact performance a bit.
|
||||
# @load policy/protocols/ssl/heartbleed
|
||||
|
|
|
@ -12,7 +12,10 @@
|
|||
# @load frameworks/control/controllee.zeek
|
||||
# @load frameworks/control/controller.zeek
|
||||
@load frameworks/cluster/experimental.zeek
|
||||
@load frameworks/cluster/nodes-experimental/manager.zeek
|
||||
# Loaded via the above through test-all-policy-cluster.test
|
||||
# when running as a manager, creates cluster.log entries
|
||||
# even in non-cluster mode if loaded like the below.
|
||||
# @load frameworks/cluster/nodes-experimental/manager.zeek
|
||||
@load frameworks/management/agent/__load__.zeek
|
||||
@load frameworks/management/agent/api.zeek
|
||||
@load frameworks/management/agent/boot.zeek
|
||||
|
@ -59,7 +62,6 @@
|
|||
@load frameworks/intel/seen/where-locations.zeek
|
||||
@load frameworks/intel/seen/x509.zeek
|
||||
@load frameworks/netcontrol/catch-and-release.zeek
|
||||
@load frameworks/files/deprecated-txhosts-rxhosts-connuids.zeek
|
||||
@load frameworks/files/detect-MHR.zeek
|
||||
@load frameworks/files/entropy-test-all-files.zeek
|
||||
#@load frameworks/files/extract-all-files.zeek
|
||||
|
@ -75,6 +77,7 @@
|
|||
# @load frameworks/spicy/record-spicy-batch.zeek
|
||||
# @load frameworks/spicy/resource-usage.zeek
|
||||
@load frameworks/software/windows-version-detection.zeek
|
||||
@load frameworks/telemetry/prometheus.zeek
|
||||
@load frameworks/telemetry/log.zeek
|
||||
@load integration/collective-intel/__load__.zeek
|
||||
@load integration/collective-intel/main.zeek
|
||||
|
@ -85,7 +88,6 @@
|
|||
@load misc/load-balancing.zeek
|
||||
@load misc/loaded-scripts.zeek
|
||||
@load misc/profiling.zeek
|
||||
@load misc/scan.zeek
|
||||
@load misc/stats.zeek
|
||||
@load misc/weird-stats.zeek
|
||||
@load misc/trim-trace-file.zeek
|
||||
|
@ -116,7 +118,6 @@
|
|||
@load protocols/krb/ticket-logging.zeek
|
||||
@load protocols/modbus/known-masters-slaves.zeek
|
||||
@load protocols/modbus/track-memmap.zeek
|
||||
#@load protocols/mqtt/__load__.zeek
|
||||
@load protocols/mysql/software.zeek
|
||||
@load protocols/rdp/indicate_ssl.zeek
|
||||
@load protocols/smb/log-cmds.zeek
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
@load test-all-policy.zeek
|
||||
|
||||
# Scripts which are commented out in test-all-policy.zeek.
|
||||
@load protocols/mqtt/__load__.zeek
|
||||
@load protocols/ssl/decryption.zeek
|
||||
@load frameworks/cluster/nodes-experimental/manager.zeek
|
||||
@load frameworks/control/controllee.zeek
|
||||
@load frameworks/control/controller.zeek
|
||||
@load frameworks/management/agent/main.zeek
|
||||
|
|
|
@ -1 +1 @@
|
|||
Subproject commit 410ada8bbe5839807a459a99c1b77221f790f1be
|
||||
Subproject commit c2763e952ea899f86bec2b60f840d38861cefd03
|
|
@ -422,7 +422,6 @@ set(MAIN_SRCS
|
|||
digest.h)
|
||||
|
||||
set(THIRD_PARTY_SRCS
|
||||
3rdparty/bro_inet_ntop.c # Remove in v6.1.
|
||||
3rdparty/zeek_inet_ntop.c
|
||||
3rdparty/bsd-getopt-long.c
|
||||
3rdparty/ConvertUTF.c
|
||||
|
@ -511,6 +510,8 @@ set(zeek_SRCS
|
|||
collect_headers(zeek_HEADERS ${zeek_SRCS})
|
||||
|
||||
add_library(zeek_objs OBJECT ${zeek_SRCS})
|
||||
target_compile_features(zeek_objs PRIVATE ${ZEEK_CXX_STD})
|
||||
set_target_properties(zeek_objs PROPERTIES CXX_EXTENSIONS OFF)
|
||||
target_link_libraries(zeek_objs PRIVATE $<BUILD_INTERFACE:zeek_internal>)
|
||||
target_compile_definitions(zeek_objs PRIVATE ZEEK_CONFIG_SKIP_VERSION_H)
|
||||
add_dependencies(zeek_objs zeek_autogen_files)
|
||||
|
@ -601,6 +602,8 @@ install(
|
|||
PATTERN "*.h"
|
||||
PATTERN "*.pac"
|
||||
PATTERN "3rdparty/*" EXCLUDE
|
||||
# Headers used only during build
|
||||
PATTERN "threading/formatters/detail" EXCLUDE
|
||||
# The "zeek -> ." symlink isn't needed in the install-tree
|
||||
REGEX "${escaped_include_path}$" EXCLUDE
|
||||
# FILES_MATCHING creates empty directories:
|
||||
|
@ -622,7 +625,6 @@ install(
|
|||
|
||||
install(
|
||||
FILES ${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/ConvertUTF.h
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/bro_inet_ntop.h # Remove in v6.1
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/zeek_inet_ntop.h
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/bsd-getopt-long.h
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/modp_numtoa.h
|
||||
|
|
|
@ -1554,7 +1554,7 @@ TableValPtr DNS_Mgr::empty_addr_set()
|
|||
|
||||
// Unit testing coverage for the DNS_Mgr code, including making actual DNS requests to
|
||||
// test responses and timeouts. Note that all of these tests are marked with the skip
|
||||
// decorator, since they take some time to run and this slows down local developement. To
|
||||
// decorator, since they take some time to run and this slows down local development. To
|
||||
// run them manually, pass the --no-skip flag when running tests. These tests are
|
||||
// run automatically as part of CI builds.
|
||||
|
||||
|
@ -1605,7 +1605,7 @@ public:
|
|||
|
||||
/**
|
||||
* Derived testing version of DNS_Mgr so that the Process() method can be exposed
|
||||
* publically. If new unit tests are added, this class should be used over using
|
||||
* publicly. If new unit tests are added, this class should be used over using
|
||||
* DNS_Mgr directly.
|
||||
*/
|
||||
class TestDNS_Mgr final : public DNS_Mgr
|
||||
|
|
|
@ -485,7 +485,7 @@ int dbg_shutdown_debugger()
|
|||
// by the operation argument; the additional arguments are put in the
|
||||
// supplied vector.
|
||||
//
|
||||
// Parse the string into individual tokens, similarily to how shell
|
||||
// Parse the string into individual tokens, similarly to how shell
|
||||
// would do it.
|
||||
|
||||
void tokenize(const char* cstr, string& operation, vector<string>& arguments)
|
||||
|
|
|
@ -191,7 +191,7 @@ protected:
|
|||
std::pair<const char*, size_t> FirstEscapeLoc(const char* bytes, size_t n);
|
||||
|
||||
/**
|
||||
* @param start start of string to check for starting with an espace
|
||||
* @param start start of string to check for starting with an escape
|
||||
* sequence.
|
||||
* @param end one byte past the last character in the string.
|
||||
* @return The number of bytes in the escape sequence that the string
|
||||
|
|
|
@ -169,13 +169,6 @@ public:
|
|||
0 == memcmp(GetKey(), arg_key, key_size);
|
||||
}
|
||||
|
||||
template <typename V>
|
||||
[[deprecated("Remove in v6.1. Access the value in the entry directly.")]] T* GetValue() const
|
||||
{
|
||||
static_assert(std::is_same_v<T*, V>, "Type of DictEntry and type requested are different");
|
||||
return value;
|
||||
}
|
||||
|
||||
bool operator==(const DictEntry& r) const { return Equal(r.GetKey(), r.key_size, r.hash); }
|
||||
bool operator!=(const DictEntry& r) const { return ! Equal(r.GetKey(), r.key_size, r.hash); }
|
||||
};
|
||||
|
|
|
@ -106,8 +106,6 @@ enum ExprTag : int
|
|||
#define NUM_EXPRS (int(EXPR_NOP) + 1)
|
||||
};
|
||||
|
||||
using BroExprTag [[deprecated("Remove in v6.1. Use ExprTag.")]] = ExprTag;
|
||||
|
||||
extern const char* expr_name(ExprTag t);
|
||||
|
||||
class AddToExpr;
|
||||
|
|
src/Func.cc
|
@ -903,6 +903,59 @@ FunctionIngredients::FunctionIngredients(ScopePtr _scope, StmtPtr _body,
|
|||
}
|
||||
}
|
||||
|
||||
zeek::RecordValPtr make_backtrace_element(std::string_view name, const VectorValPtr args,
|
||||
const zeek::detail::Location* loc)
|
||||
{
|
||||
static auto elem_type = id::find_type<RecordType>("BacktraceElement");
|
||||
static auto function_name_idx = elem_type->FieldOffset("function_name");
|
||||
static auto function_args_idx = elem_type->FieldOffset("function_args");
|
||||
static auto file_location_idx = elem_type->FieldOffset("file_location");
|
||||
static auto line_location_idx = elem_type->FieldOffset("line_location");
|
||||
|
||||
auto elem = make_intrusive<RecordVal>(elem_type);
|
||||
elem->Assign(function_name_idx, name.data());
|
||||
elem->Assign(function_args_idx, std::move(args));
|
||||
|
||||
if ( loc )
|
||||
{
|
||||
elem->Assign(file_location_idx, loc->filename);
|
||||
elem->Assign(line_location_idx, loc->first_line);
|
||||
}
|
||||
|
||||
return elem;
|
||||
}
|
||||
|
||||
zeek::VectorValPtr get_current_script_backtrace()
|
||||
{
|
||||
static auto backtrace_type = id::find_type<VectorType>("Backtrace");
|
||||
|
||||
auto rval = make_intrusive<VectorVal>(backtrace_type);
|
||||
|
||||
// The body of the following loop can wind up adding items to
|
||||
// the call stack (because MakeCallArgumentVector() evaluates
|
||||
// default arguments, which can in turn involve calls to script
|
||||
// functions), so we work from a copy of the current call stack
|
||||
// to prevent problems with iterator invalidation.
|
||||
auto cs_copy = zeek::detail::call_stack;
|
||||
|
||||
for ( auto it = cs_copy.rbegin(); it != cs_copy.rend(); ++it )
|
||||
{
|
||||
const auto& ci = *it;
|
||||
if ( ! ci.func )
|
||||
// This happens for compiled code.
|
||||
continue;
|
||||
|
||||
const auto& params = ci.func->GetType()->Params();
|
||||
auto args = MakeCallArgumentVector(ci.args, params);
|
||||
|
||||
auto elem = make_backtrace_element(ci.func->Name(), std::move(args),
|
||||
ci.call ? ci.call->GetLocationInfo() : nullptr);
|
||||
rval->Append(std::move(elem));
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
static void emit_builtin_error_common(const char* msg, Obj* arg, bool unwind)
|
||||
{
|
||||
auto emit = [=](const CallExpr* ce)
|
||||
|
|
src/Func.h
|
@ -364,6 +364,25 @@ private:
|
|||
|
||||
extern std::vector<CallInfo> call_stack;
|
||||
|
||||
/**
|
||||
* Create a single BacktraceElement record val.
|
||||
*
|
||||
* @param name the name of the function.
|
||||
* @param args call argument vector created by MakeCallArgumentVector().
|
||||
* @param loc optional location information of the caller.
|
||||
*
|
||||
* @return record value representing a BacktraceElement.
|
||||
*/
|
||||
zeek::RecordValPtr make_backtrace_element(std::string_view name, const VectorValPtr args,
|
||||
const zeek::detail::Location* loc);
|
||||
|
||||
/**
|
||||
* Create a Zeek script Backtrace of the current script call_stack.
|
||||
*
|
||||
* @return VectorValPtr containing BacktraceElement entries.
|
||||
*/
|
||||
zeek::VectorValPtr get_current_script_backtrace();
|
||||
|
||||
// This is set to true after the built-in functions have been initialized.
|
||||
extern bool did_builtin_init;
|
||||
extern std::vector<void (*)()> bif_initializers;
|
||||
|
|
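These helpers back the script-level Backtrace/BacktraceElement types; a small sketch of the script-side view, assuming the backtrace() built-in function (the where_am_i helper is made up):

function where_am_i()
    {
    local bt = backtrace();
    for ( i in bt )
        {
        # file_location/line_location are only set when location info exists.
        local where = bt[i]?$file_location ? bt[i]$file_location : "<builtin>";
        print fmt("#%d %s (%s)", i, bt[i]$function_name, where);
        }
    }

event zeek_init()
    {
    where_am_i();
    }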
src/Obj.cc
|
@ -60,17 +60,7 @@ int Obj::suppress_errors = 0;
|
|||
Obj::~Obj()
|
||||
{
|
||||
if ( notify_plugins )
|
||||
{
|
||||
#ifdef __GNUC__
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
|
||||
#endif
|
||||
PLUGIN_HOOK_VOID(HOOK_BRO_OBJ_DTOR, HookBroObjDtor(this));
|
||||
#ifdef __GNUC__
|
||||
#pragma GCC diagnostic pop
|
||||
#endif
|
||||
PLUGIN_HOOK_VOID(HOOK_OBJ_DTOR, HookObjDtor(this));
|
||||
}
|
||||
|
||||
delete location;
|
||||
}
|
||||
|
|
|
@ -301,7 +301,7 @@ Options parse_cmdline(int argc, char** argv)
|
|||
fprintf(stderr, "ERROR: C++ unit tests are disabled for this build.\n"
|
||||
" Please re-compile with ENABLE_ZEEK_UNIT_TESTS "
|
||||
"to run the C++ unit tests.\n");
|
||||
usage(argv[0], 1);
|
||||
exit(1);
|
||||
#endif
|
||||
|
||||
auto is_separator = [](const char* cstr)
|
||||
|
|
src/RE.h
|
@ -167,7 +167,7 @@ public:
|
|||
|
||||
const AcceptingMatchSet& AcceptedMatches() const { return accepted_matches; }
|
||||
|
||||
// Returns the number of bytes feeded into the matcher so far
|
||||
// Returns the number of bytes fed into the matcher so far
|
||||
int Length() { return current_pos; }
|
||||
|
||||
// Returns true if this inputs leads to at least one new match.
|
||||
|
|
|
@ -557,7 +557,7 @@ void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out, Conne
|
|||
|
||||
int size = sizeof(tmp);
|
||||
char* buffer = tmp;
|
||||
char* alloced = nullptr;
|
||||
char* allocated = nullptr;
|
||||
|
||||
std::string loc_str;
|
||||
|
||||
|
@ -621,7 +621,7 @@ void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out, Conne
|
|||
|
||||
// Enlarge buffer;
|
||||
size *= 2;
|
||||
buffer = alloced = (char*)realloc(alloced, size);
|
||||
buffer = allocated = (char*)realloc(allocated, size);
|
||||
|
||||
if ( ! buffer )
|
||||
FatalError("out of memory in Reporter");
|
||||
|
@ -740,8 +740,8 @@ void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out, Conne
|
|||
#endif
|
||||
}
|
||||
|
||||
if ( alloced )
|
||||
free(alloced);
|
||||
if ( allocated )
|
||||
free(allocated);
|
||||
}
|
||||
|
||||
bool Reporter::EmitToStderr(bool flag)
|
||||
|
|
|
@ -31,6 +31,7 @@ using StringValPtr = IntrusivePtr<StringVal>;
|
|||
namespace detail
|
||||
{
|
||||
|
||||
class AssertStmt;
|
||||
class Location;
|
||||
class Expr;
|
||||
|
||||
|
@ -59,6 +60,7 @@ class InterpreterException : public ReporterException
|
|||
{
|
||||
protected:
|
||||
friend class Reporter;
|
||||
friend class detail::AssertStmt;
|
||||
InterpreterException() { }
|
||||
};
|
||||
|
||||
|
|
|
@ -23,12 +23,10 @@ public:
|
|||
|
||||
if ( stmt->Tag() == STMT_BREAK && ! BreakStmtIsValid() )
|
||||
Report(stmt, "break statement used outside of for, while or "
|
||||
"switch statement and not within a hook. "
|
||||
"With v6.1 this will become an error.");
|
||||
"switch statement and not within a hook.");
|
||||
|
||||
if ( stmt->Tag() == STMT_NEXT && ! NextStmtIsValid() )
|
||||
Report(stmt, "next statement used outside of for or while statement. "
|
||||
"With v6.1 this will become an error.");
|
||||
Report(stmt, "next statement used outside of for or while statement.");
|
||||
|
||||
return TC_CONTINUE;
|
||||
}
|
||||
|
@ -65,6 +63,8 @@ public:
|
|||
return TC_CONTINUE;
|
||||
}
|
||||
|
||||
void SetHookDepth(int hd) { hook_depth = hd; }
|
||||
|
||||
bool IsValid() const { return valid_script; }
|
||||
|
||||
private:
|
||||
|
@ -86,11 +86,7 @@ private:
|
|||
void Report(const Stmt* stmt, const char* msg)
|
||||
{
|
||||
if ( report )
|
||||
{
|
||||
zeek::reporter->PushLocation(stmt->GetLocationInfo());
|
||||
zeek::reporter->Warning("%s", msg);
|
||||
zeek::reporter->PopLocation();
|
||||
}
|
||||
Error(stmt, msg);
|
||||
|
||||
valid_script = false;
|
||||
}
|
||||
|
@ -107,10 +103,15 @@ void script_validation()
|
|||
traverse_all(&bn_cb);
|
||||
}
|
||||
|
||||
bool script_is_valid(const Stmt* stmt)
|
||||
bool script_is_valid(const Stmt* stmt, bool is_in_hook)
|
||||
{
|
||||
BreakNextScriptValidation bn_cb(false);
|
||||
|
||||
if ( is_in_hook )
|
||||
bn_cb.SetHookDepth(1);
|
||||
|
||||
stmt->Traverse(&bn_cb);
|
||||
|
||||
return bn_cb.IsValid();
|
||||
}
|
||||
|
||||
|
|
|
@ -13,8 +13,12 @@ class Stmt;
|
|||
void script_validation();
|
||||
|
||||
/**
|
||||
* Returns true if the given script statement (body) is valid.
|
||||
* Returns true if the given script statement (body) is valid. The
|
||||
* second argument indicates whether the statement is the body of a hook.
|
||||
*
|
||||
* Unlike script_validation(), does not report any errors, just returns
|
||||
* whether they are present.
|
||||
*/
|
||||
bool script_is_valid(const Stmt* s);
|
||||
bool script_is_valid(const Stmt* s, bool is_in_hook);
|
||||
|
||||
}
|
||||
|
|
|
@ -178,7 +178,7 @@ void ProfileLogger::Log()
|
|||
DNS_Mgr::Stats dstats;
|
||||
dns_mgr->GetStats(&dstats);
|
||||
|
||||
file->Write(util::fmt("%.06f DNS_Mgr: requests=%lu succesful=%lu failed=%lu pending=%lu "
|
||||
file->Write(util::fmt("%.06f DNS_Mgr: requests=%lu successful=%lu failed=%lu pending=%lu "
|
||||
"cached_hosts=%lu cached_addrs=%lu\n",
|
||||
run_state::network_time, dstats.requests, dstats.successful,
|
||||
dstats.failed, dstats.pending, dstats.cached_hosts,
|
||||
|
|
src/Stmt.cc
|
@ -54,6 +54,7 @@ const char* stmt_name(StmtTag t)
|
|||
"ZAM",
|
||||
"ZAM-resumption",
|
||||
"null",
|
||||
"assert",
|
||||
};
|
||||
|
||||
return stmt_names[int(t)];
|
||||
|
@ -1864,6 +1865,140 @@ TraversalCode NullStmt::Traverse(TraversalCallback* cb) const
|
|||
HANDLE_TC_STMT_POST(tc);
|
||||
}
|
||||
|
||||
AssertStmt::AssertStmt(ExprPtr arg_cond, ExprPtr arg_msg)
|
||||
: Stmt(STMT_ASSERT), cond(std::move(arg_cond)), msg(std::move(arg_msg))
|
||||
{
|
||||
if ( ! IsBool(cond->GetType()->Tag()) )
|
||||
cond->Error("conditional must be boolean");
|
||||
|
||||
if ( msg && ! IsString(msg->GetType()->Tag()) )
|
||||
msg->Error("message must be string");
|
||||
}
|
||||
|
||||
ValPtr AssertStmt::Exec(Frame* f, StmtFlowType& flow)
|
||||
{
|
||||
RegisterAccess();
|
||||
flow = FLOW_NEXT;
|
||||
|
||||
static auto assertion_failure_hook = id::find_func("assertion_failure");
|
||||
static auto assertion_result_hook = id::find_func("assertion_result");
|
||||
|
||||
bool run_result_hook = assertion_result_hook && assertion_result_hook->HasEnabledBodies();
|
||||
bool run_failure_hook = assertion_failure_hook && assertion_failure_hook->HasEnabledBodies();
|
||||
|
||||
auto assert_result = cond->Eval(f)->AsBool();
|
||||
|
||||
if ( assert_result && ! run_result_hook )
|
||||
return Val::nil;
|
||||
|
||||
// Textual representation of cond from the AST.
|
||||
static zeek::ODesc desc;
|
||||
desc.Clear();
|
||||
desc.SetShort(true);
|
||||
desc.SetQuotes(true);
|
||||
cond->Describe(&desc);
|
||||
auto cond_val = zeek::make_intrusive<zeek::StringVal>(desc.Len(), (const char*)desc.Bytes());
|
||||
|
||||
zeek::StringValPtr msg_val = zeek::val_mgr->EmptyString();
|
||||
if ( msg )
|
||||
{
|
||||
// Eval() may fail if expression assumes assert
|
||||
// condition is F, but we still try to get it for
|
||||
// the assertion_result hook.
|
||||
try
|
||||
{
|
||||
msg_val = cast_intrusive<zeek::StringVal>(msg->Eval(f));
|
||||
}
|
||||
catch ( InterpreterException& e )
|
||||
{
|
||||
desc.Clear();
|
||||
desc.Add("<error eval ");
|
||||
msg->Describe(&desc);
|
||||
desc.Add(">");
|
||||
msg_val = zeek::make_intrusive<zeek::StringVal>(desc.Len(), (const char*)desc.Bytes());
|
||||
}
|
||||
}
|
||||
|
||||
VectorValPtr bt = nullptr;
|
||||
if ( run_result_hook || run_failure_hook )
|
||||
{
|
||||
bt = get_current_script_backtrace();
|
||||
auto assert_elem = make_backtrace_element("assert", MakeEmptyCallArgumentVector(),
|
||||
GetLocationInfo());
|
||||
bt->Insert(0, std::move(assert_elem));
|
||||
}
|
||||
|
||||
// Breaking from either the assertion_failure() or assertion_result()
|
||||
// hook can be used to suppress the default log message.
|
||||
bool report_error = true;
|
||||
|
||||
if ( run_result_hook )
|
||||
report_error &= assertion_result_hook
|
||||
->Invoke(zeek::val_mgr->Bool(assert_result), cond_val, msg_val, bt)
|
||||
->AsBool();
|
||||
|
||||
if ( assert_result )
|
||||
return Val::nil;
|
||||
|
||||
if ( run_failure_hook )
|
||||
report_error &= assertion_failure_hook->Invoke(cond_val, msg_val, bt)->AsBool();
|
||||
|
||||
if ( report_error )
|
||||
{
|
||||
std::string reporter_msg = util::fmt("assertion failure: %s", cond_val->CheckString());
|
||||
if ( msg_val->Len() > 0 )
|
||||
reporter_msg += util::fmt(" (%s)", msg_val->CheckString());
|
||||
|
||||
reporter->PushLocation(GetLocationInfo());
|
||||
reporter->Error("%s", reporter_msg.c_str());
|
||||
reporter->PopLocation();
|
||||
}
|
||||
|
||||
throw InterpreterException();
|
||||
}
|
||||
|
||||
void AssertStmt::StmtDescribe(ODesc* d) const
|
||||
{
|
||||
Stmt::StmtDescribe(d);
|
||||
|
||||
// Quoting strings looks better when describing assert
|
||||
// statements. So turn it on explicitly.
|
||||
//
|
||||
// E.g., md5_hash("") ends up as md5_hash() without quoting.
|
||||
auto orig_quotes = d->WantQuotes();
|
||||
d->SetQuotes(true);
|
||||
|
||||
cond->Describe(d);
|
||||
|
||||
if ( msg )
|
||||
{
|
||||
d->Add(",");
|
||||
d->SP();
|
||||
msg->Describe(d);
|
||||
}
|
||||
|
||||
DescribeDone(d);
|
||||
|
||||
d->SetQuotes(orig_quotes);
|
||||
}
|
||||
|
||||
TraversalCode AssertStmt::Traverse(TraversalCallback* cb) const
|
||||
{
|
||||
TraversalCode tc = cb->PreStmt(this);
|
||||
HANDLE_TC_STMT_PRE(tc);
|
||||
|
||||
tc = cond->Traverse(cb);
|
||||
HANDLE_TC_STMT_PRE(tc);
|
||||
if ( msg )
|
||||
{
|
||||
tc = msg->Traverse(cb);
|
||||
HANDLE_TC_STMT_PRE(tc);
|
||||
}
|
||||
|
||||
tc = cb->PostStmt(this);
|
||||
HANDLE_TC_STMT_POST(tc);
|
||||
}
|
||||
|
||||
WhenInfo::WhenInfo(ExprPtr arg_cond, FuncType::CaptureList* arg_cl, bool arg_is_return)
|
||||
: cond(std::move(arg_cond)), cl(arg_cl), is_return(arg_is_return)
|
||||
{
|
||||
|
|
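To make the assert control flow concrete, a hedged script-level sketch (the hook signatures are inferred from the Invoke() calls in the hunk above; breaking from the hook suppresses the default reporter error, as described there):

hook assertion_failure(cond: string, msg: string, bt: Backtrace)
    {
    local where = ( |bt| > 0 && bt[0]?$file_location ) ? bt[0]$file_location : "<unknown>";
    print fmt("assert failed: %s (%s) at %s", cond, msg, where);
    break;  # suppress Zeek's default "assertion failure" reporter error
    }

event zeek_init()
    {
    local x = 2 + 2;
    assert x == 4, fmt("x should be 4, got %d", x);
    assert x == 5, "this one fails and runs the hook above";
    }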
src/Stmt.h
|
@ -544,6 +544,28 @@ private:
|
|||
bool is_directive;
|
||||
};
|
||||
|
||||
class AssertStmt final : public Stmt
|
||||
{
|
||||
public:
|
||||
explicit AssertStmt(ExprPtr cond, ExprPtr msg = nullptr);
|
||||
|
||||
ValPtr Exec(Frame* f, StmtFlowType& flow) override;
|
||||
|
||||
void StmtDescribe(ODesc* d) const override;
|
||||
|
||||
TraversalCode Traverse(TraversalCallback* cb) const override;
|
||||
|
||||
// Optimization-related:
|
||||
StmtPtr Duplicate() override;
|
||||
|
||||
bool IsReduced(Reducer* c) const override;
|
||||
StmtPtr DoReduce(Reducer* c) override;
|
||||
|
||||
private:
|
||||
ExprPtr cond;
|
||||
ExprPtr msg;
|
||||
};
|
||||
|
||||
// A helper class for tracking all of the information associated with
|
||||
// a "when" statement, and constructing the necessary components in support
|
||||
// of lambda-style captures.
|
||||
|
@ -617,7 +639,7 @@ private:
|
|||
|
||||
bool is_return = false;
|
||||
|
||||
// The name of parameter passed ot the lambda.
|
||||
// The name of parameter passed to the lambda.
|
||||
std::string lambda_param_id;
|
||||
|
||||
// The expression for constructing the lambda, and its type.
|
||||
|
|
|
@ -29,6 +29,7 @@ namespace detail
|
|||
class CompositeHash;
|
||||
class Frame;
|
||||
|
||||
class AssertStmt;
|
||||
class CatchReturnStmt;
|
||||
class ExprStmt;
|
||||
class ForStmt;
|
||||
|
@ -94,6 +95,7 @@ public:
|
|||
const WhenStmt* AsWhenStmt() const;
|
||||
const SwitchStmt* AsSwitchStmt() const;
|
||||
const NullStmt* AsNullStmt() const;
|
||||
const AssertStmt* AsAssertStmt() const;
|
||||
|
||||
void RegisterAccess() const
|
||||
{
|
||||
|
|
|
@ -32,8 +32,9 @@ enum StmtTag
|
|||
STMT_CPP, // compiled C++
|
||||
STMT_ZAM, // a ZAM function body
|
||||
STMT_ZAM_RESUMPTION, // resumes ZAM execution for "when" statements
|
||||
STMT_NULL
|
||||
#define NUM_STMTS (int(STMT_NULL) + 1)
|
||||
STMT_NULL,
|
||||
STMT_ASSERT,
|
||||
#define NUM_STMTS (int(STMT_ASSERT) + 1)
|
||||
};
|
||||
|
||||
enum StmtFlowType
|
||||
|
|
|
@ -92,7 +92,7 @@ public:
|
|||
|
||||
void Describe(ODesc* d) const override;
|
||||
|
||||
// Overidden from Notifier. We queue the trigger and evaluate it
|
||||
// Overridden from Notifier. We queue the trigger and evaluate it
|
||||
// later to avoid race conditions.
|
||||
void Modified(zeek::notifier::detail::Modifiable* m) override;
|
||||
|
||||
|
|
src/Type.cc
|
@ -1398,18 +1398,6 @@ void RecordType::AddFieldsDirectly(const type_decl_list& others, bool add_log_at
|
|||
num_fields = types->length();
|
||||
}
|
||||
|
||||
void RecordType::Create(std::vector<std::optional<ZVal>>& r) const
|
||||
{
|
||||
for ( auto& di : deferred_inits )
|
||||
if ( di )
|
||||
r.push_back(di->Generate());
|
||||
else
|
||||
r.push_back(std::nullopt);
|
||||
|
||||
for ( auto& ci : creation_inits )
|
||||
r[ci.first] = ci.second->Generate();
|
||||
}
|
||||
|
||||
void RecordType::DescribeFields(ODesc* d) const
|
||||
{
|
||||
if ( d->IsReadable() )
|
||||
|
@ -2692,31 +2680,6 @@ TypePtr merge_types(const TypePtr& arg_t1, const TypePtr& arg_t2)
|
|||
}
|
||||
}
|
||||
|
||||
TypePtr merge_type_list(detail::ListExpr* elements)
|
||||
{
|
||||
TypeList* tl_type = elements->GetType()->AsTypeList();
|
||||
const auto& tl = tl_type->GetTypes();
|
||||
|
||||
if ( tl.size() < 1 )
|
||||
{
|
||||
reporter->Error("no type can be inferred for empty list");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto t = tl[0];
|
||||
|
||||
if ( tl.size() == 1 )
|
||||
return t;
|
||||
|
||||
for ( size_t i = 1; t && i < tl.size(); ++i )
|
||||
t = merge_types(t, tl[i]);
|
||||
|
||||
if ( ! t )
|
||||
reporter->Error("inconsistent types in list");
|
||||
|
||||
return t;
|
||||
}
|
||||
|
||||
TypePtr maximal_type(detail::ListExpr* elements)
|
||||
{
|
||||
TypeList* tl_type = elements->GetType()->AsTypeList();
|
||||
|
|
src/Type.h
|
@ -688,15 +688,6 @@ public:
|
|||
|
||||
void AddFieldsDirectly(const type_decl_list& types, bool add_log_attr = false);
|
||||
|
||||
/**
|
||||
*
|
||||
* Populates a new instance of the record with its initial values.
|
||||
* @param r The record's underlying value vector.
|
||||
*/
|
||||
[[deprecated("Remove in v6.1. Construct a corresponding RecordVal and build vector from "
|
||||
"GetFieldAs() calls.")]] void
|
||||
Create(std::vector<std::optional<ZVal>>& r) const;
|
||||
|
||||
void DescribeReST(ODesc* d, bool roles_only = false) const override;
|
||||
void DescribeFields(ODesc* d) const;
|
||||
void DescribeFieldsReST(ODesc* d, bool func_args) const;
|
||||
|
@ -949,12 +940,6 @@ extern TypeTag max_type(TypeTag t1, TypeTag t2);
|
|||
// an error message) if the types are incompatible.
|
||||
TypePtr merge_types(const TypePtr& t1, const TypePtr& t2);
|
||||
|
||||
// Given a list of expressions, returns a (ref'd) type reflecting
|
||||
// a merged type consistent across all of them, or nil if this
|
||||
// cannot be done.
|
||||
[[deprecated("Remove in v6.1. Use maximal_type() if possible. See GH-2604.")]] TypePtr
|
||||
merge_type_list(detail::ListExpr* elements);
|
||||
|
||||
// Given a list of expressions, returns the maximal type consistent across
|
||||
// all of them, or nil if this cannot be done. "Maximal" incorporates
|
||||
// notions of arithmetic coercion, but otherwise requires type-equivalence.
|
||||
|
|
|
@ -98,5 +98,3 @@ inline UID& UID::operator=(const UID& other)
|
|||
}
|
||||
|
||||
} // namespace zeek
|
||||
|
||||
constexpr int BRO_UID_LEN [[deprecated("Remove in v6.1. Use zeek::UID_LEN")]] = zeek::UID_LEN;
|
||||
|
|
src/Val.cc
|
@ -39,7 +39,7 @@
|
|||
#include "zeek/broker/Data.h"
|
||||
#include "zeek/broker/Manager.h"
|
||||
#include "zeek/broker/Store.h"
|
||||
#include "zeek/threading/formatters/JSON.h"
|
||||
#include "zeek/threading/formatters/detail/json.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
@ -404,8 +404,8 @@ TableValPtr Val::GetRecordFields()
|
|||
|
||||
// This is a static method in this file to avoid including rapidjson's headers in Val.h because
|
||||
// they're huge.
|
||||
static void BuildJSON(threading::formatter::JSON::NullDoubleWriter& writer, Val* val,
|
||||
bool only_loggable = false, RE_Matcher* re = nullptr, const string& key = "")
|
||||
static void BuildJSON(json::detail::NullDoubleWriter& writer, Val* val, bool only_loggable = false,
|
||||
RE_Matcher* re = nullptr, const string& key = "")
|
||||
{
|
||||
if ( ! key.empty() )
|
||||
writer.Key(key);
|
||||
|
@ -509,7 +509,7 @@ static void BuildJSON(threading::formatter::JSON::NullDoubleWriter& writer, Val*
|
|||
else
|
||||
{
|
||||
rapidjson::StringBuffer buffer;
|
||||
threading::formatter::JSON::NullDoubleWriter key_writer(buffer);
|
||||
json::detail::NullDoubleWriter key_writer(buffer);
|
||||
BuildJSON(key_writer, entry_key, only_loggable, re);
|
||||
string key_str = buffer.GetString();
|
||||
|
||||
|
@ -612,7 +612,7 @@ static void BuildJSON(threading::formatter::JSON::NullDoubleWriter& writer, Val*
|
|||
StringValPtr Val::ToJSON(bool only_loggable, RE_Matcher* re)
|
||||
{
|
||||
rapidjson::StringBuffer buffer;
|
||||
threading::formatter::JSON::NullDoubleWriter writer(buffer);
|
||||
json::detail::NullDoubleWriter writer(buffer);
|
||||
|
||||
BuildJSON(writer, this, only_loggable, re, "");
|
||||
|
||||
|
@ -1375,7 +1375,7 @@ static std::variant<ValPtr, std::string> BuildVal(const rapidjson::Value& j, con
|
|||
}
|
||||
|
||||
default:
|
||||
return util::fmt("type '%s' unsupport", type_name(t->Tag()));
|
||||
return util::fmt("type '%s' unsupported", type_name(t->Tag()));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2425,7 +2425,7 @@ void TableVal::SendToStore(const Val* index, const TableEntryVal* new_entry_val,
|
|||
{
|
||||
if ( attrs->Find(detail::ATTR_EXPIRE_CREATE) )
|
||||
{
|
||||
// for create expiry, we have to substract the already elapsed time from
|
||||
// for create expiry, we have to subtract the already elapsed time from
|
||||
// the expiry.
|
||||
auto e = expire_time -
|
||||
(run_state::network_time - new_entry_val->ExpireAccessTime());
|
||||
|
@ -2720,7 +2720,7 @@ void TableVal::Describe(ODesc* d) const
|
|||
|
||||
void TableVal::InitDefaultFunc(detail::Frame* f)
|
||||
{
|
||||
// Value aready initialized.
|
||||
// Value already initialized.
|
||||
if ( def_val )
|
||||
return;
|
||||
|
||||
|
|
src/Var.cc
|
@@ -195,18 +195,7 @@ static void make_var(const IDPtr& id, TypePtr t, InitClass c, ExprPtr init,
        {
        // This can happen because the grammar allows any "init_class",
        // including none, to be followed by an expression.
        // Remove in v6.1 (make an error)
        reporter->Deprecation(
            util::fmt("Remove in v6.1. Initialization not preceded by =/+=/-= is deprecated. (%s)",
                      obj_desc_short(init.get()).c_str()),
            init->GetLocationInfo());

        // The historical instances of these, such as the
        // language/redef-same-prefixtable-idx.zeek btest, treat
        // this as += rather than =, and with the initializer
        // implicitly inside a list.
        init = make_intrusive<ListExpr>(init);
        c = INIT_EXTRA;
        init->Error("Initialization not preceded by =/+=/-= is not allowed.");
        }

    if ( init && init->Tag() == EXPR_LIST )

@@ -52,4 +52,10 @@ VectorValPtr MakeCallArgumentVector(const Args& vals, const RecordTypePtr& types
    return rval;
    }

VectorValPtr MakeEmptyCallArgumentVector()
    {
    static auto call_argument_vector = id::find_type<VectorType>("call_argument_vector");
    return make_intrusive<VectorVal>(call_argument_vector);
    }

} // namespace zeek

@@ -39,4 +39,11 @@ Args val_list_to_args(const ValPList& vl);
 */
VectorValPtr MakeCallArgumentVector(const Args& vals, const RecordTypePtr& types);

/**
 * Creates an empty "call_argument_vector" vector.
 *
 * @return empty vector of script-level type "call_argument_vector"
 */
VectorValPtr MakeEmptyCallArgumentVector();

} // namespace zeek
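
A small usage sketch for the new helper. The call site below is hypothetical and only illustrates
the intent (start from an empty vector of script type "call_argument_vector" and fill it later),
assuming the declaration lives alongside MakeCallArgumentVector in ZeekArgs.h:

#include "zeek/Val.h"
#include "zeek/ZeekArgs.h"

// Hypothetical call site: build an empty call_argument_vector up front and
// append call_argument records as they become known.
zeek::VectorValPtr BuildHookArgs()
    {
    auto args = zeek::MakeEmptyCallArgumentVector();
    // args->Size() == 0 here; elements can be added with args->Assign(i, val)
    // before the vector is handed to an event.
    return args;
    }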

@@ -17,3 +17,7 @@ set(ZEEK_CMAKE_INSTALL_PREFIX "@CMAKE_INSTALL_PREFIX@"
# the package directory.
set(ZEEK_PLUGIN_SCRIPTS_PATH "${ZEEK_CMAKE_CONFIG_DIR}"
    CACHE PATH "Path to utility scripts for building Zeek plugins." FORCE)

# The CMAKE_BUILD_TYPE type to use for external plugins if not overridden.
set(ZEEK_CMAKE_BUILD_TYPE "@CMAKE_BUILD_TYPE@"
    CACHE PATH "Internal Zeek variable: CMAKE_BUILD_TYPE of Zeek." FORCE)

@@ -104,9 +104,6 @@ public:
    static constexpr int ZEEK_STRING_LITERAL = // as in a Zeek string literal
        ESC_ESC | ESC_QUOT | ESC_HEX;

    static constexpr int BRO_STRING_LITERAL
        [[deprecated("Remove in v6.1. Use ZEEK_STRING_LITERAL.")]] = ZEEK_STRING_LITERAL;

    // Renders a string into a newly allocated character array that
    // you have to delete[]. You can combine the render styles given
    // above to achieve the representation you desire. If you pass a

@@ -696,11 +696,6 @@ void Analyzer::EnqueueAnalyzerConfirmationInfo(const zeek::Tag& arg_tag)
    event_mgr.Enqueue(analyzer_confirmation_info, arg_tag.AsVal(), info);
    }

void Analyzer::EnqueueAnalyzerConfirmation(const zeek::Tag& arg_tag)
    {
    event_mgr.Enqueue(analyzer_confirmation, ConnVal(), arg_tag.AsVal(), val_mgr->Count(id));
    }

void Analyzer::AnalyzerConfirmation(zeek::Tag arg_tag)
    {
    if ( analyzer_confirmed )

@@ -712,9 +707,6 @@ void Analyzer::AnalyzerConfirmation(zeek::Tag arg_tag)

    if ( analyzer_confirmation_info )
        EnqueueAnalyzerConfirmationInfo(effective_tag);

    if ( analyzer_confirmation )
        EnqueueAnalyzerConfirmation(effective_tag);
    }

void Analyzer::EnqueueAnalyzerViolationInfo(const char* reason, const char* data, int len,

@@ -736,25 +728,6 @@ void Analyzer::EnqueueAnalyzerViolationInfo(const char* reason, const char* data
    event_mgr.Enqueue(analyzer_violation_info, arg_tag.AsVal(), info);
    }

void Analyzer::EnqueueAnalyzerViolation(const char* reason, const char* data, int len,
                                        const zeek::Tag& arg_tag)
    {
    StringValPtr r;

    if ( data && len )
        {
        const char* tmp = util::copy_string(reason);
        r = make_intrusive<StringVal>(util::fmt(
            "%s [%s%s]", tmp, util::fmt_bytes(data, min(40, len)), len > 40 ? "..." : ""));
        delete[] tmp;
        }
    else
        r = make_intrusive<StringVal>(reason);

    event_mgr.Enqueue(analyzer_violation, ConnVal(), arg_tag.AsVal(), val_mgr->Count(id),
                      std::move(r));
    }

void Analyzer::AnalyzerViolation(const char* reason, const char* data, int len, zeek::Tag arg_tag)
    {
    const auto& effective_tag = arg_tag ? arg_tag : tag;

@@ -771,9 +744,6 @@ void Analyzer::AnalyzerViolation(const char* reason, const char* data, int len,

    if ( analyzer_violation_info )
        EnqueueAnalyzerViolationInfo(reason, data, len, effective_tag);

    if ( analyzer_violation )
        EnqueueAnalyzerViolation(reason, data, len, effective_tag);
    }

void Analyzer::AddTimer(analyzer_timer_func timer, double t, bool do_expire,

@@ -466,7 +466,7 @@ public:
    Analyzer* GetChildAnalyzer(const zeek::Tag& tag) const;

    /**
     * Recursively searches all (direct or indirect) childs of the
     * Recursively searches all (direct or indirect) children of the
     * analyzer for an analyzer with a specific ID.
     *
     * @param id The analyzer id to search. This is the ID that GetID()

@@ -477,7 +477,7 @@ public:
    virtual Analyzer* FindChild(ID id);

    /**
     * Recursively searches all (direct or indirect) childs of the
     * Recursively searches all (direct or indirect) children of the
     * analyzer for an analyzer of a given type.
     *
     * @param tag The analyzer type to search.

@@ -488,7 +488,7 @@ public:
    virtual Analyzer* FindChild(zeek::Tag tag);

    /**
     * Recursively searches all (direct or indirect) childs of the
     * Recursively searches all (direct or indirect) children of the
     * analyzer for an analyzer of a given type.
     *
     * @param name The name of the analyzer type to search (e.g.,

@@ -737,17 +737,10 @@ private:
    // Internal helper to raise analyzer_confirmation events
    void EnqueueAnalyzerConfirmationInfo(const zeek::Tag& arg_tag);

    // Remove in v6.1 - internal helper to raise analyzer_confirmation
    void EnqueueAnalyzerConfirmation(const zeek::Tag& arg_tag);

    // Internal helper to raise analyzer_violation_info
    void EnqueueAnalyzerViolationInfo(const char* reason, const char* data, int len,
                                      const zeek::Tag& arg_tag);

    // Remove in v6.1 - internal helper to raise analyzer_violation
    void EnqueueAnalyzerViolation(const char* reason, const char* data, int len,
                                  const zeek::Tag& arg_tag);

    zeek::Tag tag;
    ID id;

@@ -297,7 +297,7 @@ public:
     * @param init True if the newly added analyzers should be
     * immediately initialized.
     *
     * @param root If given, the scheduled analyzers will become childs
     * @param root If given, the scheduled analyzers will become children
     * of this; if not given the connection's root analyzer is used
     * instead.
     *

@@ -66,13 +66,13 @@
// (excluding CRC fields) in the current DNP3 packet.
//
// Since "Len" is of size one byte, the largest length it can represent is
// 255 bytes. The larget DNP3 Application Layer size is "255 - 5 + size of
// 255 bytes. The largest DNP3 Application Layer size is "255 - 5 + size of
// all CRC fields". "minus 5" is coming from the 5 bytes after "Len" field in
// the DNP3 Link Layer, i.e. Ctrl Dest_LSB Dest_MSB Src_LSB Src_MSB Hence,
// the largest size of a DNP3 Packet (DNP3 Data Link Layer : DNP3 Transport
// Layer : DNP3 Application Layer) can only be 292 bytes.
//
// The "Len" field indicates the length of of a single chunk of DNP3 Psuedo
// The "Len" field indicates the length of of a single chunk of DNP3 Pseudo
// Application Layer data instead of the whole DNP3 Application Layer
// Fragment. However, we can not know the whole length of the DNP3
// Application Layer Fragment (which Binpac would normally need) until all
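
The 292-byte bound mentioned in that comment can be reproduced directly: the 10-byte link-layer
header block (which carries its own CRC), plus at most 255 - 5 = 250 bytes of transport and
application data, plus one 2-byte CRC for every 16-byte block of that data. A sketch of the
arithmetic, using the constants as the comment describes them:

#include <cassert>

int main()
    {
    const int header_block = 10;        // DNP3 link-layer header incl. its CRC
    const int max_len_field = 255;      // "Len" is a single byte
    const int counted_hdr_bytes = 5;    // Ctrl, Dest LSB/MSB, Src LSB/MSB
    const int user_data = max_len_field - counted_hdr_bytes;   // 250 bytes
    const int crc_blocks = (user_data + 15) / 16;              // 16 CRC'd blocks
    const int max_frame = header_block + user_data + 2 * crc_blocks;
    assert(max_frame == 292); // the largest possible DNP3 packet
    return 0;
    }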

@@ -775,7 +775,7 @@ type FrozenCounter16woFlag = record {

# g21v11 and g21v12 are obsolete

# Conter event g22
# Counter event g22

# g22v1
type CounterEve32wFlag = record {

@@ -807,7 +807,7 @@ type CounterEve16wFlagTime = record {

# g22v7 g22v8 obsolete

# Conter event g23
# Counter event g23

# g23v1
type FrozenCounterEve32wFlag = record {

@@ -72,7 +72,7 @@ enum RR_Type
    TYPE_NSEC3 = 50,
    TYPE_NSEC3PARAM = 51, ///< Contains the NSEC3 parameters (RFC 5155)
    TYPE_SVCB =
        64, ///< SerViCe Binding (RFC draft:
        64, ///< Service Binding (RFC draft:
            ///< https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-07#section-1.1)
    TYPE_HTTPS = 65, ///< HTTPS record (HTTPS specific SVCB resource record)
    // Obsoleted

@@ -335,7 +335,6 @@ event dns_WKS_reply%(c: connection, msg: dns_msg, ans: dns_answer%);
## dns_max_queries dns_session_timeout dns_skip_addl
## dns_skip_all_addl dns_skip_all_auth dns_skip_auth
event dns_HINFO_reply%(c: connection, msg: dns_msg, ans: dns_answer, cpu: string, os: string%);
event dns_HINFO_reply%(c: connection, msg: dns_msg, ans: dns_answer%) &deprecated="Remove in v5.2. Use the definition with the extra parameters for cpu and os.";

## Generated for DNS replies of type *MX*. For replies with multiple answers, an
## individual event of the corresponding type is raised for each.

@@ -1,6 +1,16 @@
type GSSAPI_NEG_TOKEN(is_orig: bool) = record {
type GSSAPI_SELECT(is_orig: bool) = record {
    wrapper : ASN1EncodingMeta;
    token: case tok_id of {
        0x0404  -> mic_blob:  bytestring &restofdata;
        0x0504  -> wrap_blob: bytestring &restofdata;
        default -> neg_token: GSSAPI_NEG_TOKEN(is_orig, is_init);
    } &requires(is_init) &requires(tok_id);
} &let {
    is_init: bool = wrapper.tag == 0x60;
    tok_id: uint32 = (wrapper.tag << 8) | wrapper.len;
} &byteorder=littleendian;

type GSSAPI_NEG_TOKEN(is_orig: bool, is_init: bool) = record {
    have_oid : case is_init of {
        true  -> oid    : ASN1Encoding;
        false -> no_oid : empty;

@@ -13,8 +23,6 @@ type GSSAPI_NEG_TOKEN(is_orig: bool) = record {
        true  -> init : GSSAPI_NEG_TOKEN_INIT;
        false -> resp : GSSAPI_NEG_TOKEN_RESP;
    };
} &let {
    is_init: bool = wrapper.tag == 0x60;
} &byteorder=littleendian;

type GSSAPI_NEG_TOKEN_INIT = record {
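
The new GSSAPI_SELECT type keys on the first two bytes of the token: SPNEGO negotiation tokens
start with an ASN.1 tag/length pair (0x60 for the initial context token), while RFC 4121
per-message tokens begin with the raw TOK_ID bytes 04 04 (MIC) or 05 04 (Wrap). A small sketch of
the same dispatch outside binpac, with hypothetical byte values:

#include <cassert>
#include <cstdint>

// Sketch: classify a GSS-API token from its first two bytes, mirroring
// GSSAPI_SELECT's tok_id = (first_byte << 8) | second_byte.
enum class TokKind { Mic, Wrap, Negotiation };

static TokKind Classify(uint8_t b0, uint8_t b1)
    {
    uint32_t tok_id = (static_cast<uint32_t>(b0) << 8) | b1;
    switch ( tok_id )
        {
        case 0x0404: return TokKind::Mic;          // RFC 4121 MIC token
        case 0x0504: return TokKind::Wrap;         // RFC 4121 Wrap token
        default:     return TokKind::Negotiation;  // e.g. 0x60 ... SPNEGO
        }
    }

int main()
    {
    assert(Classify(0x04, 0x04) == TokKind::Mic);
    assert(Classify(0x05, 0x04) == TokKind::Wrap);
    assert(Classify(0x60, 0x48) == TokKind::Negotiation); // hypothetical length byte
    return 0;
    }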

@@ -23,7 +23,7 @@ connection GSSAPI_Conn(zeek_analyzer: ZeekAnalyzer) {

# Now we define the flow:
flow GSSAPI_Flow(is_orig: bool) {
    datagram = GSSAPI_NEG_TOKEN(is_orig) withcontext(connection, this);
    datagram = GSSAPI_SELECT(is_orig) withcontext(connection, this);
};

%include gssapi-analyzer.pac

@@ -1293,7 +1293,7 @@ int HTTP_Analyzer::HTTP_RequestLine(const char* line, const char* end_of_line)

    // If we determined HTTP/0.9 (no HTTP/ in the request line), assert that
    // minimally we have an URI and a 3 character method (HTTP 0.9 only
    // supports GET). If that doesn't hold, probably not HTTP or very stange.
    // supports GET). If that doesn't hold, probably not HTTP or very strange.
    if ( request_version == HTTP_VersionNumber{0, 9} )
        {
        bool maybe_get_method = (end_of_method - line) >= 3;

@@ -1079,7 +1079,7 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig)

void IRC_Analyzer::StartTLS()
    {
    // STARTTLS was succesful. Remove support analyzers, add SSL
    // STARTTLS was successful. Remove support analyzers, add SSL
    // analyzer, and throw event signifying the change.
    starttls = true;

@@ -72,7 +72,7 @@ private:
     * \brief Splits a string into its words which are separated by
     * the split character.
     *
     * \param input string which will be splitted
     * \param input string which will be split
     * \param split character which separates the words
     * \return vector containing words
     */

@@ -48,7 +48,7 @@ type LengthEncodedStringArg(first_byte: uint8) = record {
public:
    int operator()(uint24le* num) const
        {
        // Convert 24bit little endian int parsed as 3 uint8 into host endianess.
        // Convert 24bit little endian int parsed as 3 uint8 into host endianness.
        return (num->byte1() << 16) | (num->byte2() << 8) | num->byte3();
        }

@@ -806,7 +806,7 @@ void POP3_Analyzer::ProcessReply(int length, const char* line)

void POP3_Analyzer::StartTLS()
    {
    // STARTTLS was succesful. Remove support analyzers, add SSL
    // STARTTLS was successful. Remove support analyzers, add SSL
    // analyzer, and throw event signifying the change.
    RemoveSupportAnalyzer(cl_orig);
    RemoveSupportAnalyzer(cl_resp);
|
|||
##
|
||||
## c: The connection record for the underlying transport-layer session/flow.
|
||||
##
|
||||
## result: whether or not authentication was succesful
|
||||
## result: whether or not authentication was successful
|
||||
event rfb_auth_result%(c: connection, result: bool%);
|
||||
|
||||
## Generated for RFB event share flag messages
|
||||
|
|
|

@@ -224,8 +224,8 @@ type PixelData(encoding: int32, x: uint16, y: uint16, w: uint16, h: uint16) = ca
    15 -> trle: PD_TRLE;
    16 -> zrle: PD_ZRLE;
    # TODO: binpac is not happy with negative values here
    #-239 -> cursor_pseudo: PD_PsuedoCursor;
    #-223 -> desktop_size: PD_PsuedoDesktopSize;
    #-239 -> cursor_pseudo: PD_PseudoCursor;
    #-223 -> desktop_size: PD_PseudoDesktopSize;
};

type PD_Raw(w: uint16, h: uint16) = record {

@@ -266,12 +266,12 @@ type PD_ZRLE = record {
    zlib_data: bytestring &length=len &transient;
} &length=(4 + len);

type PD_PsuedoCursor(w: uint16, h: uint16) = record {
type PD_PseudoCursor(w: uint16, h: uint16) = record {
    pixels: bytestring &length=(w * h * $context.connection.get_bytes_per_pixel()) &transient;
    bitmask: bytestring &length=(h * ((w + 7) / 8)) &transient;
} &length=(w * h * $context.connection.get_bytes_per_pixel()) + (h * ((w + 7) / 8))

type PD_PsuedoDesktopSize = record {
type PD_PseudoDesktopSize = record {
    # Actually no further data
    nothing: empty;
} &length=0;
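
The &length expression for PD_PseudoCursor matches the RFB cursor pseudo-encoding: w*h pixels at
the negotiated bytes-per-pixel, followed by a 1-bit-per-pixel bitmask padded to whole bytes per
row, i.e. h * ((w + 7) / 8) bytes. A worked example with made-up dimensions:

#include <cassert>

int main()
    {
    // Made-up cursor: 12x10 pixels, 4 bytes per pixel (32-bit true colour).
    const int w = 12, h = 10, bytes_per_pixel = 4;

    const int pixel_bytes = w * h * bytes_per_pixel;   // 480
    const int bitmask_row_bytes = (w + 7) / 8;         // 2 bytes per row
    const int bitmask_bytes = h * bitmask_row_bytes;   // 20

    assert(pixel_bytes + bitmask_bytes == 500);        // total PD_PseudoCursor length
    return 0;
    }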

@@ -384,7 +384,7 @@ void SMTP_Analyzer::NewCmd(int cmd_code)

void SMTP_Analyzer::StartTLS()
    {
    // STARTTLS was succesful. Remove SMTP support analyzers, add SSL
    // STARTTLS was successful. Remove SMTP support analyzers, add SSL
    // analyzer, and throw event signifying the change.
    state = detail::SMTP_IN_TLS;
    expect_sender = expect_recver = true;
|
|||
rv->Assign(0, asn1_integer_to_val(pdu->request_id(), zeek::TYPE_INT));
|
||||
rv->Assign(1, asn1_integer_to_val(pdu->non_repeaters(), zeek::TYPE_COUNT));
|
||||
rv->Assign(2, asn1_integer_to_val(pdu->max_repetitions(), zeek::TYPE_COUNT));
|
||||
// Remove in v6.1: Misspelled repititions/repetitions backwards compat
|
||||
rv->Assign(3, asn1_integer_to_val(pdu->max_repetitions(), zeek::TYPE_COUNT));
|
||||
rv->Assign(4, build_bindings(pdu->var_bindings()));
|
||||
rv->Assign(3, build_bindings(pdu->var_bindings()));
|
||||
return rv;
|
||||
}
|
||||
%}
|
||||
|
|
|
@ -33,8 +33,8 @@ refine connection SSL_Conn += {
|
|||
|
||||
for ( unsigned int i = 0; i < cipher_suites.size(); ++i )
|
||||
{
|
||||
auto ciph = zeek::val_mgr->Count(cipher_suites[i]);
|
||||
cipher_vec->Assign(i, ciph);
|
||||
auto cipher = zeek::val_mgr->Count(cipher_suites[i]);
|
||||
cipher_vec->Assign(i, std::move(cipher));
|
||||
}
|
||||
|
||||
auto comp_vec = zeek::make_intrusive<zeek::VectorVal>(zeek::id::index_vec);
|
||||
|
|
|

@@ -91,10 +91,10 @@ type V2ServerHello(rec: SSLRecord) = record {
    #cert_type : uint8;
    server_version : uint16;
    cert_len : uint16;
    ciph_len : uint16;
    ciphers_len : uint16;
    conn_id_len : uint16;
    cert_data : bytestring &length = cert_len;
    ciphers : uint24[ciph_len/3];
    ciphers : uint24[ciphers_len/3];
    conn_id_data : bytestring &length = conn_id_len;
} &let {
    session_id_hit : uint8 = rec.head3;

@@ -56,8 +56,8 @@ refine connection Handshake_Conn += {

    for ( unsigned int i = 0; i < cipher_suites.size(); ++i )
        {
        auto ciph = zeek::val_mgr->Count(cipher_suites[i]);
        cipher_vec->Assign(i, ciph);
        auto cipher = zeek::val_mgr->Count(cipher_suites[i]);
        cipher_vec->Assign(i, std::move(cipher));
        }

    auto comp_vec = zeek::make_intrusive<zeek::VectorVal>(zeek::id::index_vec);
auto comp_vec = zeek::make_intrusive<zeek::VectorVal>(zeek::id::index_vec);
|
||||
|
|

Some files were not shown because too many files have changed in this diff.