Merge branch 'master' into topic/jsiwek/supervisor

Jon Siwek, 2020-01-07 14:55:51 -08:00
commit 6046da9993
314 changed files with 3709 additions and 5389 deletions

.cirrus.yml (new file, 123 lines)

@ -0,0 +1,123 @@
cpus: &CPUS 8
btest_jobs: &BTEST_JOBS 8
memory: &MEMORY 8GB
config: &CONFIG --build-type=release --enable-cpp-tests
memcheck_config: &MEMCHECK_CONFIG --build-type=debug --enable-cpp-tests --sanitizers=address
resources_template: &RESOURCES_TEMPLATE
cpu: *CPUS
memory: *MEMORY
ci_template: &CI_TEMPLATE
only_if: >
$CIRRUS_PR != '' ||
$CIRRUS_BRANCH == 'master' ||
$CIRRUS_BRANCH =~ 'release/.*'
# Default timeout is 60 minutes, Cirrus hard limit is 120 minutes for free
# tasks, so we may as well ask for the full time.
timeout_in: 120m
sync_submodules_script: git submodule update --recursive --init
get_external_pcaps_cache:
folder: testing/external/zeek-testing-traces
fingerprint_script: echo zeek-testing-traces
populate_script: ./ci/init-external-repos.sh
init_external_repos_script: ./ci/init-external-repos.sh
build_script: ./ci/build.sh
test_script: ./ci/test.sh
on_failure:
upload_btest_tmp_dir_artifacts:
path: "testing/**/tmp.tar.gz"
always:
upload_btest_xml_results_artifacts:
path: "testing/**/btest-results.xml"
type: text/xml
format: junit
upload_btest_html_results_artifacts:
path: "testing/**/btest-results.html"
type: text/html
env:
CIRRUS_WORKING_DIR: /zeek
ZEEK_CI_CPUS: *CPUS
ZEEK_CI_BTEST_JOBS: *BTEST_JOBS
ZEEK_CI_CONFIGURE_FLAGS: *CONFIG
# This is a single-purpose, read-only GitHub deploy key (SSH private key) for
# the zeek-testing-private repository.
ZEEK_TESTING_PRIVATE_SSH_KEY: ENCRYPTED[6631d7bf11e6553c531222953fb6de4d4a48a86a5dbc21a97604d5ca1791845718c985d9086f125cead6908e1b5f2b23]
# Linux EOL timelines: https://linuxlifecycle.com/
# Fedora (~13 months): https://fedoraproject.org/wiki/Fedora_Release_Life_Cycle
fedora30_task:
container:
# Fedora 30 EOL: Around June 2020
dockerfile: ci/fedora-30/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
centos7_task:
container:
# CentOS 7 EOL: June 30, 2024
dockerfile: ci/centos-7/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
debian9_task:
container:
# Debian 9 EOL: June 2022
dockerfile: ci/debian-9/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
ubuntu18_task:
container:
# Ubuntu 18.04 EOL: April 2023
dockerfile: ci/ubuntu-18.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
ubuntu16_task:
container:
# Ubuntu 16.04 EOL: April 2021
dockerfile: ci/ubuntu-16.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
# Apple doesn't publish official long-term support timelines, so the easiest
# option is to support only the latest macOS release or whatever latest
# image is available.
macos_task:
osx_instance:
image: mojave-base
prepare_script: ./ci/macos/prepare.sh
<< : *CI_TEMPLATE
env:
# Currently only available as 2 core / 4 thread (and 8GB) instances.
ZEEK_CI_CPUS: 4
ZEEK_CI_BTEST_JOBS: 4
# No permission to write to default location of /zeek
CIRRUS_WORKING_DIR: /tmp/zeek
# FreeBSD EOL timelines: https://www.freebsd.org/security/security.html#sup
freebsd_task:
freebsd_instance:
# FreeBSD 12 EOL: June 30, 2024
image_family: freebsd-12-1
<< : *RESOURCES_TEMPLATE
prepare_script: ./ci/freebsd/prepare.sh
<< : *CI_TEMPLATE
memcheck_task:
container:
# Just uses a recent/common distro to run memory error/leak checks.
dockerfile: ci/ubuntu-18.04/Dockerfile
<< : *RESOURCES_TEMPLATE
<< : *CI_TEMPLATE
env:
ZEEK_CI_CONFIGURE_FLAGS: *MEMCHECK_CONFIG

.github/workflows/ci-notification.yml (new file, 20 lines)

@ -0,0 +1,20 @@
name: CI Email Notification
on:
check_suite:
types: [completed]
jobs:
notify:
runs-on: ubuntu-latest
steps:
- name: Send CI Email Notification
uses: jsiwek/ci-email-action@master
env:
CI_APP_NAME: "Cirrus CI"
SMTP_HOST: ${{ secrets.SMTP_HOST }}
SMTP_PORT: ${{ secrets.SMTP_PORT }}
SMTP_USER: ${{ secrets.SMTP_USER }}
SMTP_PASS: ${{ secrets.SMTP_PASS }}
MAIL_FROM: ${{ secrets.MAIL_FROM }}
MAIL_TO: ${{ secrets.MAIL_TO }}
MAIL_REPLY_TO: ${{ secrets.MAIL_REPLY_TO }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@ -31,8 +31,8 @@ env:
- distro: ubuntu_18.04
- distro: ubuntu_18.04_leaktest
-install: sh testing/scripts/travis-job install $distro
+install: sh ci/travis-job install $distro
-before_script: sh testing/scripts/travis-job build $distro
+before_script: sh ci/travis-job build $distro
-script: sh testing/scripts/travis-job run $distro
+script: sh ci/travis-job run $distro

CHANGES (280 lines changed)

@ -1,4 +1,284 @@
3.1.0-dev.359 | 2020-01-06 16:40:55 -0800
* Fix build warning in X509 plugin (Tim Wojtulewicz, Corelight)
3.1.0-dev.357 | 2020-01-06 14:10:54 -0800
* GH-412: Enable Patterns as Table index (Dev Bali, Corelight)
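A minimal illustrative sketch of the new capability (example names are
invented here, not taken from the commit):

    event zeek_init()
        {
        local svc: table[pattern] of port = { [/^https?$/] = 443/tcp };
        print svc[/^https?$/];
        }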
3.1.0-dev.354 | 2020-01-06 14:16:29 -0700
* Fix snprintf compiler warning in hexdump BIF (Jon Siwek, Corelight)
* Updating submodule(s).
[nomail] (Jon Siwek, Corelight)
3.1.0-dev.351 | 2020-01-06 11:20:09 -0800
* Increase a timeout for a btest (Jon Siwek, Corelight)
3.1.0-dev.350 | 2020-01-06 10:51:07 -0800
* Fix Cirrus CI FreeBSD python dependencies installation (Jon Siwek, Corelight)
3.1.0-dev.348 | 2020-01-06 11:19:36 -0700
* Increase timeout for a btest
May be more timing-sensitive with slower ASAN/LSAN configuration in
addition to already-slower CI environment. (Jon Siwek, Corelight)
* Switch CI scripts to use ASAN/LSAN instead of gperftools
Using AddressSanitizer/LeakSanitizer is better. It covers the full
unit/baseline test suite by default without requiring one to write
specific memory leak tests. It also covers other types of memory errors
besides just leaks. (Jon Siwek, Corelight)
* Remove redundant memory leak btests
Or otherwise convert into a regular btest if it didn't already seem to
be covered.
There's no need for a separate memory leak test group since compiling
with LeakSanitizer now covers leak checking for the full btest suite. (Jon Siwek, Corelight)
* Fix reference counting issues related to lambdas/closures
For example, circular references between a lambda function, the frame
it's stored within, and/or its closure could cause memory leaks.
This also fixes other various reference-count ownership issues that
could lead to memory errors.
There may still be some potential/undiscovered issues because the "outer
ID" finding logic doesn't look quite right as the AST traversal descends
within nested lambdas and considers their locals as "outer", but
possibly the other logic for locating values in closures or cloning
closures just works around that behavior. (Jon Siwek, Corelight)
* Disable LeakSanitizer for btests that have known leaks
E.g. ones that throw interpreter exceptions, as those are currently
known to potentially cause leaks. Fixing the underlying leaks involves
the larger task of more IntrusivePtr usage.
Reference cycles may also cause leaks. (Jon Siwek, Corelight)
* Rewrite the btest for when-statement timeouts
To avoid a memory leak in DNS lookups that's hard to work around and
does not otherwise affect typical operation when Zeek is allowed to
continue to run after zeek_init(). (Jon Siwek, Corelight)
* Fix memory leak when a logging plugin hook prevents a write (Jon Siwek, Corelight)
* Fix memory leaks in various input framework error-handling cases (Jon Siwek, Corelight)
* Fix memory leak in Reporter::get_weird_sampling_whitelist() BIF (Jon Siwek, Corelight)
* Fix reference counting of Log::Filter "config" field
Which can potentially be a minor memory leak if there's a lot of dynamic
adding/removing of logging filters. (Jon Siwek, Corelight)
* Fix memory leak in system_env() BIF (Jon Siwek, Corelight)
* Clean up triggers awaiting global state modification at shutdown
Otherwise they can be reported as memory leaks since no more global
state modifications will take place to notify the trigger to clean
itself up. (Jon Siwek, Corelight)
* Fix memory leak in initializing log writers with no local backend (Jon Siwek, Corelight)
* Fix packet filter memory leaks (Jon Siwek, Corelight)
* Skip sending thread heartbeat if it already asked to be finished
Otherwise the heartbeat message may fail to be processed and show up as
leaked memory. (Jon Siwek, Corelight)
* Fix memory leak of sqlite input reader prepared statement (Jon Siwek, Corelight)
* Prevent duplicate "finish" threading messages
As they don't get processed and may show up as a memory leak. (Jon Siwek, Corelight)
* Fix memory leak when table-based input stream overwrites old entries (Jon Siwek, Corelight)
* Fix scripting error in a DHCP btest (Jon Siwek, Corelight)
* Fix memory leaks in Kerberos ticket decryption
Memory allocated to the decrypted ticket data as well as the server
principal were not freed.
Also fixed potential leaks in error cases that called
krb5_get_error_message() without freeing the returned value. (Jon Siwek, Corelight)
* Fix scripting error in an ftp btest (Jon Siwek, Corelight)
* Update paraglob submodule (Jon Siwek, Corelight)
* Fix malloc/delete mismatch in JSON formatting
ODesc allocated with malloc() and BroString deallocated with delete[],
but really the intermediate BroString wasn't even needed when copying
into std::string. (Jon Siwek, Corelight)
* Delete/timeout pending DNS requests during shutdown
Primarily, this change prevents the pending requests showing up as
memory leaks. (Jon Siwek, Corelight)
* Fix memory leak in OCSP parsing functions
Various OCSP parsing functions used in presence of OpenSSL 1.1 used
"d2i_ASN1_SEQUENCE_ANY" which returns a "STACK_OF(ASN1_TYPE)", but used
"sk_ASN1_TYPE_free" instead of "sk_ASN1_TYPE_pop_free" to free it. The
former only frees the stack structure while the later frees both the
structure and the elements. (Jon Siwek, Corelight)
* Free the global X509 certificate root store on shutdown
Otherwise LeakSanitizer reports its contents as leaked. (Jon Siwek, Corelight)
* Add general LeakSanitizer macros/instrumentation (Jon Siwek, Corelight)
* Improve --sanitizers configure option
* Rename SANITIZERS CMake variable to ZEEK_SANITIZERS for clarity
* Use -O1 by default to improve speed (set NO_OPTIMIZATIONS env. var.
to override and use -O0). Uses -fno-optimize-sibling-calls with -O1
to still get "perfect stack traces".
* Updates various sub-projects with sanitizer improvements:
binpac and bifcl, by default, now ignore leaks reported by LeakSanitizer
so that it doesn't interfere with the Zeek build (Jon Siwek, Corelight)
3.1.0-dev.319 | 2020-01-06 09:44:11 -0800
* Mark safe_snprintf and safe_vsnprintf as deprecated, remove uses of them (Tim Wojtulewicz, Corelight)
* Add unit tests to util.cc and module_util.cc (Tim Wojtulewicz, Corelight)
3.1.0-dev.314 | 2019-12-18 13:36:07 -0800
* Add GitHub Action for CI notification emails (Jon Siwek, Corelight)
3.1.0-dev.313 | 2019-12-18 13:23:51 -0800
* Add Cirrus CI config (Jon Siwek, Corelight)
3.1.0-dev.309 | 2019-12-16 09:40:01 -0800
* GHI-599: avoid memory leak with default pattern matching and json formatting (Tim Wojtulewicz, Corelight)
3.1.0-dev.307 | 2019-12-16 08:20:58 -0800
* Update URL for Malware Hash Registry website (Jon Siwek, Corelight)
3.1.0-dev.300 | 2019-12-05 12:34:41 -0700
* GH-700: add zeek_args() BIF
Provides access to all zeek command-line arguments ("argv"). (Jon Siwek, Corelight)
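A minimal usage sketch (illustrative, not part of the commit message):

    event zeek_init()
        {
        print zeek_args();
        }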
3.1.0-dev.297 | 2019-12-05 11:57:12 -0700
* GH-700: add packet_sources() BIF
Provides access to properties of packet sources, like interface or pcap
file name. (Jon Siwek, Corelight)
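A minimal usage sketch (illustrative, not part of the commit message); the
fields come from the new PacketSource record introduced in this change:

    event zeek_init()
        {
        local srcs = packet_sources();
        for ( i in srcs )
            print srcs[i]$path, srcs[i]$live;
        }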
3.1.0-dev.295 | 2019-12-04 14:43:27 -0700
* Use new Zeek Logo instead of Bro Eyes on README.md (Dev Bali, Corelight)
3.1.0-dev.292 | 2019-12-02 13:37:19 -0800
* GH-619: Allow "print" statements to be redirected to a Log (Dev Bali, Corelight)
3.1.0-dev.286 | 2019-11-21 08:47:32 -0800
* GH-684: Fix parsing of RPC calls with non-AUTH_UNIX flavors
The parsing logic that should be specific to the AUTH_UNIX credential
flavor was previously applied unconditionally to other flavors. (Jon Siwek, Corelight)
3.1.0-dev.284 | 2019-11-21 08:29:36 -0800
* Fix ZEEK_PROFILER_FILE file format/parsing
Some Zeek script statement descriptions were exceeding the hardcoded
maximum length and also could contain tab characters which were
supposed to be reserved for use as a delimiter in the file format. (Jon Siwek, Corelight)
3.1.0-dev.282 | 2019-11-18 12:06:13 +0000
* GH-646: Add new "successful_connection_remove" event. (Jon Siwek, Corelight)
Includes:
- Switch Zeek's base scripts over to using it in place of
"connection_state_remove". The difference between the two is
that "connection_state_remove" is raised for all events
while "successful_connection_remove" excludes TCP
connections that were never established (just SYN packets).
There can be performance benefits to this change for some
use-cases.
- Add new event called ``connection_successful`` and a new
``connection`` record field named "successful" to help
indicate this new property of connections.
3.1.0-dev.280 | 2019-11-15 18:27:27 -0800
* Run doctest unit tests in Travis CI (Jon Siwek, Corelight)
* Fix indents/whitespace in Travis CI script (Jon Siwek, Corelight)
* Adjust btests for OpenBSD portability (Jon Siwek, Corelight)
* Convert pcapng test suite files to pcap format for OpenBSD compat (Jon Siwek, Corelight)
* Fix undefined symbols loading libbroker on OpenBSD (Jon Siwek, Corelight)
* Fix compile warnings on OpenBSD (Jon Siwek, Corelight)
3.1.0-dev.271 | 2019-11-14 19:16:31 -0800
* Add initial scaffold for unit testing via doctest (Dominik Charousset, Corelight)
3.1.0-dev.266 | 2019-11-14 17:29:00 -0800
* Add hint to run `make distclean` if configure fails (Simon Hardy-Francis, Corelight)
3.1.0-dev.264 | 2019-11-14 16:57:13 -0800
* Install test-all-policy.zeek script (Jon Siwek, Corelight)
The zeekygen script, which is already installed, refers to it, and it
could also generally be useful for "test everything" type scenarios.
3.1.0-dev.263 | 2019-11-13 13:43:16 -0800
* Improve record_fields() BIF to recursively give full container type names (Henri DF)
3.1.0-dev.257 | 2019-11-11 13:40:11 -0800
* Update embedded CAF to 0.17.3 (Jon Siwek, Corelight)
3.1.0-dev.256 | 2019-11-07 13:42:11 -0800
* Allow record_fields() string arguments that name a record type (Jon Siwek, Corelight)
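An illustrative sketch combining both record_fields() improvements (not
taken from the commit messages):

    event zeek_init()
        {
        # "connection" names a record type; container-typed fields are now
        # reported with their full type names.
        print record_fields("connection");
        }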
3.1.0-dev.253 | 2019-11-05 09:54:01 -0800
* Add and use new IntrusivePtr type (Dominik Charousset, Corelight)


@ -19,6 +19,14 @@ include(cmake/FindClangTidy.cmake)
########################################################################
## Project/Build Configuration
if (ENABLE_ZEEK_UNIT_TESTS)
enable_testing()
add_definitions(-DDOCTEST_CONFIG_SUPER_FAST_ASSERTS)
else ()
add_definitions(-DDOCTEST_CONFIG_DISABLE)
endif ()
if ( ENABLE_CCACHE )
find_program(CCACHE_PROGRAM ccache)
@ -113,10 +121,30 @@ if ( NOT BINARY_PACKAGING_MODE )
_make_install_dir_symlink("${CMAKE_INSTALL_PREFIX}/lib/bro" "${CMAKE_INSTALL_PREFIX}/lib/zeek")
endif ()
-if ( SANITIZERS )
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=${SANITIZERS} -fno-omit-frame-pointer")
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=${SANITIZERS} -fno-omit-frame-pointer")
-set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -fsanitize=${SANITIZERS} -fno-omit-frame-pointer")
+if ( ZEEK_SANITIZERS )
+# Check the thread library info early as setting compiler flags seems to
+# interfere with the detection and cause CMAKE_THREAD_LIBS_INIT to not
+# include -lpthread when it should.
find_package(Threads)
set(_sanitizer_flags "-fsanitize=${ZEEK_SANITIZERS}")
set(_sanitizer_flags "${_sanitizer_flags} -fno-omit-frame-pointer")
set(_sanitizer_flags "${_sanitizer_flags} -fno-optimize-sibling-calls")
if ( NOT DEFINED ENV{NO_OPTIMIZATIONS} )
# Using -O1 is generally the suggestion to get more reasonable
# performance. The one downside is that the compiler may optimize out
# code that otherwise generates an error/leak in a -O0 build, but that
# should be rare and users mostly will not be running unoptimized builds
# in production anyway.
set(_sanitizer_flags "${_sanitizer_flags} -O1")
endif ()
# Technically, we also need to use the compiler to drive linking and
# give the sanitizer flags there, too. However, CMake, by default, uses
# the compiler for linking, so the flags automatically get used. See
# https://cmake.org/pipermail/cmake/2014-August/058268.html
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_sanitizer_flags}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${_sanitizer_flags}")
endif()
########################################################################
@ -181,6 +209,50 @@ if (MISSING_PREREQS)
message(FATAL_ERROR "Configuration aborted due to missing prerequisites")
endif ()
if ( CAF_ROOT_DIR )
find_package(CAF COMPONENTS core io openssl REQUIRED)
endif ()
add_subdirectory(aux/paraglob)
set(zeekdeps ${zeekdeps} paraglob)
if ( BROKER_ROOT_DIR )
# Avoid calling find_package(CAF) twice.
if ( NOT CAF_ROOT_DIR )
find_package(CAF COMPONENTS core io openssl REQUIRED)
endif ()
find_package(Broker REQUIRED)
set(zeekdeps ${zeekdeps} ${BROKER_LIBRARY})
set(broker_includes ${BROKER_INCLUDE_DIR})
else ()
if ( ZEEK_SANITIZERS )
set(BROKER_SANITIZERS ${ZEEK_SANITIZERS})
endif ()
set(ENABLE_STATIC_ONLY_SAVED ${ENABLE_STATIC_ONLY})
if ( BUILD_STATIC_BROKER )
set(ENABLE_STATIC_ONLY true)
endif()
add_subdirectory(aux/broker)
set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED})
if ( BUILD_STATIC_BROKER )
set(zeekdeps ${zeekdeps} broker_static)
else()
set(zeekdeps ${zeekdeps} broker)
endif()
set(broker_includes ${CMAKE_CURRENT_SOURCE_DIR}/aux/broker/include ${CMAKE_CURRENT_BINARY_DIR}/aux/broker/include)
endif ()
# CAF_LIBRARIES and CAF_INCLUDE_DIRS are defined either by calling
# find_package(CAF) or by calling add_subdirectory(aux/broker). In either case,
# we have to care about CAF here because Broker headers can pull in CAF
# headers.
set(zeekdeps ${zeekdeps} ${CAF_LIBRARIES})
include_directories(BEFORE ${broker_includes} ${CAF_INCLUDE_DIRS})
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/paraglob/include)
include_directories(BEFORE
${PCAP_INCLUDE_DIR}
${BIND_INCLUDE_DIR}
@ -254,7 +326,7 @@ if ( ${CMAKE_SYSTEM_NAME} MATCHES Linux AND EXISTS /etc/os-release )
endif ()
endif ()
-set(zeekdeps
+set(zeekdeps ${zeekdeps}
${BinPAC_LIBRARY}
${PCAP_LIBRARY}
${OPENSSL_LIBRARIES}
@ -339,48 +411,6 @@ InstallSymlink("${CMAKE_INSTALL_PREFIX}/bin/zeek-wrapper" "${CMAKE_INSTALL_PREFI
########################################################################
## Recurse on sub-directories
if ( CAF_ROOT_DIR )
find_package(CAF COMPONENTS core io openssl REQUIRED)
endif ()
if ( BROKER_ROOT_DIR )
# Avoid calling find_package(CAF) twice.
if ( NOT CAF_ROOT_DIR )
find_package(CAF COMPONENTS core io openssl REQUIRED)
endif ()
find_package(Broker REQUIRED)
set(zeekdeps ${zeekdeps} ${BROKER_LIBRARY})
include_directories(BEFORE ${BROKER_INCLUDE_DIR})
else ()
set(ENABLE_STATIC_ONLY_SAVED ${ENABLE_STATIC_ONLY})
if ( BUILD_STATIC_BROKER )
set(ENABLE_STATIC_ONLY true)
endif()
add_subdirectory(aux/broker)
set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED})
if ( BUILD_STATIC_BROKER )
set(zeekdeps ${zeekdeps} broker_static)
else()
set(zeekdeps ${zeekdeps} broker)
endif()
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/broker/include
${CMAKE_CURRENT_BINARY_DIR}/aux/broker/include)
endif ()
# CAF_LIBRARIES and CAF_INCLUDE_DIRS are defined either by calling
# find_package(CAF) or by calling add_subdirectory(aux/broker). In either case,
# we have to care about CAF here because Broker headers can pull in CAF
# headers.
set(zeekdeps ${zeekdeps} ${CAF_LIBRARIES})
include_directories(BEFORE ${CAF_INCLUDE_DIRS})
add_subdirectory(aux/paraglob)
set(zeekdeps ${zeekdeps} paraglob)
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/paraglob/include)
add_subdirectory(src)
add_subdirectory(scripts)
add_subdirectory(man)
@ -421,6 +451,7 @@ message(
"\nInstall prefix: ${CMAKE_INSTALL_PREFIX}" "\nInstall prefix: ${CMAKE_INSTALL_PREFIX}"
"\nZeek Script Path: ${ZEEK_SCRIPT_INSTALL_PATH}" "\nZeek Script Path: ${ZEEK_SCRIPT_INSTALL_PATH}"
"\nDebug mode: ${ENABLE_DEBUG}" "\nDebug mode: ${ENABLE_DEBUG}"
"\nUnit tests: ${ENABLE_ZEEK_UNIT_TESTS}"
"\n" "\n"
"\nCC: ${CMAKE_C_COMPILER}" "\nCC: ${CMAKE_C_COMPILER}"
"\nCFLAGS: ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${BuildType}}" "\nCFLAGS: ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${BuildType}}"


@ -583,3 +583,29 @@ The original source file comes with this licensing statement:
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
==============================================================================
%%% src/3rdparty/doctest.h
==============================================================================
Copyright (c) 2016-2019 Viktor Kirilov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

NEWS (25 lines changed)

@ -22,6 +22,10 @@ New Functionality
- There is now a new ``tcp_options`` event that is raised for each TCP header
  that contains options.
- Added a new option, ``Log::print_to_log``, that can be set to automatically
redirect the output from "print" statements to a real log stream (e.g.
instead of writing to stdout).
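  As a purely illustrative sketch (not part of the release notes), redirecting
  all ``print`` output into the new print log could look like::

      redef Log::print_to_log = Log::REDIRECT_ALL;

      event zeek_init()
          {
          print "this goes to the print log instead of stdout";
          }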
Changed Functionality
---------------------
@ -40,6 +44,24 @@ Changed Functionality
- The tcp_option event is now correctly raised.
- The base scripts shipped with Zeek now use the new
``successful_connection_remove`` event instead of
``connection_state_remove`` where possible (when the logic doesn't
pertain to unestablished TCP connections). There's a performance
benefit to this switch, but it may potentially break custom scripts
that depended on accessing or modifying state via specific
``&priority`` ordering of ``connection_state_remove`` event
handlers. However, since most of Zeek's base scripts previously just
used that event with the intention of writing a finalized log as the
last thing executed for a given connection, and the new
``successful_connection_remove`` event handlers all run after
``connection_state_remove``, it's not likely this change to the base
scripts causes any incompatibility with user scripts.
There's also a new event called ``connection_successful`` and a new
``connection`` record field named "successful" to help indicate this
new property of connections.
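  As an illustrative sketch (not part of the release notes), a script that
  only cares about connections that actually became established can now
  write::

      event successful_connection_remove(c: connection)
          {
          print "established connection removed", c$id;
          }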
Removed Functionality
---------------------
@ -50,6 +72,9 @@ Deprecated Functionality
  in favor of the real <cstdint> types they alias. E.g. use int8_t instead of
  int8.
- The C++ API functions "safe_snprintf" and "safe_vsnprintf" are deprecated.
Use "snprintf" and "vsnprintf" instead.
Zeek 3.0.0
==========


@ -1,6 +1,6 @@
<h1 align="center">
-[![Zeek Logo](https://www.zeek.org/images/bro-eyes.png)](https://www.zeek.org)
+[![Zeek Logo](https://www.zeek.org/images/zeek-logo-without-text.png)](https://www.zeek.org)
The Zeek Network Security Monitor


@ -1 +1 @@
-3.1.0-dev.253
+3.1.0-dev.359

@ -1 +1 @@
-Subproject commit e5b1940850d486e3989f6a55615140a16bd1b9d2
+Subproject commit e6bc87175dcdf297caae9506d326f8d0cc36b64b

@ -1 +1 @@
-Subproject commit 9d7bdd82dfabe9ca2522c43ef1dd22e1044882ee
+Subproject commit 3b6c9ebc844ca98759e183f7b07c511fa87e8163

@ -1 +1 @@
-Subproject commit 6c0d6c1d43e787c0110d5ad610281e5fb3f48725
+Subproject commit a31a5260611528d4d1dd31fb921f92e35a7004b5

@ -1 +1 @@
-Subproject commit 052bace948a84efa7188ac9a20a802e89d6ea5cc
+Subproject commit 37f8619c8ac89e7e9bcda6c167e8972a05d8e286

@ -1 +1 @@
-Subproject commit 6c2b36193e47490e61f22ce6de233af7ed3101b1
+Subproject commit ee7932525ff198f1270b3d941f449da5f939f29c

@ -1 +1 @@
-Subproject commit 6ecf78b664653736eb74243f93baf6f2b6aa8747
+Subproject commit 0790f420148806c1380fc7e0e0a4278c7970753c

@ -1 +1 @@
-Subproject commit 1bb662776b84420142ca746e3a7b0b486c2ad15d
+Subproject commit 17f3b239f5551d8a9050a8d26a254e516db5c393

ci/README (new file, 49 lines)

@ -0,0 +1,49 @@
=========================================
Continuous Integration Configuration Info
=========================================
The following pointers are aimed at maintainers to help describe a few points
about the Cirrus CI setup that may not be obvious/intuitive.
Private Test Suite
------------------
Access to the private test suite is available in CI tasks except in Pull
Requests from people who do not otherwise have write access to the zeek
repository on GitHub. Access to the ``zeek-testing-private`` repository is
configured via a "deploy key", a single-purpose SSH key with read-only
permissions. Here's how it can be set up:
1. ``ssh-keygen -f cirrus-key``
2. Add contents of ``cirrus-key.pub`` as a new key on GitHub:
https://github.com/zeek/zeek-testing-private/settings/keys
3. Generate a new Cirrus CI secured variable in the repository settings at
https://cirrus-ci.com/github/zeek/zeek. The value of the secured variable
is the base64-encoded private key, and can be obtained from the output of
the command: ``base64 cirrus-key``.
4. Take the ``ENCRYPTED[...]`` string generated by Cirrus and use that as the
value of ``ZEEK_TESTING_PRIVATE_SSH_KEY`` in ``.cirrus.yml``.
5. Delete the local key: ``rm cirrus-key*``
6. Commit/push the changes.
Email Notifications
-------------------
Cirrus CI doesn't feature any way to perform email notifications on failures,
so that is instead handled by a separate GitHub Action:
https://github.com/jsiwek/ci-email-action
The configuration of that GitHub Action is typical: it's just the
``.github/workflows/ci-notification.yml`` file, which sets SMTP/mail info
via secrets stored in GitHub for the Zeek repository:
https://github.com/zeek/zeek/settings/secrets
The particular values used for those are currently from the Zeek project's AWS
Simple Email Service configuration.

ci/build.sh (new executable file, 7 lines)

@ -0,0 +1,7 @@
#! /usr/bin/env bash
set -e
set -x
./configure ${ZEEK_CI_CONFIGURE_FLAGS}
make -j ${ZEEK_CI_CPUS}

ci/centos-7/Dockerfile (new file, 54 lines)

@ -0,0 +1,54 @@
FROM centos:7
# The version of git in the standard repos is 1.8 and CI needs 2.3+
# for the use of GIT_SSH_COMMAND when cloning private repos.
RUN yum -y install \
https://repo.ius.io/ius-release-el7.rpm \
https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \
&& yum -y install git2u \
&& yum clean all && rm -rf /var/cache/yum
RUN yum -y install \
epel-release \
&& yum clean all && rm -rf /var/cache/yum
RUN yum -y install \
centos-release-scl \
&& yum clean all && rm -rf /var/cache/yum
RUN yum -y install \
devtoolset-7 \
&& yum clean all && rm -rf /var/cache/yum
RUN yum -y install \
cmake3 \
make \
flex \
bison \
swig \
openssl \
openssl-devel \
libpcap-devel \
python3 \
python3-devel \
python3-pip\
zlib-devel \
sqlite \
findutils \
which \
&& yum clean all && rm -rf /var/cache/yum
# Many distros adhere to PEP 394's recommendation for `python` = `python2` so
# this is a simple workaround until we drop Python 2 support and explicitly
# use `python3` for all invocations (e.g. in shebangs).
RUN ln -sf /usr/bin/python3 /usr/local/bin/python
RUN ln -sf /usr/bin/pip3 /usr/local/bin/pip
RUN pip install junit2html
RUN echo 'unset BASH_ENV PROMPT_COMMAND ENV' > /usr/bin/zeek-ci-env && \
echo 'source /opt/rh/devtoolset-7/enable' >> /usr/bin/zeek-ci-env
ENV BASH_ENV="/usr/bin/zeek-ci-env" \
ENV="/usr/bin/zeek-ci-env" \
PROMPT_COMMAND=". /usr/bin/zeek-ci-env"

ci/debian-9/Dockerfile (new file, 40 lines)

@ -0,0 +1,40 @@
FROM debian:9
RUN apt-get update && apt-get -y install \
git \
cmake \
make \
gcc \
g++ \
flex \
bison \
libpcap-dev \
libssl-dev \
python3 \
python3-dev \
python3-pip\
swig \
zlib1g-dev \
libkrb5-dev \
bsdmainutils \
sqlite3 \
curl \
wget \
xz-utils \
clang-7 \
libc++-7-dev \
libc++abi-7-dev \
&& rm -rf /var/lib/apt/lists/*
RUN update-alternatives --install /usr/bin/cc cc /usr/bin/clang-7 100
RUN update-alternatives --install /usr/bin/c++ c++ /usr/bin/clang++-7 100
# Many distros adhere to PEP 394's recommendation for `python` = `python2` so
# this is a simple workaround until we drop Python 2 support and explicitly
# use `python3` for all invocations (e.g. in shebangs).
RUN ln -sf /usr/bin/python3 /usr/local/bin/python
RUN ln -sf /usr/bin/pip3 /usr/local/bin/pip
RUN pip install junit2html
ENV CXXFLAGS=-stdlib=libc++

ci/fedora-30/Dockerfile (new file, 30 lines)

@ -0,0 +1,30 @@
FROM fedora:30
RUN yum -y install \
git \
cmake \
make \
gcc \
gcc-c++ \
flex \
bison \
swig \
openssl \
openssl-devel \
libpcap-devel \
python3 \
python3-devel \
python3-pip\
zlib-devel \
sqlite \
findutils \
which \
&& yum clean all && rm -rf /var/cache/yum
# Many distros adhere to PEP 394's recommendation for `python` = `python2` so
# this is a simple workaround until we drop Python 2 support and explicitly
# use `python3` for all invocations (e.g. in shebangs).
RUN ln -sf /usr/bin/python3 /usr/local/bin/python
RUN ln -sf /usr/bin/pip3 /usr/local/bin/pip
RUN pip install junit2html

ci/freebsd/prepare.sh (new executable file, 13 lines)

@ -0,0 +1,13 @@
#!/bin/sh
echo "Preparing FreeBSD environment"
sysctl hw.model hw.machine hw.ncpu
set -e
set -x
env ASSUME_ALWAYS_YES=YES pkg bootstrap
pkg install -y bash git cmake swig30 bison python3 base64
pyver=`python3 -c 'import sys; print(f"py{sys.version_info[0]}{sys.version_info[1]}")'`
pkg install -y $pyver-sqlite3 $pyver-pip
( cd && mkdir -p ./bin && ln -s /usr/local/bin/python3 ./bin/python )
pip install junit2html

ci/init-external-repos.sh (new executable file, 66 lines)

@ -0,0 +1,66 @@
#! /usr/bin/env bash
function banner
{
local msg="${1}"
printf "+--------------------------------------------------------------+\n"
printf "| %-60s |\n" "$(date)"
printf "| %-60s |\n" "${msg}"
printf "+--------------------------------------------------------------+\n"
}
set -e
cd testing/external
[[ ! -d zeek-testing ]] && make init
cd zeek-testing
git checkout -q $(cat ../commit-hash.zeek-testing)
if [[ -n "${CIRRUS_CI}" ]]; then
if [[ -d ../zeek-testing-traces ]]; then
banner "Use existing/cached zeek-testing traces"
else
banner "Create cache directory for zeek-testing traces"
mkdir ../zeek-testing-traces
fi
rm -rf Traces
ln -s ../zeek-testing-traces Traces
fi
make update-traces
cd ..
# Note that this script is also called when populating the public cache, so
# the zeek-testing-private dir could have been created/populated already.
if [[ -n "${CIRRUS_CI}" ]] && [[ ! -d zeek-testing-private ]]; then
# If we're running this on Cirrus, the SSH key won't be available to PRs,
# so don't make any of this fail the task in that case. (But technically,
# the key is also available in PRs for people with write access to the
# repo, so we can still try for those cases).
if [[ -n "${CIRRUS_PR}" ]]; then
set +e
else
set -e
fi
banner "Trying to clone zeek-testing-private git repo"
echo "${ZEEK_TESTING_PRIVATE_SSH_KEY}" > cirrus_key.b64
base64 --decode cirrus_key.b64 > cirrus_key
rm cirrus_key.b64
chmod 600 cirrus_key
git --version
# Note: GIT_SSH_COMMAND requires git 2.3.0+
export GIT_SSH_COMMAND="ssh -i cirrus_key -F /dev/null -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
git clone git@github.com:zeek/zeek-testing-private
rm cirrus_key
fi
set -e
if [[ -d zeek-testing-private ]]; then
# Note that we never cache private pcaps.
banner "Update zeek-testing-private traces"
cd zeek-testing-private
make update-traces
fi

ci/macos/prepare.sh (new executable file, 8 lines)

@ -0,0 +1,8 @@
#!/bin/sh
echo "Preparing macOS environment"
sysctl hw.model hw.machine hw.ncpu hw.physicalcpu hw.logicalcpu
set -e
set -x
brew install cmake swig openssl bison

ci/test.sh (new executable file, 110 lines)

@ -0,0 +1,110 @@
#! /usr/bin/env bash
# It's possible to use this script locally from the zeek repo's root dir.
# The parallelism level when running tests locally is $1 if provided, else
# the value of `nproc` if available, otherwise just a single core.
result=0
BTEST=$(pwd)/aux/btest/btest
if [[ -z "${CIRRUS_CI}" ]]; then
# Set default values to use in place of env. variables set by Cirrus CI.
ZEEK_CI_CPUS=1
[[ $(which nproc) ]] && ZEEK_CI_CPUS=$(nproc)
[[ -n "${1}" ]] && ZEEK_CI_CPUS=${1}
ZEEK_CI_BTEST_JOBS=${ZEEK_CI_CPUS}
fi
function pushd
{
command pushd "$@" > /dev/null || exit 1
}
function popd
{
command popd "$@" > /dev/null || exit 1
}
function banner
{
local msg="${1}"
printf "+--------------------------------------------------------------+\n"
printf "| %-60s |\n" "$(date)"
printf "| %-60s |\n" "${msg}"
printf "+--------------------------------------------------------------+\n"
}
function run_unit_tests
{
banner "Running unit tests"
pushd build
( . ./zeek-path-dev.sh && zeek --test ) || result=1
popd
return 0
}
function prep_artifacts
{
banner "Prepare artifacts"
[[ -d .tmp ]] && rm -rf .tmp/script-coverage && tar -czf tmp.tar.gz .tmp
junit2html btest-results.xml btest-results.html
}
function run_btests
{
banner "Running baseline tests: zeek"
pushd testing/btest
${BTEST} -d -b -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} || result=1
make coverage
prep_artifacts
popd
return 0
}
function run_external_btests
{
local zeek_testing_pid=""
local zeek_testing_private_pid=""
pushd testing/external/zeek-testing
${BTEST} -d -b -x btest-results.xml -j ${ZEEK_CI_BTEST_JOBS} >btest.out 2>&1 &
zeek_testing_pid=$!
popd
if [[ -d testing/external/zeek-testing-private ]]; then
pushd testing/external/zeek-testing-private
# Note that we don't use btest's "-d" flag or generate/upload any
# artifacts to prevent leaking information about the private pcaps.
${BTEST} -b -j ${ZEEK_CI_BTEST_JOBS} >btest.out 2>&1 &
zeek_testing_private_pid=$!
popd
fi
banner "Running baseline tests: external/zeek-testing"
wait ${zeek_testing_pid} || result=1
pushd testing/external/zeek-testing
cat btest.out
make coverage
prep_artifacts
popd
if [[ -n "${zeek_testing_private_pid}" ]]; then
banner "Running baseline tests: external/zeek-testing-private"
wait ${zeek_testing_private_pid} || result=1
pushd testing/external/zeek-testing-private
make coverage
cat btest.out
popd
else
banner "Skipping private tests (not available for PRs)"
fi
}
banner "Start tests: ${ZEEK_CI_CPUS} cpus, ${ZEEK_CI_BTEST_JOBS} btest jobs"
run_unit_tests
run_btests
run_external_btests
exit ${result}

testing/scripts/travis-job → ci/travis-job (renamed, mode changed to executable, 42 lines changed)

@ -100,7 +100,7 @@ install_in_docker() {
distro_cmds="apt-get update; apt-get -y install wget xz-utils gdb cmake make gcc g++ flex bison python3 libpcap-dev libssl-dev zlib1g-dev libkrb5-dev git sqlite3 curl bsdmainutils; ln -s /usr/bin/python3 /usr/local/bin/python" distro_cmds="apt-get update; apt-get -y install wget xz-utils gdb cmake make gcc g++ flex bison python3 libpcap-dev libssl-dev zlib1g-dev libkrb5-dev git sqlite3 curl bsdmainutils; ln -s /usr/bin/python3 /usr/local/bin/python"
;; ;;
${LEAK_TEST_DISTRO}) ${LEAK_TEST_DISTRO})
distro_cmds="apt-get update; apt-get -y install gdb cmake make gcc g++ flex bison python3 libpcap-dev libssl-dev zlib1g-dev libkrb5-dev git sqlite3 curl bsdmainutils google-perftools libgoogle-perftools4 libgoogle-perftools-dev; ln -s /usr/bin/python3 /usr/local/bin/python" distro_cmds="apt-get update; apt-get -y install gdb cmake make gcc g++ flex bison python3 libpcap-dev libssl-dev zlib1g-dev libkrb5-dev git sqlite3 curl bsdmainutils; ln -s /usr/bin/python3 /usr/local/bin/python"
local_distro="ubuntu_18.04" local_distro="ubuntu_18.04"
;; ;;
*) *)
@ -125,7 +125,7 @@ build_in_docker() {
# Pass the distro as a different environment variable name to docker since
# the script will set $distro to "travis" as part of the invocation.
-docker exec -e COV_TOKEN -e BUILD_DISTRO=${distro} zeektest sh testing/scripts/travis-job build ${recursed_distro}
+docker exec -e COV_TOKEN -e BUILD_DISTRO=${distro} zeektest sh ci/travis-job build ${recursed_distro}
}
@ -139,7 +139,7 @@ run_in_docker() {
# Pass the distro as a different environment variable name to docker since
# the script will set $distro to "travis" as part of the invocation.
-docker exec -t -e TRAVIS -e TRAVIS_PULL_REQUEST -e TESTING_PRIVATE_DEPLOYKEY -e COV_TOKEN -e BUILD_DISTRO=${distro} zeektest sh testing/scripts/travis-job run ${recursed_distro}
+docker exec -t -e TRAVIS -e TRAVIS_PULL_REQUEST -e TESTING_PRIVATE_DEPLOYKEY -e COV_TOKEN -e BUILD_DISTRO=${distro} zeektest sh ci/travis-job run ${recursed_distro}
}
update_env() {
@ -165,10 +165,10 @@ build() {
# not needed by the Zeek tests. If the distro is set for leak tests, enable
# those options as well.
if [ "${BUILD_DISTRO}" != "${LEAK_TEST_DISTRO}" ]; then
-./configure --build-type=Release --disable-broker-tests --disable-python --disable-zeekctl && make -j 2
+./configure --build-type=Release --disable-broker-tests --enable-cpp-tests --disable-python --disable-zeekctl && make -j 2
else
echo "Configuring zeek to build for leak testing"
-./configure --build-type=Debug --disable-broker-tests --disable-python --disable-zeekctl --enable-perftools --enable-perftools-debug && make -j 2
+./configure --build-type=Debug --disable-broker-tests --enable-cpp-tests --disable-python --disable-zeekctl --sanitizers=address && make -j 2
fi
}
@ -207,20 +207,34 @@ run() {
ulimit -c unlimited
ulimit -a
ret=0
echo
echo "Running unit tests ##################################################"
echo
set +e
( cd build && . ./zeek-path-dev.sh && zeek --test )
if [ $? -ne 0 ]; then
ret=1
fi
set -e
echo
echo "Running baseline tests ##############################################"
echo
cd testing/btest
set +e
# Must specify a value for "-j" option, otherwise Travis uses a huge value.
if [ "${BUILD_DISTRO}" != "${LEAK_TEST_DISTRO}" ]; then
../../aux/btest/btest -j 4 -d ../../aux/btest/btest -j 4 -d
else
../../aux/btest/btest -j 4 -d -g leaks if [ $? -ne 0 ]; then
ret=1
fi fi
ret=$?
set -e set -e
echo echo
@ -258,11 +272,7 @@ run() {
if [ -d zeek-testing ]; then
cd zeek-testing
-if [ "${BUILD_DISTRO}" != "${LEAK_TEST_DISTRO}" ]; then
make
-else
-make leaks
-fi
if [ $? -ne 0 ]; then
showdiag
@ -274,11 +284,7 @@ run() {
if [ -d zeek-testing-private ]; then
cd zeek-testing-private
-if [ "${BUILD_DISTRO}" != "${LEAK_TEST_DISTRO}" ]; then
make
-else
-make leaks
-fi
if [ $? -ne 0 ]; then
showdiag
@ -332,7 +338,7 @@ remove_container() {
}
-if [ ! -f testing/scripts/travis-job ]; then
+if [ ! -f ci/travis-job ]; then
echo "Error: must change directory to root of zeek source tree before running this script."
exit 1
fi


@ -0,0 +1,41 @@
FROM ubuntu:16.04
RUN apt-get update && apt-get -y install \
git \
cmake \
make \
gcc \
g++ \
flex \
bison \
libpcap-dev \
libssl-dev \
python3 \
python3-dev \
python3-pip\
swig \
zlib1g-dev \
libkrb5-dev \
bsdmainutils \
sqlite3 \
curl \
wget \
xz-utils \
&& rm -rf /var/lib/apt/lists/*
RUN wget -q https://releases.llvm.org/9.0.0/clang+llvm-9.0.0-x86_64-linux-gnu-ubuntu-16.04.tar.xz
RUN mkdir /clang-9
RUN tar --strip-components=1 -C /clang-9 -xvf clang+llvm-9.0.0-x86_64-linux-gnu-ubuntu-16.04.tar.xz
RUN update-alternatives --install /usr/bin/cc cc /clang-9/bin/clang 100
RUN update-alternatives --install /usr/bin/c++ c++ /clang-9/bin/clang++ 100
# Many distros adhere to PEP 394's recommendation for `python` = `python2` so
# this is a simple workaround until we drop Python 2 support and explicitly
# use `python3` for all invocations (e.g. in shebangs).
RUN ln -sf /usr/bin/python3 /usr/local/bin/python
RUN ln -sf /usr/bin/pip3 /usr/local/bin/pip
RUN pip install junit2html
ENV CXXFLAGS=-stdlib=libc++
ENV LD_LIBRARY_PATH=/clang-9/lib


@ -0,0 +1,31 @@
FROM ubuntu:18.04
RUN apt-get update && apt-get -y install \
git \
cmake \
make \
gcc \
g++ \
flex \
bison \
libpcap-dev \
libssl-dev \
python3 \
python3-dev \
python3-pip\
swig \
zlib1g-dev \
libkrb5-dev \
bsdmainutils \
sqlite3 \
curl \
wget \
&& rm -rf /var/lib/apt/lists/*
# Many distros adhere to PEP 394's recommendation for `python` = `python2` so
# this is a simple workaround until we drop Python 2 support and explicitly
# use `python3` for all invocations (e.g. in shebangs).
RUN ln -sf /usr/bin/python3 /usr/local/bin/python
RUN ln -sf /usr/bin/pip3 /usr/local/bin/pip
RUN pip install junit2html

configure (15 lines changed)

@ -2,6 +2,11 @@
# Convenience wrapper for easily viewing/setting options that
# the project's CMake scripts will recognize
set -e
trap '[ $? -eq 0 ] && exit 0 ||
echo "Also, before re-running configure, consider cleaning the cache \
(removing the build directory) via \`make distclean\`"' EXIT
command="$0 $*"
usage="\
@ -22,6 +27,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
ccache installation and CMake 3.10+)
--toolchain=PATH path to a CMAKE_TOOLCHAIN_FILE
(useful for cross-compiling)
--sanitizers=LIST comma-separated list of sanitizer names to enable
Installation Directories:
--prefix=PREFIX installation directory [/usr/local/zeek]
@ -47,11 +53,11 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--enable-jemalloc link against jemalloc
--enable-static-broker build Broker statically (ignored if --with-broker is specified)
--enable-static-binpac build binpac statically (ignored if --with-binpac is specified)
--enable-cpp-tests build Zeek's C++ unit tests
--disable-zeekctl don't install ZeekControl
--disable-auxtools don't build or install auxiliary tools
--disable-python don't try to build python bindings for Broker
--disable-broker-tests don't try to build Broker unit tests
--sanitizers=SANITIZERS comma-separated list of Clang sanitizers to enable
Required Packages in Non-Standard Locations:
--with-openssl=PATH path to OpenSSL install root
@ -148,7 +154,7 @@ append_cache_entry INSTALL_AUX_TOOLS BOOL true
append_cache_entry INSTALL_ZEEKCTL BOOL true
append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING
append_cache_entry ENABLE_MOBILE_IPV6 BOOL false
-append_cache_entry SANITIZERS STRING ""
+append_cache_entry ZEEK_SANITIZERS STRING ""
# parse arguments
while [ $# -ne 0 ]; do
@ -234,7 +240,7 @@ while [ $# -ne 0 ]; do
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true
;;
--sanitizers=*)
-append_cache_entry SANITIZERS STRING $optarg
+append_cache_entry ZEEK_SANITIZERS STRING $optarg
;;
--enable-jemalloc)
append_cache_entry ENABLE_JEMALLOC BOOL true
@ -245,6 +251,9 @@ while [ $# -ne 0 ]; do
--enable-static-binpac)
append_cache_entry BUILD_STATIC_BINPAC BOOL true
;; ;;
--enable-cpp-tests)
append_cache_entry ENABLE_ZEEK_UNIT_TESTS BOOL true
;;
--disable-zeekctl)
append_cache_entry INSTALL_ZEEKCTL BOOL false
;;

doc (submodule)

@ -1 +1 @@
-Subproject commit b481bc908ed21a33fab215037e54bba0ab30822e
+Subproject commit 60ff40641a4024d1d172572317ddb30435046d51


@ -2,7 +2,6 @@ include(InstallPackageConfigFile)
install(DIRECTORY ./ DESTINATION ${ZEEK_SCRIPT_INSTALL_PATH} FILES_MATCHING
PATTERN "site/local*" EXCLUDE
PATTERN "test-all-policy.zeek" EXCLUDE
PATTERN "*.zeek"
PATTERN "*.sig"
PATTERN "*.fp"


@ -12,7 +12,9 @@ export {
## file.
type Log::ID: enum {
## Dummy place-holder.
-UNKNOWN
+UNKNOWN,
## Print statements that have been redirected to a log stream.
PRINTLOG
};
## If true, local logging is by default enabled for all filters.
@ -75,6 +77,36 @@ export {
## Returns: The path to be used for the filter.
global default_path_func: function(id: ID, path: string, rec: any) : string &redef;
## If :zeek:see:`Log::print_to_log` is set to redirect, ``print`` statements will
## automatically populate log entries with the fields contained in this record.
type PrintLogInfo: record {
## Current timestamp.
ts: time &log;
## Set of strings passed to the print statement.
vals: string_vec &log;
};
## Configurations for :zeek:see:`Log::print_to_log`
type PrintLogType: enum {
## No redirection of ``print`` statements.
REDIRECT_NONE,
## Redirection of those ``print`` statements that were being logged to stdout,
## leaving behind those set to go to other specific files.
REDIRECT_STDOUT,
## Redirection of all ``print`` statements.
REDIRECT_ALL
};
## Event for accessing logged print records.
global log_print: event(rec: PrintLogInfo);
## Set configuration for ``print`` statements redirected to logs.
const print_to_log: PrintLogType = REDIRECT_NONE &redef;
## If :zeek:see:`Log::print_to_log` is enabled to write to a print log,
## this is the path to which the print log stream writes.
const print_log_path = "print" &redef;
# Log rotation support.
## Information passed into rotation callback functions.
@ -643,3 +675,9 @@ function remove_default_filter(id: ID) : bool
{
return remove_filter(id, "default");
}
event zeek_init() &priority=5
{
if ( print_to_log != REDIRECT_NONE )
Log::create_stream(PRINTLOG, [$columns=PrintLogInfo, $ev=log_print, $path=print_log_path]);
}
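Since the stream above is created with ``$ev=log_print``, every redirected
``print`` also raises that event; an illustrative consumer (assumed usage,
not part of this change) could look like:

    redef Log::print_to_log = Log::REDIRECT_STDOUT;

    global prints_seen = 0;

    event Log::log_print(rec: Log::PrintLogInfo)
        {
        prints_seen += |rec$vals|;
        }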


@ -144,7 +144,7 @@ event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5
register_all(e);
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
if ( c$id in active )
close(active[c$id], CLOSE);


@ -120,6 +120,22 @@ type mime_match: record {
## :zeek:see:`file_magic`
type mime_matches: vector of mime_match;
## Properties of an I/O packet source being read by Zeek.
type PacketSource: record {
## Whether the packet source is a live interface or offline pcap file.
live: bool;
## The interface name for a live interface or filesystem path of
## an offline pcap file.
path: string;
## The data link-layer type of the packet source.
link_type: int;
## The netmask associated with the source or ``NETMASK_UNKNOWN``.
netmask: count;
};
## A list of packet sources being read by Zeek.
type PacketSourceList: vector of PacketSource;
## A connection's transport-layer protocol. Note that Zeek uses the term
## "connection" broadly, using flow semantics for ICMP and UDP.
type transport_proto: enum {
@ -419,6 +435,11 @@ type connection: record {
## The inner VLAN, if applicable for this connection.
inner_vlan: int &optional;
## Flag that will be true if :zeek:see:`connection_successful` has
## already been generated for the connection. See the documentation of
## that event for a definition of what makes a connection "successful".
successful: bool;
};
## Default amount of time a file can be inactive before the file analysis


@ -300,6 +300,11 @@ event connection_state_remove(c: connection) &priority=5
event connection_state_remove(c: connection) &priority=-5
{
if ( ! c$successful )
Log::write(Conn::LOG, c$conn);
}
event successful_connection_remove(c: connection) &priority=-5
{
Log::write(Conn::LOG, c$conn);
}


@ -209,7 +209,7 @@ event dce_rpc_response(c: connection, fid: count, ctx_id: count, opnum: count, s
}
}
-event connection_state_remove(c: connection)
+event successful_connection_remove(c: connection)
{
if ( ! c?$dce_rpc )
return;


@ -63,7 +63,7 @@ event dnp3_application_response_header(c: connection, is_orig: bool, application
delete c$dnp3;
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
if ( ! c?$dnp3 )
return;


@ -571,7 +571,7 @@ event dns_rejected(c: connection, msg: dns_msg, query: string, qtype: count, qcl
c$dns$rejected = T;
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
if ( ! c?$dns_state )
return;


@ -322,7 +322,7 @@ event connection_reused(c: connection) &priority=5
c$ftp_data_reuse = T;
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
if ( c$ftp_data_reuse ) return;
if ( [c$id$resp_h, c$id$resp_p] in ftp_data_expected )
@ -334,8 +334,8 @@ event connection_state_remove(c: connection) &priority=-5
}
}
-# Use state remove event to cover connections terminated by RST.
-event connection_state_remove(c: connection) &priority=-5
+# Use remove event to cover connections terminated by RST.
+event successful_connection_remove(c: connection) &priority=-5
{
if ( ! c?$ftp ) return;


@ -321,7 +321,7 @@ event http_message_done(c: connection, is_orig: bool, stat: http_message_stat) &
}
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
# Flush all pending but incomplete request/response pairs.
if ( c?$http_state )


@ -124,7 +124,7 @@ event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) &priority=10
add c$service["irc-dcc-data"];
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
if ( [c$id$resp_h, c$id$resp_p] in dcc_expected_transfers )
{


@ -118,7 +118,7 @@ event krb_tgs_response(c: connection, msg: KDC_Response)
fill_in_subjects(c);
}
-event connection_state_remove(c: connection)
+event successful_connection_remove(c: connection)
{
fill_in_subjects(c);
}


@ -228,7 +228,7 @@ event krb_tgs_response(c: connection, msg: KDC_Response) &priority=-5
do_log(c);
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
do_log(c);
}


@ -122,7 +122,7 @@ event mysql_ok(c: connection, affected_rows: count) &priority=-5
}
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
if ( c?$mysql )
{


@ -106,7 +106,7 @@ event gssapi_neg_result(c: connection, state: count) &priority=-3
}
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
if ( c?$ntlm && ! c$ntlm$done )
{


@ -138,7 +138,7 @@ event radius_message(c: connection, result: RADIUS::Message) &priority=-5
}
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
if ( c?$radius && ! c$radius$logged )
{


@ -272,7 +272,7 @@ event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason
write_log(c);
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
# If the connection is removed, then log the record immediately.
if ( c?$rdp )


@ -151,7 +151,7 @@ event rfb_share_flag(c: connection, flag: bool) &priority=5
c$rfb$share_flag = flag;
}
-event connection_state_remove(c: connection) &priority=-5
+event successful_connection_remove(c: connection) &priority=-5
{
if ( c?$rfb )
{


@ -289,7 +289,7 @@ event sip_end_entity(c: connection, is_request: bool) &priority = -5
} }
} }
event connection_state_remove(c: connection) &priority=-5 event successful_connection_remove(c: connection) &priority=-5
{ {
if ( c?$sip_state ) if ( c?$sip_state )
{ {

View file

@ -298,7 +298,7 @@ event mime_one_header(c: connection, h: mime_header_rec) &priority=3
c$smtp$path += ip; c$smtp$path += ip;
} }
event connection_state_remove(c: connection) &priority=-5 event successful_connection_remove(c: connection) &priority=-5
{ {
if ( c?$smtp ) if ( c?$smtp )
smtp_message(c); smtp_message(c);

View file

@ -93,7 +93,7 @@ function init_state(c: connection, h: SNMP::Header): Info
} }
event connection_state_remove(c: connection) &priority=-5 event successful_connection_remove(c: connection) &priority=-5
{ {
if ( c?$snmp ) if ( c?$snmp )
Log::write(LOG, c$snmp); Log::write(LOG, c$snmp);

View file

@ -111,7 +111,7 @@ event socks_login_userpass_reply(c: connection, code: count) &priority=5
c$socks$status = v5_status[code]; c$socks$status = v5_status[code];
} }
event connection_state_remove(c: connection) event successful_connection_remove(c: connection)
{ {
# This will handle the case where the analyzer failed in some way and was # This will handle the case where the analyzer failed in some way and was
# removed. We probably don't want to log these connections. # removed. We probably don't want to log these connections.

View file

@ -247,7 +247,7 @@ event ssh_capabilities(c: connection, cookie: string, capabilities: Capabilities
server_caps$server_host_key_algorithms); server_caps$server_host_key_algorithms);
} }
event connection_state_remove(c: connection) event successful_connection_remove(c: connection)
{ {
if ( c?$ssh && !c$ssh$logged ) if ( c?$ssh && !c$ssh$logged )
{ {

View file

@ -329,13 +329,13 @@ event ssl_established(c: connection) &priority=-5
finish(c, T); finish(c, T);
} }
event connection_state_remove(c: connection) &priority=20 event successful_connection_remove(c: connection) &priority=20
{ {
if ( c?$ssl && ! c$ssl$logged ) if ( c?$ssl && ! c$ssl$logged )
hook ssl_finishing(c); hook ssl_finishing(c);
} }
event connection_state_remove(c: connection) &priority=-5 event successful_connection_remove(c: connection) &priority=-5
{ {
if ( c?$ssl ) if ( c?$ssl )
# called in case a SSL connection that has not been established terminates # called in case a SSL connection that has not been established terminates

View file

@ -182,7 +182,7 @@ event ProtocolDetector::check_connection(c: connection)
} }
} }
event connection_state_remove(c: connection) event successful_connection_remove(c: connection)
{ {
if ( c$id !in conns ) if ( c$id !in conns )
{ {

View file

@ -1,5 +1,5 @@
##! Detect file downloads that have hash values matching files in Team ##! Detect file downloads that have hash values matching files in Team
##! Cymru's Malware Hash Registry (http://www.team-cymru.org/Services/MHR/). ##! Cymru's Malware Hash Registry (https://www.team-cymru.com/mhr.html).
@load base/frameworks/files @load base/frameworks/files
@load base/frameworks/notice @load base/frameworks/notice

View file

@ -250,7 +250,7 @@ event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &pr
} }
# Handle the connection ending in case no protocol was ever detected. # Handle the connection ending in case no protocol was ever detected.
event connection_state_remove(c: connection) &priority=-5 event successful_connection_remove(c: connection) &priority=-5
{ {
if ( c$known_services_done ) if ( c$known_services_done )
return; return;

View file

@ -25,7 +25,7 @@ event protocol_late_match(c: connection, atype: Analyzer::Tag)
add c$speculative_service[analyzer]; add c$speculative_service[analyzer];
} }
event connection_state_remove(c: connection) event successful_connection_remove(c: connection)
{ {
local sp_service = ""; local sp_service = "";
for ( s in c$speculative_service ) for ( s in c$speculative_service )

@ -1 +1 @@
Subproject commit 0e1f951b0bcafea63e503957ae005220c24e4b20 Subproject commit 2b3206b7add3472ea0736f2841473e11d506a85e
@ -1,5 +1,7 @@
#include <cstdio> #include <cstdio>
#include <cstring> #include <cstring>
#include <sstream>
#include <fstream>
#include <utility> #include <utility>
#include <algorithm> #include <algorithm>
#include <sys/stat.h> #include <sys/stat.h>
@ -13,6 +15,17 @@ Brofiler::Brofiler()
Brofiler::~Brofiler() Brofiler::~Brofiler()
{ {
for ( auto& s : stmts )
Unref(s);
}
void Brofiler::AddStmt(Stmt* s)
{
if ( ignoring != 0 )
return;
::Ref(s);
stmts.push_back(s);
} }
bool Brofiler::ReadStats() bool Brofiler::ReadStats()
@@ -22,27 +35,46 @@ bool Brofiler::ReadStats()
 if ( ! bf )
 return false;
-FILE* f = fopen(bf, "r");
-if ( ! f )
+std::ifstream ifs;
+ifs.open(bf, std::ifstream::in);
+if ( ! ifs )
 return false;
-char line[16384];
+std::stringstream ss;
+ss << ifs.rdbuf();
+std::string file_contents = ss.str();
+ss.clear();
+std::vector<std::string> lines;
+tokenize_string(file_contents, "\n", &lines);
 string delimiter;
 delimiter = delim;
-while( fgets(line, sizeof(line), f) )
+for ( const auto& line : lines )
 {
-line[strlen(line) - 1] = 0; //remove newline
-string cnt(strtok(line, delimiter.c_str()));
-string location(strtok(0, delimiter.c_str()));
-string desc(strtok(0, delimiter.c_str()));
-pair<string, string> location_desc(location, desc);
-uint64_t count;
-atoi_n(cnt.size(), cnt.c_str(), 0, 10, count);
-usage_map[location_desc] = count;
+if ( line.empty() )
+continue;
+std::vector<std::string> line_components;
+tokenize_string(line, delimiter, &line_components);
+if ( line_components.size() != 3 )
+{
+fprintf(stderr, "invalid ZEEK_PROFILER_FILE line: %s\n", line.data());
+continue;
+}
+std::string& cnt = line_components[0];
+std::string& location = line_components[1];
+std::string& desc = line_components[2];
+pair<string, string> location_desc(std::move(location), std::move(desc));
+uint64_t count;
+atoi_n(cnt.size(), cnt.c_str(), 0, 10, count);
+usage_map.emplace(std::move(location_desc), count);
 }
-fclose(f);
 return true;
 }
@@ -88,7 +120,7 @@ bool Brofiler::WriteStats()
 return false;
 }
-for ( list<const Stmt*>::const_iterator it = stmts.begin();
+for ( list<Stmt*>::const_iterator it = stmts.begin();
 it != stmts.end(); ++it )
 {
 ODesc location_info;
@@ -96,7 +128,8 @@ bool Brofiler::WriteStats()
 ODesc desc_info;
 (*it)->Describe(&desc_info);
 string desc(desc_info.Description());
-for_each(desc.begin(), desc.end(), canonicalize_desc());
+canonicalize_desc cd{delim};
+for_each(desc.begin(), desc.end(), cd);
 pair<string, string> location_desc(location_info.Description(), desc);
 if ( usage_map.find(location_desc) != usage_map.end() )
 usage_map[location_desc] += (*it)->GetAccessCount();
@ -38,13 +38,13 @@ public:
void IncIgnoreDepth() { ignoring++; } void IncIgnoreDepth() { ignoring++; }
void DecIgnoreDepth() { ignoring--; } void DecIgnoreDepth() { ignoring--; }
void AddStmt(const Stmt* s) { if ( ignoring == 0 ) stmts.push_back(s); } void AddStmt(Stmt* s);
private: private:
/** /**
* The current, global Brofiler instance creates this list at parse-time. * The current, global Brofiler instance creates this list at parse-time.
*/ */
list<const Stmt*> stmts; list<Stmt*> stmts;
/** /**
 * Indicates whether new statements will not be considered as part of
@ -70,9 +70,12 @@ private:
* that don't agree with the output format of Brofiler. * that don't agree with the output format of Brofiler.
*/ */
struct canonicalize_desc { struct canonicalize_desc {
char delim;
void operator() (char& c) void operator() (char& c)
{ {
if ( c == '\n' ) c = ' '; if ( c == '\n' ) c = ' ';
if ( c == delim ) c = ' ';
} }
}; };
}; };
@ -435,3 +435,25 @@ add_clang_tidy_files(${MAIN_SRCS})
# (*.pac.cc) files, and most of the generated code for BIFs (not including # (*.pac.cc) files, and most of the generated code for BIFs (not including
# *.bif.register.cc) # *.bif.register.cc)
create_clang_tidy_target() create_clang_tidy_target()
########################################################################
## CTest setup.
# Scan all .cc files for TEST_CASE macros and generate CTest targets.
if (ENABLE_ZEEK_UNIT_TESTS)
file(GLOB_RECURSE all_cc_files "*.cc")
set(test_cases "")
foreach (cc_file ${all_cc_files})
file (STRINGS ${cc_file} test_case_lines REGEX "TEST_CASE")
foreach (line ${test_case_lines})
string(REGEX REPLACE "TEST_CASE\\(\"(.+)\"\\)" "\\1" test_case "${line}")
list(APPEND test_cases "${test_case}")
endforeach ()
endforeach ()
list(LENGTH test_cases num_test_cases)
MESSAGE(STATUS "-- Found ${num_test_cases} test cases for CTest")
foreach (test_case ${test_cases})
add_test(NAME "\"${test_case}\""
COMMAND zeek --test "--test-case=${test_case}")
endforeach ()
endif ()
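
For reference, a minimal sketch of the kind of unit test the scan above picks up. This file is not part of the commit; it assumes the doctest-style TEST_CASE macro available in --enable-cpp-tests builds, and the test name and helper are hypothetical.

// Hypothetical example: a TEST_CASE whose name the file(STRINGS ...)/REGEX
// scan above would extract and register as an individual CTest entry.
#include "doctest.h"   // assumption: the single-header test framework used for the C++ unit tests

#include <cctype>
#include <string>

static std::string to_upper(std::string s)
	{
	for ( auto& c : s )
		c = static_cast<char>(std::toupper(static_cast<unsigned char>(c)));

	return s;
	}

TEST_CASE("util.to_upper")
	{
	CHECK(to_upper("zeek") == "ZEEK");
	CHECK(to_upper("") == "");
	}

Per the add_test() call above, the generated CTest entry for this case would invoke zeek --test --test-case=util.to_upper.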
@ -146,6 +146,30 @@ char* CompositeHash::SingleValHash(int type_check, char* kp0,
break; break;
} }
case TYPE_PATTERN:
{
const char* texts[2] = {
v->AsPattern()->PatternText(),
v->AsPattern()->AnywherePatternText()
};
size_t* kp;
for ( int i = 0; i < 2; i++ )
{
kp = AlignAndPadType<size_t>(kp0+i);
*kp = strlen(texts[i]) + 1;
}
kp1 = reinterpret_cast<char*>(kp+1);
for ( int i = 0; i < 2; i++ )
{
memcpy(kp1, texts[i], strlen(texts[i]) + 1);
kp1 += strlen(texts[i]) + 1;
}
break;
}
case TYPE_RECORD: case TYPE_RECORD:
{ {
char* kp = kp0; char* kp = kp0;
@ -401,6 +425,19 @@ HashKey* CompositeHash::ComputeSingletonHash(const Val* v, int type_check) const
if ( v->Type()->Tag() == TYPE_FUNC ) if ( v->Type()->Tag() == TYPE_FUNC )
return new HashKey(v->AsFunc()->GetUniqueFuncID()); return new HashKey(v->AsFunc()->GetUniqueFuncID());
if ( v->Type()->Tag() == TYPE_PATTERN )
{
const char* texts[2] = {
v->AsPattern()->PatternText(),
v->AsPattern()->AnywherePatternText()
};
int n = strlen(texts[0]) + strlen(texts[1]) + 2; // 2 for null
char* key = new char[n];
std::memcpy(key, texts[0], strlen(texts[0]) + 1);
std::memcpy(key + strlen(texts[0]) + 1, texts[1], strlen(texts[1]) + 1);
return new HashKey(false, key, n);
}
reporter->InternalError("bad index type in CompositeHash::ComputeSingletonHash"); reporter->InternalError("bad index type in CompositeHash::ComputeSingletonHash");
return 0; return 0;
@ -462,6 +499,17 @@ int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v,
break; break;
} }
case TYPE_PATTERN:
{
if ( ! v )
return (optional && ! calc_static_size) ? sz : 0;
sz = SizeAlign(sz, 2 * sizeof(size_t));
sz += strlen(v->AsPattern()->PatternText())
+ strlen(v->AsPattern()->AnywherePatternText()) + 2; // 2 for null terminators
break;
}
case TYPE_RECORD: case TYPE_RECORD:
{ {
const RecordVal* rv = v ? v->AsRecordVal() : 0; const RecordVal* rv = v ? v->AsRecordVal() : 0;
@ -831,6 +879,28 @@ const char* CompositeHash::RecoverOneVal(const HashKey* k, const char* kp0,
} }
break; break;
case TYPE_PATTERN:
{
RE_Matcher* re = nullptr;
if ( is_singleton )
{
kp1 = kp0;
int divider = strlen(kp0) + 1;
re = new RE_Matcher(kp1, kp1 + divider);
kp1 += k->Size();
}
else
{
const size_t* const len = AlignType<size_t>(kp0);
kp1 = reinterpret_cast<const char*>(len+2);
re = new RE_Matcher(kp1, kp1 + len[0]);
kp1 += len[0] + len[1];
}
pval = new PatternVal(re);
}
break;
case TYPE_RECORD: case TYPE_RECORD:
{ {
const char* kp = kp0; const char* kp = kp0;
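
For reference, the singleton pattern key built above is simply the two NUL-terminated pattern strings packed back to back, with the first terminator acting as the divider when the value is recovered. A small standalone sketch of that layout (hypothetical helper names, not CompositeHash itself):

#include <cstring>
#include <string>
#include <utility>

// Pack two C strings into one buffer, including both NUL terminators,
// mirroring the TYPE_PATTERN singleton key layout above.
static std::pair<char*, int> pack_pattern_key(const char* exact, const char* anywhere)
	{
	int n = static_cast<int>(std::strlen(exact) + std::strlen(anywhere) + 2); // 2 for the NULs
	char* key = new char[n];
	std::memcpy(key, exact, std::strlen(exact) + 1);
	std::memcpy(key + std::strlen(exact) + 1, anywhere, std::strlen(anywhere) + 1);
	return {key, n};
	}

// Recover both strings: the first NUL marks the divider, as in RecoverOneVal().
static std::pair<std::string, std::string> unpack_pattern_key(const char* key)
	{
	std::string exact = key;
	std::string anywhere = key + exact.size() + 1;
	return {exact, anywhere};
	}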
@ -72,6 +72,7 @@ Connection::Connection(NetSessions* s, const ConnIDKey& k, double t, const ConnI
resp_flow_label = 0; resp_flow_label = 0;
saw_first_orig_packet = 1; saw_first_orig_packet = 1;
saw_first_resp_packet = 0; saw_first_resp_packet = 0;
is_successful = false;
if ( pkt->l2_src ) if ( pkt->l2_src )
memcpy(orig_l2_addr, pkt->l2_src, sizeof(orig_l2_addr)); memcpy(orig_l2_addr, pkt->l2_src, sizeof(orig_l2_addr));
@ -204,11 +205,18 @@ void Connection::NextPacket(double t, int is_orig,
if ( root_analyzer ) if ( root_analyzer )
{ {
auto was_successful = is_successful;
record_current_packet = record_packet; record_current_packet = record_packet;
record_current_content = record_content; record_current_content = record_content;
root_analyzer->NextPacket(len, data, is_orig, -1, ip, caplen); root_analyzer->NextPacket(len, data, is_orig, -1, ip, caplen);
record_packet = record_current_packet; record_packet = record_current_packet;
record_content = record_current_content; record_content = record_current_content;
if ( ConnTransport() != TRANSPORT_TCP )
is_successful = true;
if ( ! was_successful && is_successful && connection_successful )
ConnectionEventFast(connection_successful, nullptr, {BuildConnVal()});
} }
else else
last_time = t; last_time = t;
@ -300,7 +308,7 @@ void Connection::InactivityTimer(double t)
void Connection::RemoveConnectionTimer(double t) void Connection::RemoveConnectionTimer(double t)
{ {
Event(connection_state_remove, 0); RemovalEvent();
sessions->Remove(this); sessions->Remove(this);
} }
@ -396,6 +404,7 @@ RecordVal* Connection::BuildConnVal()
conn_val->Assign(3, new Val(start_time, TYPE_TIME)); // ### conn_val->Assign(3, new Val(start_time, TYPE_TIME)); // ###
conn_val->Assign(4, new Val(last_time - start_time, TYPE_INTERVAL)); conn_val->Assign(4, new Val(last_time - start_time, TYPE_INTERVAL));
conn_val->Assign(6, new StringVal(history.c_str())); conn_val->Assign(6, new StringVal(history.c_str()));
conn_val->Assign(11, val_mgr->GetBool(is_successful));
conn_val->SetOrigin(this); conn_val->SetOrigin(this);
@ -448,6 +457,19 @@ void Connection::Match(Rule::PatternType type, const u_char* data, int len, bool
primary_PIA->Match(type, data, len, is_orig, bol, eol, clear_state); primary_PIA->Match(type, data, len, is_orig, bol, eol, clear_state);
} }
void Connection::RemovalEvent()
{
auto cv = BuildConnVal();
if ( connection_state_remove )
ConnectionEventFast(connection_state_remove, nullptr, {cv->Ref()});
if ( is_successful && successful_connection_remove )
ConnectionEventFast(successful_connection_remove, nullptr, {cv->Ref()});
Unref(cv);
}
void Connection::Event(EventHandlerPtr f, analyzer::Analyzer* analyzer, const char* name) void Connection::Event(EventHandlerPtr f, analyzer::Analyzer* analyzer, const char* name)
{ {
if ( ! f ) if ( ! f )
@ -114,6 +114,9 @@ public:
TransportProto ConnTransport() const { return proto; } TransportProto ConnTransport() const { return proto; }
bool IsSuccessful() const { return is_successful; };
void SetSuccessful() { is_successful = true; }
// True if we should record subsequent packets (either headers or // True if we should record subsequent packets (either headers or
// in their entirety, depending on record_contents). We still // in their entirety, depending on record_contents). We still
// record subsequent SYN/FIN/RST, regardless of how this is set. // record subsequent SYN/FIN/RST, regardless of how this is set.
@ -162,6 +165,11 @@ public:
void Match(Rule::PatternType type, const u_char* data, int len, void Match(Rule::PatternType type, const u_char* data, int len,
bool is_orig, bool bol, bool eol, bool clear_state); bool is_orig, bool bol, bool eol, bool clear_state);
/**
* Generates connection removal event(s).
*/
void RemovalEvent();
// If a handler exists for 'f', an event will be generated. If 'name' is // If a handler exists for 'f', an event will be generated. If 'name' is
// given that event's first argument will be it, and it's second will be // given that event's first argument will be it, and it's second will be
// the connection value. If 'name' is null, then the event's first // the connection value. If 'name' is null, then the event's first
@ -339,6 +347,7 @@ protected:
unsigned int record_packets:1, record_contents:1; unsigned int record_packets:1, record_contents:1;
unsigned int record_current_packet:1, record_current_content:1; unsigned int record_current_packet:1, record_current_content:1;
unsigned int saw_first_orig_packet:1, saw_first_resp_packet:1; unsigned int saw_first_orig_packet:1, saw_first_resp_packet:1;
unsigned int is_successful:1;
// Count number of connections. // Count number of connections.
static uint64_t total_connections; static uint64_t total_connections;
@ -1404,7 +1404,7 @@ void DNS_Mgr::DoProcess()
{ {
AsyncRequest* req = asyncs_timeouts.top(); AsyncRequest* req = asyncs_timeouts.top();
if ( req->time + DNS_TIMEOUT > current_time() ) if ( req->time + DNS_TIMEOUT > current_time() && ! terminating )
break; break;
if ( ! req->processed ) if ( ! req->processed )
@ -135,7 +135,7 @@ bool DbgBreakpoint::SetLocation(ParseLocationRec plr, string loc_str)
} }
at_stmt = plr.stmt; at_stmt = plr.stmt;
safe_snprintf(description, sizeof(description), "%s:%d", snprintf(description, sizeof(description), "%s:%d",
source_filename, source_line); source_filename, source_line);
debug_msg("Breakpoint %d set at %s\n", GetID(), Description()); debug_msg("Breakpoint %d set at %s\n", GetID(), Description());
@ -148,7 +148,7 @@ bool DbgBreakpoint::SetLocation(ParseLocationRec plr, string loc_str)
loc_str.c_str()); loc_str.c_str());
at_stmt = plr.stmt; at_stmt = plr.stmt;
const Location* loc = at_stmt->GetLocationInfo(); const Location* loc = at_stmt->GetLocationInfo();
safe_snprintf(description, sizeof(description), "%s at %s:%d", snprintf(description, sizeof(description), "%s at %s:%d",
function_name.c_str(), loc->filename, loc->last_line); function_name.c_str(), loc->filename, loc->last_line);
debug_msg("Breakpoint %d set at %s\n", GetID(), Description()); debug_msg("Breakpoint %d set at %s\n", GetID(), Description());
@ -171,7 +171,7 @@ bool DbgBreakpoint::SetLocation(Stmt* stmt)
AddToGlobalMap(); AddToGlobalMap();
const Location* loc = stmt->GetLocationInfo(); const Location* loc = stmt->GetLocationInfo();
safe_snprintf(description, sizeof(description), "%s:%d", snprintf(description, sizeof(description), "%s:%d",
loc->filename, loc->last_line); loc->filename, loc->last_line);
debug_msg("Breakpoint %d set at %s\n", GetID(), Description()); debug_msg("Breakpoint %d set at %s\n", GetID(), Description());
@ -717,7 +717,7 @@ static char* get_prompt(bool reset_counter = false)
if ( reset_counter ) if ( reset_counter )
counter = 0; counter = 0;
safe_snprintf(prompt, sizeof(prompt), "(Zeek [%d]) ", counter++); snprintf(prompt, sizeof(prompt), "(Zeek [%d]) ", counter++);
return prompt; return prompt;
} }
@ -743,7 +743,7 @@ string get_context_description(const Stmt* stmt, const Frame* frame)
size_t buf_size = strlen(d.Description()) + strlen(loc.filename) + 1024; size_t buf_size = strlen(d.Description()) + strlen(loc.filename) + 1024;
char* buf = new char[buf_size]; char* buf = new char[buf_size];
safe_snprintf(buf, buf_size, "In %s at %s:%d", snprintf(buf, buf_size, "In %s at %s:%d",
d.Description(), loc.filename, loc.last_line); d.Description(), loc.filename, loc.last_line);
string retval(buf); string retval(buf);
@ -4339,6 +4339,7 @@ LambdaExpr::LambdaExpr(std::unique_ptr<function_ingredients> arg_ing,
// Install a dummy version of the function globally for use only // Install a dummy version of the function globally for use only
// when broker provides a closure. // when broker provides a closure.
::Ref(ingredients->body);
BroFunc* dummy_func = new BroFunc( BroFunc* dummy_func = new BroFunc(
ingredients->id, ingredients->id,
ingredients->body, ingredients->body,
@ -4378,6 +4379,7 @@ LambdaExpr::LambdaExpr(std::unique_ptr<function_ingredients> arg_ing,
dummy_func->SetName(my_name.c_str()); dummy_func->SetName(my_name.c_str());
Val* v = new Val(dummy_func); Val* v = new Val(dummy_func);
Unref(dummy_func);
id->SetVal(v); // id will unref v when its done. id->SetVal(v); // id will unref v when its done.
id->SetType(ingredients->id->Type()->Ref()); id->SetType(ingredients->id->Type()->Ref());
id->SetConst(); id->SetConst();
@ -4385,6 +4387,7 @@ LambdaExpr::LambdaExpr(std::unique_ptr<function_ingredients> arg_ing,
Val* LambdaExpr::Eval(Frame* f) const Val* LambdaExpr::Eval(Frame* f) const
{ {
::Ref(ingredients->body);
BroFunc* lamb = new BroFunc( BroFunc* lamb = new BroFunc(
ingredients->id, ingredients->id,
ingredients->body, ingredients->body,
@ -4398,7 +4401,9 @@ Val* LambdaExpr::Eval(Frame* f) const
// Allows for lookups by the receiver. // Allows for lookups by the receiver.
lamb->SetName(my_name.c_str()); lamb->SetName(my_name.c_str());
return new Val(lamb); auto rval = new Val(lamb);
Unref(lamb);
return rval;
} }
void LambdaExpr::ExprDescribe(ODesc* d) const void LambdaExpr::ExprDescribe(ODesc* d) const
@ -66,6 +66,9 @@ public:
bool IsRawOutput() const { return raw_output; } bool IsRawOutput() const { return raw_output; }
protected: protected:
friend class PrintStmt;
BroFile() { Init(); } BroFile() { Init(); }
void Init(); void Init();
@ -80,7 +83,8 @@ protected:
// Returns nil if the file is not active, was in error, etc. // Returns nil if the file is not active, was in error, etc.
// (Protected because we do not want anyone to write directly // (Protected because we do not want anyone to write directly
// to the file.) // to the file, but the PrintStmt friend uses this to check whether
// it's really stdout.)
FILE* File(); FILE* File();
// Raises a file_opened event. // Raises a file_opened event.
@ -31,20 +31,54 @@ Frame::Frame(int arg_size, const BroFunc* func, const val_list* fn_args)
Frame::~Frame() Frame::~Frame()
{ {
for ( auto& func : functions_with_closure_frame_reference )
{
func->StrengthenClosureReference(this);
Unref(func);
}
// Deleting a Frame that is a view is a no-op. // Deleting a Frame that is a view is a no-op.
Unref(trigger); Unref(trigger);
if ( ! weak_closure_ref )
Unref(closure); Unref(closure);
for ( auto& i : outer_ids ) for ( auto& i : outer_ids )
Unref(i); Unref(i);
Release(); Release();
delete [] weak_refs;
} }
void Frame::SetElement(int n, Val* v) void Frame::AddFunctionWithClosureRef(BroFunc* func)
{ {
Unref(frame[n]); ::Ref(func);
functions_with_closure_frame_reference.emplace_back(func);
}
void Frame::SetElement(int n, Val* v, bool weak_ref)
{
UnrefElement(n);
frame[n] = v; frame[n] = v;
if ( weak_ref )
{
if ( ! weak_refs )
{
weak_refs = new bool[size];
for ( auto i = 0; i < size; ++i )
weak_refs[i] = false;
}
weak_refs[n] = true;
}
else
{
if ( weak_refs )
weak_refs[n] = false;
}
} }
void Frame::SetElement(const ID* id, Val* v) void Frame::SetElement(const ID* id, Val* v)
@ -62,8 +96,15 @@ void Frame::SetElement(const ID* id, Val* v)
if ( offset_map.size() ) if ( offset_map.size() )
{ {
auto where = offset_map.find(std::string(id->Name())); auto where = offset_map.find(std::string(id->Name()));
if ( where != offset_map.end() ) if ( where != offset_map.end() )
SetElement(where->second, v); {
// Need to add a Ref to 'v' since the SetElement() for
// id->Offset() below is otherwise responsible for keeping track
// of the implied reference count of the passed-in 'v' argument.
// i.e. if we end up storing it twice, we need an additional Ref.
SetElement(where->second, v->Ref());
}
} }
SetElement(id->Offset(), v); SetElement(id->Offset(), v);
@ -92,7 +133,7 @@ void Frame::Reset(int startIdx)
{ {
for ( int i = startIdx; i < size; ++i ) for ( int i = startIdx; i < size; ++i )
{ {
Unref(frame[i]); UnrefElement(i);
frame[i] = 0; frame[i] = 0;
} }
} }
@ -100,7 +141,7 @@ void Frame::Reset(int startIdx)
void Frame::Release() void Frame::Release()
{ {
for ( int i = 0; i < size; ++i ) for ( int i = 0; i < size; ++i )
Unref(frame[i]); UnrefElement(i);
delete [] frame; delete [] frame;
} }
@ -145,7 +186,34 @@ Frame* Frame::Clone() const
return other; return other;
} }
Frame* Frame::SelectiveClone(const id_list& selection) const static bool val_is_func(Val* v, BroFunc* func)
{
if ( v->Type()->Tag() != TYPE_FUNC )
return false;
return v->AsFunc() == func;
}
static Val* clone_if_not_func(Val** frame, int offset, BroFunc* func,
Frame* other)
{
auto v = frame[offset];
if ( ! v )
return nullptr;
if ( val_is_func(v, func) )
{
other->SetElement(offset, v, true);
return v;
}
auto rval = v->Clone();
other->SetElement(offset, rval);
return rval;
}
Frame* Frame::SelectiveClone(const id_list& selection, BroFunc* func) const
{ {
if ( selection.length() == 0 ) if ( selection.length() == 0 )
return nullptr; return nullptr;
@ -171,7 +239,7 @@ Frame* Frame::SelectiveClone(const id_list& selection) const
auto where = offset_map.find(std::string(id->Name())); auto where = offset_map.find(std::string(id->Name()));
if ( where != offset_map.end() ) if ( where != offset_map.end() )
{ {
other->frame[where->second] = frame[where->second]->Clone(); clone_if_not_func(frame, where->second, func, other);
continue; continue;
} }
} }
@ -179,7 +247,7 @@ Frame* Frame::SelectiveClone(const id_list& selection) const
if ( ! frame[id->Offset()] ) if ( ! frame[id->Offset()] )
reporter->InternalError("Attempted to clone an id ('%s') with no associated value.", id->Name()); reporter->InternalError("Attempted to clone an id ('%s') with no associated value.", id->Name());
other->frame[id->Offset()] = frame[id->Offset()]->Clone(); clone_if_not_func(frame, id->Offset(), func, other);
} }
/** /**
@ -379,6 +447,7 @@ std::pair<bool, Frame*> Frame::Unserialize(const broker::vector& data)
// Frame takes ownership of unref'ing elements in outer_ids // Frame takes ownership of unref'ing elements in outer_ids
rf->outer_ids = std::move(outer_ids); rf->outer_ids = std::move(outer_ids);
rf->closure = closure; rf->closure = closure;
rf->weak_closure_ref = false;
for ( int i = 0; i < frame_size; ++i ) for ( int i = 0; i < frame_size; ++i )
{ {
@ -437,7 +506,7 @@ void Frame::CaptureClosure(Frame* c, id_list arg_outer_ids)
closure = c; closure = c;
if ( closure ) if ( closure )
Ref(closure); weak_closure_ref = true;
/** /**
* Want to capture closures by copy? * Want to capture closures by copy?
@ -44,8 +44,11 @@ public:
* *
* @param n the index to set * @param n the index to set
* @param v the value to set it to * @param v the value to set it to
 * @param weak_ref if true, the frame does not own the value and will not
 * unref it upon destruction. Used to break circular references between
 * lambda functions and closure frames.
*/ */
void SetElement(int n, Val* v); void SetElement(int n, Val* v, bool weak_ref = false);
/** /**
* Associates *id* and *v* in the frame. Future lookups of * Associates *id* and *v* in the frame. Future lookups of
@ -149,7 +152,7 @@ public:
* *selection* have been cloned. All other values are made to be * *selection* have been cloned. All other values are made to be
* null. * null.
*/ */
Frame* SelectiveClone(const id_list& selection) const; Frame* SelectiveClone(const id_list& selection, BroFunc* func) const;
/** /**
* Serializes the Frame into a Broker representation. * Serializes the Frame into a Broker representation.
@ -215,8 +218,28 @@ public:
void SetDelayed() { delayed = true; } void SetDelayed() { delayed = true; }
bool HasDelayed() const { return delayed; } bool HasDelayed() const { return delayed; }
/**
* Track a new function that refers to this frame for use as a closure.
 * This frame's destructor will then upgrade that function's reference
* from weak to strong (by making a copy). The initial use of
* weak references prevents unbreakable circular references that
* otherwise cause memory leaks.
*/
void AddFunctionWithClosureRef(BroFunc* func);
private: private:
/**
 * Unrefs the value at offset 'n' of the frame unless it's a weak reference.
*/
void UnrefElement(int n)
{
if ( weak_refs && weak_refs[n] )
return;
Unref(frame[n]);
}
/** Have we captured this id? */ /** Have we captured this id? */
bool IsOuterID(const ID* in) const; bool IsOuterID(const ID* in) const;
@ -242,8 +265,13 @@ private:
/** Associates ID's offsets with values. */ /** Associates ID's offsets with values. */
Val** frame; Val** frame;
/** Values that are weakly referenced by the frame. Used to
* prevent circular reference memory leaks in lambda/closures */
bool* weak_refs = nullptr;
/** The enclosing frame of this frame. */ /** The enclosing frame of this frame. */
Frame* closure; Frame* closure;
bool weak_closure_ref = false;
/** ID's used in this frame from the enclosing frame. */ /** ID's used in this frame from the enclosing frame. */
id_list outer_ids; id_list outer_ids;
@ -268,6 +296,8 @@ private:
Trigger* trigger; Trigger* trigger;
const CallExpr* call; const CallExpr* call;
bool delayed; bool delayed;
std::vector<BroFunc*> functions_with_closure_frame_reference;
}; };
/** /**
@ -132,6 +132,7 @@ Func* Func::DoClone()
{ {
// By default, ok just to return a reference. Func does not have any state // By default, ok just to return a reference. Func does not have any state
// that is different across instances. // that is different across instances.
::Ref(this);
return this; return this;
} }
@ -286,6 +287,8 @@ BroFunc::~BroFunc()
{ {
std::for_each(bodies.begin(), bodies.end(), std::for_each(bodies.begin(), bodies.end(),
[](Body& b) { Unref(b.stmts); }); [](Body& b) { Unref(b.stmts); });
if ( ! weak_closure_ref )
Unref(closure); Unref(closure);
} }
@ -490,14 +493,35 @@ void BroFunc::AddClosure(id_list ids, Frame* f)
SetClosureFrame(f); SetClosureFrame(f);
} }
bool BroFunc::StrengthenClosureReference(Frame* f)
{
if ( closure != f )
return false;
if ( ! weak_closure_ref )
return false;
closure = closure->SelectiveClone(outer_ids, this);
weak_closure_ref = false;
return true;
}
void BroFunc::SetClosureFrame(Frame* f) void BroFunc::SetClosureFrame(Frame* f)
{ {
if ( closure ) if ( closure )
reporter->InternalError("Tried to override closure for BroFunc %s.", reporter->InternalError("Tried to override closure for BroFunc %s.",
Name()); Name());
// Have to use weak references initially because otherwise Ref'ing the
// original frame creates a circular reference: the function holds a
// reference to the frame and the frame contains a reference to this
// function value. And we can't just do a shallow clone of the frame
// up front because the closure semantics in Zeek allow mutating
// the outer frame.
closure = f; closure = f;
Ref(closure); weak_closure_ref = true;
f->AddFunctionWithClosureRef(this);
} }
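
The comment above describes a classic ownership cycle. As a minimal standalone illustration (hypothetical types and shared_ptr/weak_ptr as the analogue, not Zeek's Frame/BroFunc API): the frame owns the function value stored in one of its slots, so the function's back-pointer to the frame has to stay non-owning, or neither object can ever be freed.

#include <cstdio>
#include <memory>

struct Closure;

struct Lambda
	{
	// Non-owning ("weak") view of the frame it closed over, mirroring
	// weak_closure_ref above: an owning pointer here would complete a cycle.
	std::weak_ptr<Closure> frame;
	};

struct Closure
	{
	// The frame owns the lambda value stored in one of its slots.
	std::shared_ptr<Lambda> slot;

	~Closure()
		{
		// Analogue of the destructor-time "strengthening" above: before the
		// frame goes away, the lambda must copy out whatever it still needs.
		std::puts("frame destroyed; lambda now needs its own copy");
		}
	};

int main()
	{
	auto frame = std::make_shared<Closure>();
	frame->slot = std::make_shared<Lambda>();
	frame->slot->frame = frame; // weak back-reference: no cycle, both objects are freed
	}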
bool BroFunc::UpdateClosure(const broker::vector& data) bool BroFunc::UpdateClosure(const broker::vector& data)
@ -510,9 +534,10 @@ bool BroFunc::UpdateClosure(const broker::vector& data)
if ( new_closure ) if ( new_closure )
new_closure->SetFunction(this); new_closure->SetFunction(this);
if ( closure ) if ( ! weak_closure_ref )
Unref(closure); Unref(closure);
weak_closure_ref = false;
closure = new_closure; closure = new_closure;
return true; return true;
@ -528,7 +553,8 @@ Func* BroFunc::DoClone()
CopyStateInto(other); CopyStateInto(other);
other->frame_size = frame_size; other->frame_size = frame_size;
other->closure = closure ? closure->SelectiveClone(outer_ids) : nullptr; other->closure = closure ? closure->SelectiveClone(outer_ids, this) : nullptr;
other->weak_closure_ref = false;
other->outer_ids = outer_ids; other->outer_ids = outer_ids;
return other; return other;
@ -814,3 +840,68 @@ bool check_built_in_call(BuiltinFunc* f, CallExpr* call)
return true; return true;
} }
// Gets a function's priority from its Scope's attributes. Errors if it sees any
// problems.
static int get_func_priority(const attr_list& attrs)
{
int priority = 0;
for ( const auto& a : attrs )
{
if ( a->Tag() == ATTR_DEPRECATED )
continue;
if ( a->Tag() != ATTR_PRIORITY )
{
a->Error("illegal attribute for function body");
continue;
}
Val* v = a->AttrExpr()->Eval(0);
if ( ! v )
{
a->Error("cannot evaluate attribute expression");
continue;
}
if ( ! IsIntegral(v->Type()->Tag()) )
{
a->Error("expression is not of integral type");
continue;
}
priority = v->InternalInt();
}
return priority;
}
function_ingredients::function_ingredients(Scope* scope, Stmt* body)
{
frame_size = scope->Length();
inits = scope->GetInits();
this->scope = scope;
::Ref(this->scope);
id = scope->ScopeID();
::Ref(id);
auto attrs = scope->Attrs();
priority = (attrs ? get_func_priority(*attrs) : 0);
this->body = body;
::Ref(this->body);
}
function_ingredients::~function_ingredients()
{
Unref(id);
Unref(body);
Unref(scope);
for ( const auto& i : *inits )
Unref(i);
delete inits;
}
@ -114,6 +114,12 @@ public:
*/ */
bool UpdateClosure(const broker::vector& data); bool UpdateClosure(const broker::vector& data);
/**
* If the function's closure is a weak reference to the given frame,
* upgrade to a strong reference of a shallow clone of that frame.
*/
bool StrengthenClosureReference(Frame* f);
/** /**
* Serializes this function's closure. * Serializes this function's closure.
* *
@ -154,6 +160,7 @@ private:
id_list outer_ids; id_list outer_ids;
// The frame the BroFunc was initialized in. // The frame the BroFunc was initialized in.
Frame* closure = nullptr; Frame* closure = nullptr;
bool weak_closure_ref = false;
}; };
typedef Val* (*built_in_func)(Frame* frame, val_list* args); typedef Val* (*built_in_func)(Frame* frame, val_list* args);
@ -192,13 +199,20 @@ struct CallInfo {
// Struct that collects all the specifics defining a Func. Used for BroFuncs // Struct that collects all the specifics defining a Func. Used for BroFuncs
// with closures. // with closures.
struct function_ingredients { struct function_ingredients {
// Gathers all of the information from a scope and a function body needed
// to build a function.
function_ingredients(Scope* scope, Stmt* body);
~function_ingredients();
ID* id; ID* id;
Stmt* body; Stmt* body;
id_list* inits; id_list* inits;
int frame_size; int frame_size;
int priority; int priority;
Scope* scope; Scope* scope;
}; };
extern vector<CallInfo> call_stack; extern vector<CallInfo> call_stack;
@ -65,6 +65,17 @@ void notifier::Registry::Modified(Modifiable* m)
i->second->Modified(m); i->second->Modified(m);
} }
void notifier::Registry::Terminate()
{
std::set<Receiver*> receivers;
for ( auto& r : registrations )
receivers.emplace(r.second);
for ( auto& r : receivers )
r->Terminate();
}
notifier::Modifiable::~Modifiable() notifier::Modifiable::~Modifiable()
{ {
if ( num_receivers ) if ( num_receivers )
@ -30,6 +30,12 @@ public:
* @param m object that was modified * @param m object that was modified
*/ */
virtual void Modified(Modifiable* m) = 0; virtual void Modified(Modifiable* m) = 0;
/**
* Callback executed when notification registry is terminating and
* no further modifications can possibly occur.
*/
virtual void Terminate() { }
}; };
/** Singleton class tracking all notification requests globally. */ /** Singleton class tracking all notification requests globally. */
@ -69,6 +75,12 @@ public:
*/ */
void Unregister(Modifiable* m); void Unregister(Modifiable* m);
/**
* Notifies all receivers that no further modifications will occur
* as the registry is shutting down.
*/
void Terminate();
private: private:
friend class Modifiable; friend class Modifiable;
@ -1,11 +1,25 @@
#include "PacketFilter.h" #include "PacketFilter.h"
void PacketFilter::DeleteFilter(void* data)
{
auto f = static_cast<Filter*>(data);
delete f;
}
PacketFilter::PacketFilter(bool arg_default)
{
default_match = arg_default;
src_filter.SetDeleteFunction(PacketFilter::DeleteFilter);
dst_filter.SetDeleteFunction(PacketFilter::DeleteFilter);
}
void PacketFilter::AddSrc(const IPAddr& src, uint32_t tcp_flags, double probability) void PacketFilter::AddSrc(const IPAddr& src, uint32_t tcp_flags, double probability)
{ {
Filter* f = new Filter; Filter* f = new Filter;
f->tcp_flags = tcp_flags; f->tcp_flags = tcp_flags;
f->probability = uint32_t(probability * RAND_MAX); f->probability = uint32_t(probability * RAND_MAX);
src_filter.Insert(src, 128, f); auto prev = static_cast<Filter*>(src_filter.Insert(src, 128, f));
delete prev;
} }
void PacketFilter::AddSrc(Val* src, uint32_t tcp_flags, double probability) void PacketFilter::AddSrc(Val* src, uint32_t tcp_flags, double probability)
@ -13,7 +27,8 @@ void PacketFilter::AddSrc(Val* src, uint32_t tcp_flags, double probability)
Filter* f = new Filter; Filter* f = new Filter;
f->tcp_flags = tcp_flags; f->tcp_flags = tcp_flags;
f->probability = uint32_t(probability * RAND_MAX); f->probability = uint32_t(probability * RAND_MAX);
src_filter.Insert(src, f); auto prev = static_cast<Filter*>(src_filter.Insert(src, f));
delete prev;
} }
void PacketFilter::AddDst(const IPAddr& dst, uint32_t tcp_flags, double probability) void PacketFilter::AddDst(const IPAddr& dst, uint32_t tcp_flags, double probability)
@ -21,7 +36,8 @@ void PacketFilter::AddDst(const IPAddr& dst, uint32_t tcp_flags, double probabil
Filter* f = new Filter; Filter* f = new Filter;
f->tcp_flags = tcp_flags; f->tcp_flags = tcp_flags;
f->probability = uint32_t(probability * RAND_MAX); f->probability = uint32_t(probability * RAND_MAX);
dst_filter.Insert(dst, 128, f); auto prev = static_cast<Filter*>(dst_filter.Insert(dst, 128, f));
delete prev;
} }
void PacketFilter::AddDst(Val* dst, uint32_t tcp_flags, double probability) void PacketFilter::AddDst(Val* dst, uint32_t tcp_flags, double probability)
@ -29,27 +45,36 @@ void PacketFilter::AddDst(Val* dst, uint32_t tcp_flags, double probability)
Filter* f = new Filter; Filter* f = new Filter;
f->tcp_flags = tcp_flags; f->tcp_flags = tcp_flags;
f->probability = uint32_t(probability * RAND_MAX); f->probability = uint32_t(probability * RAND_MAX);
dst_filter.Insert(dst, f); auto prev = static_cast<Filter*>(dst_filter.Insert(dst, f));
delete prev;
} }
bool PacketFilter::RemoveSrc(const IPAddr& src) bool PacketFilter::RemoveSrc(const IPAddr& src)
{ {
return src_filter.Remove(src, 128) != 0; auto f = static_cast<Filter*>(src_filter.Remove(src, 128));
delete f;
return f != nullptr;
} }
bool PacketFilter::RemoveSrc(Val* src) bool PacketFilter::RemoveSrc(Val* src)
{ {
return src_filter.Remove(src) != NULL; auto f = static_cast<Filter*>(src_filter.Remove(src));
delete f;
return f != nullptr;
} }
bool PacketFilter::RemoveDst(const IPAddr& dst) bool PacketFilter::RemoveDst(const IPAddr& dst)
{ {
return dst_filter.Remove(dst, 128) != NULL; auto f = static_cast<Filter*>(dst_filter.Remove(dst, 128));
delete f;
return f != nullptr;
} }
bool PacketFilter::RemoveDst(Val* dst) bool PacketFilter::RemoveDst(Val* dst)
{ {
return dst_filter.Remove(dst) != NULL; auto f = static_cast<Filter*>(dst_filter.Remove(dst));
delete f;
return f != nullptr;
} }
bool PacketFilter::Match(const IP_Hdr* ip, int len, int caplen) bool PacketFilter::Match(const IP_Hdr* ip, int len, int caplen)
@ -7,7 +7,7 @@
class PacketFilter { class PacketFilter {
public: public:
explicit PacketFilter(bool arg_default) { default_match = arg_default; } explicit PacketFilter(bool arg_default);
~PacketFilter() {} ~PacketFilter() {}
// Drops all packets from a particular source (which may be given // Drops all packets from a particular source (which may be given
@ -34,6 +34,8 @@ private:
uint32_t probability; uint32_t probability;
}; };
static void DeleteFilter(void* data);
bool MatchFilter(const Filter& f, const IP_Hdr& ip, int len, int caplen); bool MatchFilter(const Filter& f, const IP_Hdr& ip, int len, int caplen);
bool default_match; bool default_match;
@ -18,8 +18,8 @@ private:
}; };
public: public:
PrefixTable() { tree = New_Patricia(128); } PrefixTable() { tree = New_Patricia(128); delete_function = nullptr; }
~PrefixTable() { Destroy_Patricia(tree, 0); } ~PrefixTable() { Destroy_Patricia(tree, delete_function); }
// Addr in network byte order. If data is zero, acts like a set. // Addr in network byte order. If data is zero, acts like a set.
// Returns ptr to old data if already existing. // Returns ptr to old data if already existing.
@ -43,7 +43,10 @@ public:
void* Remove(const IPAddr& addr, int width); void* Remove(const IPAddr& addr, int width);
void* Remove(const Val* value); void* Remove(const Val* value);
void Clear() { Clear_Patricia(tree, 0); } void Clear() { Clear_Patricia(tree, delete_function); }
// Sets a function to call for each node when table is cleared/destroyed.
void SetDeleteFunction(data_fn_t del_fn) { delete_function = del_fn; }
iterator InitIterator(); iterator InitIterator();
void* GetNext(iterator* i); void* GetNext(iterator* i);
@ -53,4 +56,5 @@ private:
static IPPrefix PrefixToIPPrefix(prefix_t* p); static IPPrefix PrefixToIPPrefix(prefix_t* p);
patricia_tree_t* tree; patricia_tree_t* tree;
data_fn_t delete_function;
}; };
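
The delete-callback hook added here is what lets the PacketFilter changes above reclaim their Filter objects when a table is cleared or destroyed, and when an Insert() overwrites an existing entry. A generic standalone sketch of the same ownership pattern (a hypothetical MiniTable, not the real PrefixTable API):

#include <unordered_map>

typedef void (*data_fn_t)(void*); // mirrors the callback shape used by SetDeleteFunction() above

class MiniTable
	{
public:
	~MiniTable()	{ Clear(); }

	void SetDeleteFunction(data_fn_t f)	{ delete_function = f; }

	// Returns the previous payload (now owned by the caller), as Insert() does above.
	void* Insert(int key, void* data)
		{
		void* prev = entries.count(key) ? entries[key] : nullptr;
		entries[key] = data;
		return prev;
		}

	void Clear()
		{
		if ( delete_function )
			for ( auto& e : entries )
				delete_function(e.second);

		entries.clear();
		}

private:
	std::unordered_map<int, void*> entries;
	data_fn_t delete_function = nullptr;
	};

PacketFilter's new constructor follows this pattern: it registers DeleteFilter() on both prefix tables so that Clear()/destruction frees each stored Filter, and the Remove*/Add* methods delete whatever payload the table hands back.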
@ -109,7 +109,7 @@ void Specific_RE_Matcher::MakeCaseInsensitive()
char* s = new char[n + 5 /* slop */]; char* s = new char[n + 5 /* slop */];
safe_snprintf(s, n + 5, fmt, pattern_text); snprintf(s, n + 5, fmt, pattern_text);
delete [] pattern_text; delete [] pattern_text;
pattern_text = s; pattern_text = s;
@ -493,7 +493,7 @@ static RE_Matcher* matcher_merge(const RE_Matcher* re1, const RE_Matcher* re2,
int n = strlen(text1) + strlen(text2) + strlen(merge_op) + 32 /* slop */ ; int n = strlen(text1) + strlen(text2) + strlen(merge_op) + 32 /* slop */ ;
char* merge_text = new char[n]; char* merge_text = new char[n];
safe_snprintf(merge_text, n, "(%s)%s(%s)", text1, merge_op, text2); snprintf(merge_text, n, "(%s)%s(%s)", text1, merge_op, text2);
RE_Matcher* merge = new RE_Matcher(merge_text); RE_Matcher* merge = new RE_Matcher(merge_text);
delete [] merge_text; delete [] merge_text;
@ -430,7 +430,7 @@ void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out,
{ {
va_list aq; va_list aq;
va_copy(aq, ap); va_copy(aq, ap);
int n = safe_vsnprintf(buffer, size, fmt, aq); int n = vsnprintf(buffer, size, fmt, aq);
va_end(aq); va_end(aq);
if ( postfix ) if ( postfix )
@ -451,7 +451,7 @@ void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out,
if ( postfix && *postfix ) if ( postfix && *postfix )
// Note, if you change this fmt string, adjust the additional // Note, if you change this fmt string, adjust the additional
// buffer size above. // buffer size above.
safe_snprintf(buffer + strlen(buffer), size - strlen(buffer), " (%s)", postfix); snprintf(buffer + strlen(buffer), size - strlen(buffer), " (%s)", postfix);
bool raise_event = true; bool raise_event = true;
@ -3,6 +3,7 @@
#include "zeek-config.h" #include "zeek-config.h"
#include <netinet/in.h>
#include <arpa/inet.h> #include <arpa/inet.h>
#include <stdlib.h> #include <stdlib.h>
@ -1046,9 +1047,7 @@ void NetSessions::Remove(Connection* c)
} }
c->Done(); c->Done();
c->RemovalEvent();
if ( connection_state_remove )
c->Event(connection_state_remove, 0);
// Zero out c's copy of the key, so that if c has been Ref()'d // Zero out c's copy of the key, so that if c has been Ref()'d
// up, we know on a future call to Remove() that it's no // up, we know on a future call to Remove() that it's no
@ -1141,21 +1140,21 @@ void NetSessions::Drain()
{ {
Connection* tc = entry.second; Connection* tc = entry.second;
tc->Done(); tc->Done();
tc->Event(connection_state_remove, 0); tc->RemovalEvent();
} }
for ( const auto& entry : udp_conns ) for ( const auto& entry : udp_conns )
{ {
Connection* uc = entry.second; Connection* uc = entry.second;
uc->Done(); uc->Done();
uc->Event(connection_state_remove, 0); uc->RemovalEvent();
} }
for ( const auto& entry : icmp_conns ) for ( const auto& entry : icmp_conns )
{ {
Connection* ic = entry.second; Connection* ic = entry.second;
ic->Done(); ic->Done();
ic->Event(connection_state_remove, 0); ic->RemovalEvent();
} }
ExpireTimerMgrs(); ExpireTimerMgrs();
@ -14,6 +14,9 @@
#include "Debug.h" #include "Debug.h"
#include "Traverse.h" #include "Traverse.h"
#include "Trigger.h" #include "Trigger.h"
#include "IntrusivePtr.h"
#include "logging/Manager.h"
#include "logging/logging.bif.h"
const char* stmt_name(BroStmtTag t) const char* stmt_name(BroStmtTag t)
{ {
@ -184,6 +187,40 @@ TraversalCode ExprListStmt::Traverse(TraversalCallback* cb) const
static BroFile* print_stdout = 0; static BroFile* print_stdout = 0;
static IntrusivePtr<EnumVal> lookup_enum_val(const char* module_name, const char* name)
{
ID* id = lookup_ID(name, module_name);
assert(id);
assert(id->IsEnumConst());
EnumType* et = id->Type()->AsEnumType();
int index = et->Lookup(module_name, name);
assert(index >= 0);
IntrusivePtr<EnumVal> rval{et->GetVal(index), false};
return rval;
}
static Val* print_log(val_list* vals)
{
auto plval = lookup_enum_val("Log", "PRINTLOG");
auto record = make_intrusive<RecordVal>(internal_type("Log::PrintLogInfo")->AsRecordType());
auto vec = make_intrusive<VectorVal>(internal_type("string_vec")->AsVectorType());
for ( const auto& val : *vals )
{
ODesc d(DESC_READABLE);
val->Describe(&d);
vec->Assign(vec->Size(), new StringVal(d.Description()));
}
record->Assign(0, new Val(current_time(), TYPE_TIME));
record->Assign(1, vec.detach());
log_mgr->Write(plval.get(), record.get());
return nullptr;
}
Val* PrintStmt::DoExec(val_list* vals, stmt_flow_type& /* flow */) const Val* PrintStmt::DoExec(val_list* vals, stmt_flow_type& /* flow */) const
{ {
RegisterAccess(); RegisterAccess();
@ -203,6 +240,26 @@ Val* PrintStmt::DoExec(val_list* vals, stmt_flow_type& /* flow */) const
++offset; ++offset;
} }
static auto print_log_type = static_cast<BifEnum::Log::PrintLogType>(
internal_val("Log::print_to_log")->AsEnum());
switch ( print_log_type ) {
case BifEnum::Log::REDIRECT_NONE:
break;
case BifEnum::Log::REDIRECT_ALL:
return print_log(vals);
case BifEnum::Log::REDIRECT_STDOUT:
if ( f->File() == stdout )
// Should catch even printing to a "manually opened" stdout file,
// like "/dev/stdout" or "-".
return print_log(vals);
break;
default:
reporter->InternalError("unknown Log::PrintLogType value: %d",
print_log_type);
break;
}
desc_style style = f->IsRawOutput() ? RAW_STYLE : STANDARD_STYLE; desc_style style = f->IsRawOutput() ? RAW_STYLE : STANDARD_STYLE;
if ( f->IsRawOutput() ) if ( f->IsRawOutput() )
@ -171,6 +171,27 @@ Trigger::Trigger(Expr* arg_cond, Stmt* arg_body, Stmt* arg_timeout_stmts,
Unref(this); Unref(this);
} }
void Trigger::Terminate()
{
if ( is_return )
{
auto parent = frame->GetTrigger();
if ( ! parent->Disabled() )
{
// If the trigger was already disabled due to interpreter
// exception, an Unref already happened at that point.
parent->Disable();
Unref(parent);
}
frame->ClearTrigger();
}
Disable();
Unref(this);
}
Trigger::~Trigger() Trigger::~Trigger()
{ {
DBG_LOG(DBG_NOTIFIERS, "%s: deleting", Name()); DBG_LOG(DBG_NOTIFIERS, "%s: deleting", Name());
@ -62,6 +62,10 @@ public:
// later to avoid race conditions. // later to avoid race conditions.
void Modified(notifier::Modifiable* m) override void Modified(notifier::Modifiable* m) override
{ QueueTrigger(this); } { QueueTrigger(this); }
// Overridden from notifier::Receiver. If we're still waiting
// on an ID/Val to be modified at termination time, we can't hope
// for any further progress to be made, so just Unref ourselves.
void Terminate() override;
const char* Name() const; const char* Name() const;
@ -391,7 +391,7 @@ TableType::TableType(TypeList* ind, BroType* yield)
// Allow functions, since they can be compared // Allow functions, since they can be compared
// for Func* pointer equality. // for Func* pointer equality.
if ( t == TYPE_INTERNAL_OTHER && tli->Tag() != TYPE_FUNC && if ( t == TYPE_INTERNAL_OTHER && tli->Tag() != TYPE_FUNC &&
tli->Tag() != TYPE_RECORD ) tli->Tag() != TYPE_RECORD && tli->Tag() != TYPE_PATTERN )
{ {
tli->Error("bad index type"); tli->Error("bad index type");
SetError(); SetError();
@ -824,6 +824,71 @@ void RecordType::DescribeReST(ODesc* d, bool roles_only) const
d->PopType(this); d->PopType(this);
} }
static string container_type_name(const BroType* ft)
{
string s;
if ( ft->Tag() == TYPE_RECORD )
s = "record " + ft->GetName();
else if ( ft->Tag() == TYPE_VECTOR )
s = "vector of " + container_type_name(ft->YieldType());
else if ( ft->Tag() == TYPE_TABLE )
{
if ( ft->IsSet() )
s = "set[";
else
s = "table[";
const type_list* tl = ((const IndexType*) ft)->IndexTypes();
loop_over_list(*tl, i)
{
if ( i > 0 )
s += ",";
s += container_type_name((*tl)[i]);
}
s += "]";
if ( ft->YieldType() )
{
s += " of ";
s += container_type_name(ft->YieldType());
}
}
else
s = type_name(ft->Tag());
return s;
}
TableVal* RecordType::GetRecordFieldsVal(const RecordVal* rv) const
{
auto rval = new TableVal(internal_type("record_field_table")->AsTableType());
for ( int i = 0; i < NumFields(); ++i )
{
const BroType* ft = FieldType(i);
const TypeDecl* fd = FieldDecl(i);
Val* fv = nullptr;
if ( rv )
fv = rv->Lookup(i);
if ( fv )
::Ref(fv);
bool logged = (fd->attrs && fd->FindAttr(ATTR_LOG) != 0);
RecordVal* nr = new RecordVal(internal_type("record_field")->AsRecordType());
string s = container_type_name(ft);
nr->Assign(0, new StringVal(s));
nr->Assign(1, val_mgr->GetBool(logged));
nr->Assign(2, fv);
nr->Assign(3, FieldDefault(i));
Val* field_name = new StringVal(FieldName(i));
rval->Assign(field_name, nr);
Unref(field_name);
}
return rval;
}
const char* RecordType::AddFields(type_decl_list* others, attr_list* attr) const char* RecordType::AddFields(type_decl_list* others, attr_list* attr)
{ {
assert(types); assert(types);
@ -75,6 +75,7 @@ class VectorType;
class TypeType; class TypeType;
class OpaqueType; class OpaqueType;
class EnumVal; class EnumVal;
class TableVal;
const int DOES_NOT_MATCH_INDEX = 0; const int DOES_NOT_MATCH_INDEX = 0;
const int MATCHES_INDEX_SCALAR = 1; const int MATCHES_INDEX_SCALAR = 1;
@ -483,6 +484,13 @@ public:
int NumFields() const { return num_fields; } int NumFields() const { return num_fields; }
/**
* Returns a "record_field_table" value for introspection purposes.
* @param rv an optional record value, if given the values of
* all fields will be provided in the returned table.
*/
TableVal* GetRecordFieldsVal(const RecordVal* rv = nullptr) const;
// Returns 0 if all is ok, otherwise a pointer to an error message. // Returns 0 if all is ok, otherwise a pointer to an error message.
// Takes ownership of list. // Takes ownership of list.
const char* AddFields(type_decl_list* types, attr_list* attr); const char* AddFields(type_decl_list* types, attr_list* attr);
@ -120,7 +120,12 @@ Val* Val::DoClone(CloneState* state)
// Derived classes are responsible for this. Exception: // Derived classes are responsible for this. Exception:
// Functions and files. There aren't any derived classes. // Functions and files. There aren't any derived classes.
if ( type->Tag() == TYPE_FUNC ) if ( type->Tag() == TYPE_FUNC )
return new Val(AsFunc()->DoClone()); {
auto c = AsFunc()->DoClone();
auto rval = new Val(c);
Unref(c);
return rval;
}
if ( type->Tag() == TYPE_FILE ) if ( type->Tag() == TYPE_FILE )
{ {
@ -396,14 +401,12 @@ bool Val::WouldOverflow(const BroType* from_type, const BroType* to_type, const
TableVal* Val::GetRecordFields() TableVal* Val::GetRecordFields()
{ {
TableVal* fields = new TableVal(internal_type("record_field_table")->AsTableType());
auto t = Type(); auto t = Type();
if ( t->Tag() != TYPE_RECORD && t->Tag() != TYPE_TYPE ) if ( t->Tag() != TYPE_RECORD && t->Tag() != TYPE_TYPE )
{ {
reporter->Error("non-record value/type passed to record_fields"); reporter->Error("non-record value/type passed to record_fields");
return fields; return new TableVal(internal_type("record_field_table")->AsTableType());
} }
RecordType* rt = nullptr; RecordType* rt = nullptr;
@ -421,47 +424,17 @@ TableVal* Val::GetRecordFields()
if ( t->Tag() != TYPE_RECORD ) if ( t->Tag() != TYPE_RECORD )
{ {
reporter->Error("non-record value/type passed to record_fields"); reporter->Error("non-record value/type passed to record_fields");
return fields; return new TableVal(internal_type("record_field_table")->AsTableType());
} }
rt = t->AsRecordType(); rt = t->AsRecordType();
} }
for ( int i = 0; i < rt->NumFields(); ++i ) return rt->GetRecordFieldsVal(rv);
{
BroType* ft = rt->FieldType(i);
TypeDecl* fd = rt->FieldDecl(i);
Val* fv = nullptr;
if ( rv )
fv = rv->Lookup(i);
if ( fv )
::Ref(fv);
bool logged = (fd->attrs && fd->FindAttr(ATTR_LOG) != 0);
RecordVal* nr = new RecordVal(internal_type("record_field")->AsRecordType());
if ( ft->Tag() == TYPE_RECORD )
nr->Assign(0, new StringVal("record " + ft->GetName()));
else
nr->Assign(0, new StringVal(type_name(ft->Tag())));
nr->Assign(1, val_mgr->GetBool(logged));
nr->Assign(2, fv);
nr->Assign(3, rt->FieldDefault(i));
Val* field_name = new StringVal(rt->FieldName(i));
fields->Assign(field_name, nr);
Unref(field_name);
}
return fields;
} }
// This is a static method in this file to avoid including json.hpp in Val.h since it's huge. // This is a static method in this file to avoid including json.hpp in Val.h since it's huge.
static ZeekJson BuildJSON(Val* val, bool only_loggable=false, RE_Matcher* re=new RE_Matcher("^_")) static ZeekJson BuildJSON(Val* val, bool only_loggable=false, RE_Matcher* re=nullptr)
{ {
// If the value wasn't set, return a nullptr. This will get turned into a 'null' in the json output. // If the value wasn't set, return a nullptr. This will get turned into a 'null' in the json output.
if ( ! val ) if ( ! val )
@ -511,11 +484,7 @@ static ZeekJson BuildJSON(Val* val, bool only_loggable=false, RE_Matcher* re=new
ODesc d; ODesc d;
d.SetStyle(RAW_STYLE); d.SetStyle(RAW_STYLE);
val->Describe(&d); val->Describe(&d);
j = string(reinterpret_cast<const char*>(d.Bytes()), d.Len());
auto* bs = new BroString(1, d.TakeBytes(), d.Len());
j = string((char*)bs->Bytes(), bs->Len());
delete bs;
break; break;
} }
@ -527,11 +496,7 @@ static ZeekJson BuildJSON(Val* val, bool only_loggable=false, RE_Matcher* re=new
ODesc d; ODesc d;
d.SetStyle(RAW_STYLE); d.SetStyle(RAW_STYLE);
val->Describe(&d); val->Describe(&d);
j = json_escape_utf8(string(reinterpret_cast<const char*>(d.Bytes()), d.Len()));
auto* bs = new BroString(1, d.TakeBytes(), d.Len());
j = json_escape_utf8(string((char*)bs->Bytes(), bs->Len()));
delete bs;
break; break;
} }
@ -594,7 +559,7 @@ static ZeekJson BuildJSON(Val* val, bool only_loggable=false, RE_Matcher* re=new
auto field_name = rt->FieldName(i); auto field_name = rt->FieldName(i);
std::string key_string; std::string key_string;
if ( re->MatchAnywhere(field_name) != 0 ) if ( re && re->MatchAnywhere(field_name) != 0 )
{ {
StringVal blank(""); StringVal blank("");
StringVal fn_val(field_name); StringVal fn_val(field_name);
@ -2616,7 +2581,7 @@ RecordVal* RecordVal::CoerceTo(const RecordType* t, Val* aggr, bool allow_orphan
continue; continue;
char buf[512]; char buf[512];
safe_snprintf(buf, sizeof(buf), snprintf(buf, sizeof(buf),
"orphan field \"%s\" in initialization", "orphan field \"%s\" in initialization",
rv_t->FieldName(i)); rv_t->FieldName(i));
Error(buf); Error(buf);
@ -2646,7 +2611,7 @@ RecordVal* RecordVal::CoerceTo(const RecordType* t, Val* aggr, bool allow_orphan
! ar_t->FieldDecl(i)->FindAttr(ATTR_OPTIONAL) ) ! ar_t->FieldDecl(i)->FindAttr(ATTR_OPTIONAL) )
{ {
char buf[512]; char buf[512];
safe_snprintf(buf, sizeof(buf), snprintf(buf, sizeof(buf),
"non-optional field \"%s\" missing in initialization", ar_t->FieldName(i)); "non-optional field \"%s\" missing in initialization", ar_t->FieldName(i));
Error(buf); Error(buf);
} }
@ -2665,6 +2630,11 @@ RecordVal* RecordVal::CoerceTo(RecordType* t, bool allow_orphaning)
return CoerceTo(t, 0, allow_orphaning); return CoerceTo(t, 0, allow_orphaning);
} }
TableVal* RecordVal::GetRecordFieldsVal() const
{
return Type()->AsRecordType()->GetRecordFieldsVal(this);
}
void RecordVal::Describe(ODesc* d) const void RecordVal::Describe(ODesc* d) const
{ {
const val_list* vl = AsRecord(); const val_list* vl = AsRecord();
@ -348,7 +348,7 @@ public:
TableVal* GetRecordFields(); TableVal* GetRecordFields();
StringVal* ToJSON(bool only_loggable=false, RE_Matcher* re=new RE_Matcher("^_")); StringVal* ToJSON(bool only_loggable=false, RE_Matcher* re=nullptr);
protected: protected:
@ -960,6 +960,11 @@ public:
void Describe(ODesc* d) const override; void Describe(ODesc* d) const override;
/**
* Returns a "record_field_table" value for introspection purposes.
*/
TableVal* GetRecordFieldsVal() const;
// This is an experiment to associate a BroObj within the // This is an experiment to associate a BroObj within the
// event engine to a record value in bro script. // event engine to a record value in bro script.
void SetOrigin(BroObj* o) { origin = o; } void SetOrigin(BroObj* o) { origin = o; }
@@ -416,13 +416,30 @@ public:
 : scope(s) { }
 virtual TraversalCode PreExpr(const Expr*);
+virtual TraversalCode PostExpr(const Expr*);
 Scope* scope;
 vector<const NameExpr*> outer_id_references;
+int lambda_depth = 0;
+// Note: think we really ought to toggle this to false to prevent
+// considering locals within inner-lambdas as "outer", but other logic
+// for "selective cloning" and locating IDs in the closure chain may
+// depend on current behavior and also needs to be changed.
+bool search_inner_lambdas = true;
 };
 TraversalCode OuterIDBindingFinder::PreExpr(const Expr* expr)
 {
+if ( expr->Tag() == EXPR_LAMBDA )
+++lambda_depth;
+if ( lambda_depth > 0 && ! search_inner_lambdas )
+// Don't inspect the bodies of inner lambdas as they will have their
+// own traversal to find outer IDs and we don't want to detect
+// references to local IDs inside and accidentally treat them as
+// "outer" since they can't be found in current scope.
+return TC_CONTINUE;
 if ( expr->Tag() != EXPR_NAME )
 return TC_CONTINUE;
@@ -438,45 +455,20 @@ TraversalCode OuterIDBindingFinder::PreExpr(const Expr* expr)
 return TC_CONTINUE;
 }
-// Gets a function's priority from its Scope's attributes. Errors if it sees any
-// problems.
-static int get_func_priotity(const attr_list& attrs)
-{
-int priority = 0;
-for ( const auto& a : attrs )
-{
-if ( a->Tag() == ATTR_DEPRECATED )
-continue;
-if ( a->Tag() != ATTR_PRIORITY )
-{
-a->Error("illegal attribute for function body");
-continue;
-}
-Val* v = a->AttrExpr()->Eval(0);
-if ( ! v )
-{
-a->Error("cannot evaluate attribute expression");
-continue;
-}
-if ( ! IsIntegral(v->Type()->Tag()) )
-{
-a->Error("expression is not of integral type");
-continue;
-}
-priority = v->InternalInt();
-}
-return priority;
-}
+TraversalCode OuterIDBindingFinder::PostExpr(const Expr* expr)
+{
+if ( expr->Tag() == EXPR_LAMBDA )
+{
+--lambda_depth;
+assert(lambda_depth >= 0);
+}
+return TC_CONTINUE;
+}
 void end_func(Stmt* body)
 {
-std::unique_ptr<function_ingredients> ingredients = gather_function_ingredients(pop_scope(), body);
+auto ingredients = std::make_unique<function_ingredients>(pop_scope(), body);
 if ( streq(ingredients->id->Name(), "anonymous-function") )
 {
@@ -508,24 +500,10 @@ void end_func(Stmt* body)
 }
 ingredients->id->ID_Val()->AsFunc()->SetScope(ingredients->scope);
-}
-std::unique_ptr<function_ingredients> gather_function_ingredients(Scope* scope, Stmt* body)
-{
-auto ingredients = std::make_unique<function_ingredients>();
-ingredients->frame_size = scope->Length();
-ingredients->inits = scope->GetInits();
-ingredients->scope = scope;
-ingredients->id = scope->ScopeID();
-auto attrs = scope->Attrs();
-ingredients->priority = (attrs ? get_func_priotity(*attrs) : 0);
-ingredients->body = body;
-return ingredients;
+// Note: ideally, something would take ownership of this memory until the
+// end of script execution, but that's essentially the same as the
+// lifetime of the process at the moment, so ok to "leak" it.
+ingredients.release();
 }
 Val* internal_val(const char* name)
@@ -548,7 +526,14 @@ id_list gather_outer_ids(Scope* scope, Stmt* body)
 id_list idl ( cb.outer_id_references.size() );
 for ( size_t i = 0; i < cb.outer_id_references.size(); ++i )
-idl.append(cb.outer_id_references[i]->Id());
+{
+auto id = cb.outer_id_references[i]->Id();
+if ( idl.is_member(id) )
+continue;
+idl.append(id);
+}
 return idl;
 }
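
The gather_outer_ids() hunk now skips identifiers that the list already contains. The same first-seen de-duplication, sketched with standard containers instead of Zeek's id_list (unique_in_order is a hypothetical name):

    #include <string>
    #include <unordered_set>
    #include <vector>

    // Append each referenced identifier only once, preserving first-seen
    // order -- what the is_member() check in the diff achieves for id_list.
    std::vector<std::string> unique_in_order(const std::vector<std::string>& refs)
        {
        std::vector<std::string> out;
        std::unordered_set<std::string> seen;

        for ( const auto& r : refs )
            if ( seen.insert(r).second )  // insert() reports whether r was new
                out.push_back(r);

        return out;
        }

The sketch trades the linear is_member() scan for a hash set; for the handful of outer IDs a lambda typically captures, either approach is fine.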

View file

@@ -26,11 +26,6 @@ extern void begin_func(ID* id, const char* module_name, function_flavor flavor,
 int is_redef, FuncType* t, attr_list* attrs = nullptr);
 extern void end_func(Stmt* body);
-// Gathers all of the information from a scope and a function body needed to
-// build a function and collects it into a function_ingredients struct.
-// Gathered elements are not refeed.
-extern std::unique_ptr<function_ingredients> gather_function_ingredients(Scope* scope, Stmt* body);
 // Gather all IDs referenced inside a body that aren't part of a given scope.
 extern id_list gather_outer_ids(Scope* scope, Stmt* body);

View file

@@ -5,6 +5,7 @@
 #include <ctype.h>
 #include <sys/types.h>
 #include <sys/socket.h>
+#include <netinet/in.h>
 #include <arpa/inet.h>
 #include "NetVar.h"

View file

@@ -25,6 +25,13 @@ KRB_Analyzer::KRB_Analyzer(Connection* conn)
 }
 #ifdef USE_KRB5
+static void warn_krb(const char* msg, krb5_context ctx, krb5_error_code code)
+{
+auto err = krb5_get_error_message(ctx, code);
+reporter->Warning("%s (%s)", msg, err);
+krb5_free_error_message(ctx, err);
+}
 void KRB_Analyzer::Initialize_Krb()
 {
 if ( BifConst::KRB::keytab->Len() == 0 )
@@ -40,14 +47,14 @@ void KRB_Analyzer::Initialize_Krb()
 krb5_error_code retval = krb5_init_context(&krb_context);
 if ( retval )
 {
-reporter->Warning("KRB: Couldn't initialize the context (%s)", krb5_get_error_message(krb_context, retval));
+warn_krb("KRB: Couldn't initialize the context", krb_context, retval);
 return;
 }
 retval = krb5_kt_resolve(krb_context, keytab_filename, &krb_keytab);
 if ( retval )
 {
-reporter->Warning("KRB: Couldn't resolve keytab (%s)", krb5_get_error_message(krb_context, retval));
+warn_krb("KRB: Couldn't resolve keytab", krb_context, retval);
 return;
 }
 krb_available = true;
@@ -103,33 +110,44 @@ StringVal* KRB_Analyzer::GetAuthenticationInfo(const BroString* principal, const
 krb5_error_code retval = krb5_sname_to_principal(krb_context, hostname->CheckString(), service->CheckString(), KRB5_NT_SRV_HST, &sprinc);
 if ( retval )
 {
-reporter->Warning("KRB: Couldn't generate principal name (%s)", krb5_get_error_message(krb_context, retval));
+warn_krb("KRB: Couldn't generate principal name", krb_context, retval);
 return nullptr;
 }
-krb5_ticket tkt;
-tkt.server = sprinc;
-tkt.enc_part.enctype = enctype;
-tkt.enc_part.ciphertext.data = reinterpret_cast<char*>(ciphertext->Bytes());
-tkt.enc_part.ciphertext.length = ciphertext->Len();
-retval = krb5_server_decrypt_ticket_keytab(krb_context, krb_keytab, &tkt);
+auto tkt = static_cast<krb5_ticket*>(safe_malloc(sizeof(krb5_ticket)));
+memset(tkt, 0, sizeof(krb5_ticket));
+tkt->server = sprinc;
+tkt->enc_part.enctype = enctype;
+auto ctd = static_cast<char*>(safe_malloc(ciphertext->Len()));
+memcpy(ctd, ciphertext->Bytes(), ciphertext->Len());
+tkt->enc_part.ciphertext.data = ctd;
+tkt->enc_part.ciphertext.length = ciphertext->Len();
+retval = krb5_server_decrypt_ticket_keytab(krb_context, krb_keytab, tkt);
 if ( retval )
 {
-reporter->Warning("KRB: Couldn't decrypt ticket (%s)", krb5_get_error_message(krb_context, retval));
+krb5_free_ticket(krb_context, tkt);
+warn_krb("KRB: Couldn't decrypt ticket", krb_context, retval);
 return nullptr;
 }
 char* cp;
-retval = krb5_unparse_name(krb_context, tkt.enc_part2->client, &cp);
+retval = krb5_unparse_name(krb_context, tkt->enc_part2->client, &cp);
 if ( retval )
 {
-reporter->Warning("KRB: Couldn't unparse name (%s)", krb5_get_error_message(krb_context, retval));
+krb5_free_ticket(krb_context, tkt);
+warn_krb("KRB: Couldn't unparse name", krb_context, retval);
 return nullptr;
 }
 StringVal* ret = new StringVal(cp);
 krb5_free_unparsed_name(krb_context, cp);
+krb5_free_ticket(krb_context, tkt);
 return ret;
 #else
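
The decrypt-ticket hunk frees the heap-allocated krb5_ticket by hand on every early-return path. A possible alternative, not what the diff does, is an RAII wrapper around the MIT Kerberos free routine; the only libkrb5 call assumed below is krb5_free_ticket(krb5_context, krb5_ticket*), and KrbTicketPtr/KrbTicketDeleter are made-up names:

    #include <cstdlib>
    #include <memory>

    #include <krb5.h>

    // Deleter that releases a ticket through the library, so early returns
    // in GetAuthenticationInfo()-style code cannot leak it.
    struct KrbTicketDeleter
        {
        krb5_context ctx;

        void operator()(krb5_ticket* t) const
            {
            if ( t )
                krb5_free_ticket(ctx, t);
            }
        };

    using KrbTicketPtr = std::unique_ptr<krb5_ticket, KrbTicketDeleter>;

    // Usage sketch:
    //   KrbTicketPtr tkt(static_cast<krb5_ticket*>(calloc(1, sizeof(krb5_ticket))),
    //                    KrbTicketDeleter{krb_context});
    //   ...pass tkt.get() around; every return path then frees the ticket.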

View file

@@ -29,6 +29,9 @@ RPC_CallInfo::RPC_CallInfo(uint32_t arg_xid, const u_char*& buf, int& n, double
 {
 v = nullptr;
 xid = arg_xid;
+stamp = 0;
+uid = 0;
+gid = 0;
 start_time = arg_start_time;
 last_time = arg_last_time;
@@ -42,7 +45,8 @@ RPC_CallInfo::RPC_CallInfo(uint32_t arg_xid, const u_char*& buf, int& n, double
 vers = extract_XDR_uint32(buf, n);
 proc = extract_XDR_uint32(buf, n);
 cred_flavor = extract_XDR_uint32(buf, n);
-int cred_opaque_n, machinename_n;
+int cred_opaque_n;
 const u_char* cred_opaque = extract_XDR_opaque(buf, n, cred_opaque_n);
 if ( ! cred_opaque )
@@ -51,20 +55,28 @@ RPC_CallInfo::RPC_CallInfo(uint32_t arg_xid, const u_char*& buf, int& n, double
 return;
 }
+verf_flavor = skip_XDR_opaque_auth(buf, n);
+if ( ! buf )
+return;
+if ( cred_flavor == RPC_AUTH_UNIX )
+{
 stamp = extract_XDR_uint32(cred_opaque, cred_opaque_n);
-const u_char* tmp = extract_XDR_opaque(cred_opaque, cred_opaque_n, machinename_n);
-if ( ! tmp )
+int machinename_n;
+constexpr auto max_machinename_len = 255;
+auto mnp = extract_XDR_opaque(cred_opaque, cred_opaque_n, machinename_n, max_machinename_len);
+if ( ! mnp )
 {
 buf = nullptr;
 return;
 }
-machinename = std::string(reinterpret_cast<const char*>(tmp), machinename_n);
+machinename = std::string(reinterpret_cast<const char*>(mnp), machinename_n);
 uid = extract_XDR_uint32(cred_opaque, cred_opaque_n);
 gid = extract_XDR_uint32(cred_opaque, cred_opaque_n);
 size_t number_of_gids = extract_XDR_uint32(cred_opaque, cred_opaque_n);
 if ( number_of_gids > 64 )
@@ -75,8 +87,7 @@ RPC_CallInfo::RPC_CallInfo(uint32_t arg_xid, const u_char*& buf, int& n, double
 for ( auto i = 0u; i < number_of_gids; ++i )
 auxgids.push_back(extract_XDR_uint32(cred_opaque, cred_opaque_n));
-verf_flavor = skip_XDR_opaque_auth(buf, n);
+}
 header_len = call_n - n;
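
The AUTH_UNIX hunk caps the machine-name field at 255 bytes, the limit RFC 1057 gives for that field, instead of trusting the length on the wire. A self-contained sketch of a bounded, length-prefixed XDR opaque read, independent of Zeek's extract_XDR_* helpers (read_bounded_opaque and its parameters are hypothetical):

    #include <arpa/inet.h>

    #include <cstdint>
    #include <cstring>
    #include <optional>
    #include <string>

    // Read an XDR opaque<>: 4-byte big-endian length, data, then padding up
    // to a 4-byte boundary. Fails if the length exceeds max_len or the buffer.
    std::optional<std::string> read_bounded_opaque(const uint8_t*& buf, int& len,
                                                   uint32_t max_len)
        {
        if ( len < 4 )
            return std::nullopt;

        uint32_t n;
        memcpy(&n, buf, 4);
        n = ntohl(n);
        buf += 4;
        len -= 4;

        uint32_t padded = (n + 3) & ~3u;  // XDR pads opaques to 4-byte multiples

        if ( n > max_len || padded > static_cast<uint32_t>(len) )
            return std::nullopt;

        std::string out(reinterpret_cast<const char*>(buf), n);
        buf += padded;
        len -= padded;
        return out;
        }

A caller would treat std::nullopt the way the diff treats a null return from extract_XDR_opaque(): abort parsing the credentials.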

View file

@@ -889,7 +889,7 @@ void SMTP_Analyzer::UnexpectedCommand(const int cmd_code, const int reply_code)
 // If this happens, please fix the SMTP state machine!
 // ### Eventually, these should be turned into "weird" events.
 static char buf[512];
-int len = safe_snprintf(buf, sizeof(buf),
+int len = snprintf(buf, sizeof(buf),
 "%s reply = %d state = %d",
 SMTP_CMD_WORD(cmd_code), reply_code, state);
 if ( len > (int) sizeof(buf) )
@@ -902,7 +902,7 @@ void SMTP_Analyzer::UnexpectedReply(const int cmd_code, const int reply_code)
 // If this happens, please fix the SMTP state machine!
 // ### Eventually, these should be turned into "weird" events.
 static char buf[512];
-int len = safe_snprintf(buf, sizeof(buf),
+int len = snprintf(buf, sizeof(buf),
 "%d state = %d, last command = %s",
 reply_code, state, SMTP_CMD_WORD(cmd_code));
 Unexpected (1, "unexpected reply", len, buf);
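
The safe_snprintf() to snprintf() swap here (and in the TCP stats hunk below) leans on C99 guarantees: snprintf() never writes past the given size, always NUL-terminates, and returns the length the output would have had without truncation. A minimal truncation check built on that return value (format_reply is a hypothetical helper, not Zeek code):

    #include <cstdio>

    // Returns true when the whole message fit; snprintf() reports the
    // untruncated length, so ret >= size means output was cut short.
    bool format_reply(char* buf, size_t size, int reply_code, int state)
        {
        int ret = snprintf(buf, size, "%d state = %d", reply_code, state);
        return ret >= 0 && static_cast<size_t>(ret) < size;
        }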

View file

@@ -71,7 +71,7 @@ void TCPStateStats::PrintStats(BroFile* file, const char* prefix)
 if ( n > 0 )
 {
 char buf[32];
-safe_snprintf(buf, sizeof(buf), "%-8d", state_cnt[i][j]);
+snprintf(buf, sizeof(buf), "%-8d", state_cnt[i][j]);
 file->Write(buf);
 }
 else

View file

@@ -1197,6 +1197,12 @@ void TCP_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig,
 // TCP Fast Open).
 CheckPIA_FirstPacket(is_orig, ip);
+// Note the similar/inverse logic to connection_attempt.
+if ( resp->state != TCP_ENDPOINT_INACTIVE ||
+(orig->state != TCP_ENDPOINT_SYN_SENT &&
+orig->state != TCP_ENDPOINT_SYN_ACK_SENT))
+Conn()->SetSuccessful();
 if ( DEBUG_tcp_data_sent )
 {
 DEBUG_MSG("%.6f before DataSent: len=%d caplen=%d skip=%d\n",
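
The new block in DeliverPacket() marks the connection successful once the responder shows any activity or the originator has moved past the SYN/SYN-ACK states. The same condition as a standalone predicate (a sketch; the enum is a placeholder for Zeek's TCP_ENDPOINT_* constants, not their real definition):

    // Placeholder endpoint states; Zeek defines more, with different values.
    enum class EndpointState
        {
        Inactive,     // TCP_ENDPOINT_INACTIVE
        SynSent,      // TCP_ENDPOINT_SYN_SENT
        SynAckSent,   // TCP_ENDPOINT_SYN_ACK_SENT
        Established,  // TCP_ENDPOINT_ESTABLISHED
        };

    // Mirrors the diff's test: success unless the connection still looks like
    // nothing more than an unanswered SYN (or SYN-ACK) from the originator.
    bool looks_successful(EndpointState orig, EndpointState resp)
        {
        return resp != EndpointState::Inactive ||
               (orig != EndpointState::SynSent && orig != EndpointState::SynAckSent);
        }

That is also the behavior the connection_successful documentation added further down describes for TCP.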

View file

@@ -27,6 +27,7 @@ event new_connection_contents%(c: connection%);
 ## connection_rejected connection_reset connection_reused connection_state_remove
 ## connection_status_update connection_timeout scheduled_analyzer_applied
 ## new_connection new_connection_contents partial_connection
+## connection_successful successful_connection_remove
 event connection_attempt%(c: connection%);
 ## Generated when seeing a SYN-ACK packet from the responder in a TCP
@@ -45,6 +46,7 @@ event connection_attempt%(c: connection%);
 ## connection_rejected connection_reset connection_reused connection_state_remove
 ## connection_status_update connection_timeout scheduled_analyzer_applied
 ## new_connection new_connection_contents partial_connection
+## connection_successful successful_connection_remove
 event connection_established%(c: connection%);
 ## Generated for a new active TCP connection if Zeek did not see the initial

View file

@@ -1,5 +1,6 @@
 #include "Data.h"
 #include "File.h"
+#include "3rdparty/doctest.h"
 #include "broker/data.bif.h"
 #include <broker/error.hh>
@@ -35,6 +36,16 @@ static broker::port::protocol to_broker_port_proto(TransportProto tp)
 }
 }
+TEST_CASE("converting Zeek to Broker protocol constants")
+{
+CHECK_EQ(to_broker_port_proto(TRANSPORT_TCP), broker::port::protocol::tcp);
+CHECK_EQ(to_broker_port_proto(TRANSPORT_UDP), broker::port::protocol::udp);
+CHECK_EQ(to_broker_port_proto(TRANSPORT_ICMP),
+broker::port::protocol::icmp);
+CHECK_EQ(to_broker_port_proto(TRANSPORT_UNKNOWN),
+broker::port::protocol::unknown);
+}
 TransportProto bro_broker::to_bro_port_proto(broker::port::protocol tp)
 {
 switch ( tp ) {
@@ -50,6 +61,16 @@ TransportProto bro_broker::to_bro_port_proto(broker::port::protocol tp)
 }
 }
+TEST_CASE("converting Broker to Zeek protocol constants")
+{
+using bro_broker::to_bro_port_proto;
+CHECK_EQ(to_bro_port_proto(broker::port::protocol::tcp), TRANSPORT_TCP);
+CHECK_EQ(to_bro_port_proto(broker::port::protocol::udp), TRANSPORT_UDP);
+CHECK_EQ(to_bro_port_proto(broker::port::protocol::icmp), TRANSPORT_ICMP);
+CHECK_EQ(to_bro_port_proto(broker::port::protocol::unknown),
+TRANSPORT_UNKNOWN);
+}
 struct val_converter {
 using result_type = Val*;
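
The two TEST_CASEs added above check each conversion direction separately. A possible companion test, not part of the diff, exercising the round trip in one loop; it assumes it lives in the same translation unit, since to_broker_port_proto() is file-static:

    TEST_CASE("Zeek/Broker port protocol constants round-trip")
        {
        for ( auto tp : { TRANSPORT_TCP, TRANSPORT_UDP,
                          TRANSPORT_ICMP, TRANSPORT_UNKNOWN } )
            // Converting to Broker's enum and back should be the identity.
            CHECK_EQ(bro_broker::to_bro_port_proto(to_broker_port_proto(tp)), tp);
        }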

View file

@@ -143,8 +143,49 @@ event connection_timeout%(c: connection%);
 ## connection_status_update connection_timeout scheduled_analyzer_applied
 ## new_connection new_connection_contents partial_connection udp_inactivity_timeout
 ## tcp_inactivity_timeout icmp_inactivity_timeout conn_stats
+## connection_successful successful_connection_remove
 event connection_state_remove%(c: connection%);
+## Generated for every new connection that is deemed "successful" according to
+## transport-layer-dependent criteria. Zeek uses a flow-based definition of
+## "connection" here that includes not only TCP sessions but also UDP and ICMP
+## flows. For anything except TCP, this event is raised with the first packet
+## of a previously unknown connection. For TCP, this event is raised if the
+## responder host ever sends a packet or if the originator host ever sends a
+## packet that is not a SYN (i.e. the "success" status of a connection can be
+## useful to help weed out SYN scans).
+##
+## c: The new connection.
+##
+## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt
+## connection_established connection_external connection_finished
+## connection_first_ACK connection_half_finished connection_partial_close
+## connection_pending connection_rejected connection_reset connection_reused
+## connection_status_update connection_timeout scheduled_analyzer_applied
+## new_connection new_connection_contents partial_connection udp_inactivity_timeout
+## tcp_inactivity_timeout icmp_inactivity_timeout conn_stats connection_state_remove
+## successful_connection_remove
+event connection_successful%(c: connection%);
+## Like :zeek:see:`connection_state_remove`, but raised only for "successful"
+## connections, as defined by :zeek:see:`connection_successful`. This in particular
+## excludes TCP connections that were never established (removal of such
+## "unsuccessful" connections is implied by the :zeek:see:`connection_attempt`
+## event instead). Handlers for this event will run after handlers for
+## :zeek:see:`connection_state_remove`.
+##
+## c: The connection being removed.
+##
+## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt
+## connection_established connection_external connection_finished
+## connection_first_ACK connection_half_finished connection_partial_close
+## connection_pending connection_rejected connection_reset connection_reused
+## connection_status_update connection_timeout scheduled_analyzer_applied
+## new_connection new_connection_contents partial_connection udp_inactivity_timeout
+## tcp_inactivity_timeout icmp_inactivity_timeout conn_stats connection_state_remove
+## connection_successful
+event successful_connection_remove%(c: connection%);
 ## Generated when a connection 4-tuple is reused. This event is raised when Zeek
 ## sees a new TCP session or UDP flow using a 4-tuple matching that of an
 ## earlier connection it still considers active.

Some files were not shown because too many files have changed in this diff.