Mirror of https://github.com/zeek/zeek.git (synced 2025-10-02 14:48:21 +00:00)

Commit 41dd0b98e9: Merge branch 'master' into topic/jsiwek/reorg-followup

Conflicts:
    scripts/base/frameworks/cluster/setup-connections.bro
    scripts/base/frameworks/metrics/main.bro
    scripts/base/frameworks/notice/actions/email_admin.bro
    scripts/base/frameworks/notice/weird.bro
    scripts/base/protocols/mime/file-hash.bro
    scripts/base/protocols/mime/file-ident.bro
    scripts/policy/frameworks/communication/listen-clear.bro
    scripts/policy/frameworks/communication/listen-ssl.bro
    scripts/policy/frameworks/control/controller.bro
    scripts/policy/frameworks/metrics/http-example.bro
    scripts/policy/frameworks/metrics/ssl-example.bro
    scripts/policy/protocols/conn/scan.bro
    src/CMakeLists.txt

79 changed files with 855 additions and 311 deletions
CHANGES (62 changed lines)

@@ -1,4 +1,66 @@
+1.6-dev-1095 | 2011-08-13 11:59:07 -0700
+
+  * A larger number of script documentation updates. Closes #543. (Jon
+    Siwek)
+
+  * Workaround for FreeBSD CMake port missing debug flags. (Jon Siwek)
+
+  * piped_exec() can now deal with null bytes. (Seth Hall)
+
+  * Fix vector initialization for lists of records with optional
+    types. Closes #485. (Jon Siwek)
+
+  * Fix redef'ing records with &default empty set fields. Closes #460.
+    (Jon Siwek)
+
+  * Fix ConnSize_Analyzer when used in conjunction with the connection
+    compressor. (Gregor Maier)
+
+  * Fix reporter using part of the actual message as a format string.
+    (Jon Siwek)
+
+  * Fixing reporter's location tracking. Closes #492. (Robin Sommer)
+
+  * Turning DNS errors into warnings. Closes #255. (Robin Sommer)
+
+  * Logging's path_func now receives the log record as argument.
+    Closes #555. (Robin Sommer)
+
+  * Functions can now be logged; their full body gets recorded.
+    Closes #506. (Robin Sommer)
+
+  * Bugfix for hostname notice email extension. (Seth Hall)
+
+  * Updates for notice framework. (Seth Hall)
+
+    - New ACTION_ADD_GEODATA to add geodata to notices in an extension
+      field named remote_location.
+
+    - Loading extend-email/hostnames by default now that it only does
+      anything when the ACTION_EMAIL action is applied (finally).
+
+  * Updates to local.bro (Seth Hall)
+
+  * Added the profiling script. (Seth Hall)
+
+  * Updates for SSH scripts. (Seth Hall)
+
+  * ConnSize analyzer is turned on by default now. (Seth Hall)
+
+  * Updates for the build system and site local scripts for cluster.
+    (Seth Hall)
+
+  * HTTP now uses the extract_filename_from_content_disposition
+    function. (Seth Hall)
+
+  * Major SMTP script refactor. Closes #509. (Jon Siwek and Seth Hall)
+
+  * New variable Site::local_nets_table in utils/site for mapping
+    address to defined local subnet.
+
+  * Metrics framework updates, more to come. (Seth Hall)
+
+
 1.6-dev-1061 | 2011-08-08 18:25:27 -0700
 
   * A set of new/changed tests regarding the new policy script
@@ -35,14 +35,16 @@ endif ()
 ## Project/Build Configuration
 
 set(BRO_ROOT_DIR ${CMAKE_INSTALL_PREFIX})
-if (NOT POLICYDIR)
-    # set the default policy installation path (user did not specify one)
-    set(POLICYDIR ${BRO_ROOT_DIR}/share/bro)
+if (NOT BRO_SCRIPT_INSTALL_PATH)
+    # set the default Bro script installation path (user did not specify one)
+    set(BRO_SCRIPT_INSTALL_PATH ${BRO_ROOT_DIR}/share/bro)
 endif ()
+set(BRO_SCRIPT_SOURCE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/scripts)
 
-# sanitize the policy install directory into an absolute path
+# sanitize the Bro script install directory into an absolute path
 # (CMake is confused by ~ as a representation of home directory)
-get_filename_component(POLICYDIR ${POLICYDIR} ABSOLUTE)
+get_filename_component(BRO_SCRIPT_INSTALL_PATH ${BRO_SCRIPT_INSTALL_PATH}
+                       ABSOLUTE)
 
 configure_file(bro-path-dev.in ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev)
 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev.sh

@@ -63,7 +65,8 @@ set(EXTRA_COMPILE_FLAGS "-Wall -Wno-unused")
 
 if (ENABLE_DEBUG)
     set(CMAKE_BUILD_TYPE Debug)
-    set(EXTRA_COMPILE_FLAGS "${EXTRA_COMPILE_FLAGS} -DDEBUG")
+    # manual add of -g works around its omission in FreeBSD's CMake port
+    set(EXTRA_COMPILE_FLAGS "${EXTRA_COMPILE_FLAGS} -g -DDEBUG")
 else ()
     set(CMAKE_BUILD_TYPE RelWithDebInfo)
 endif ()

@@ -212,7 +215,7 @@ message(
     "\n====================| Bro Build Summary |====================="
     "\n"
     "\nInstall prefix: ${CMAKE_INSTALL_PREFIX}"
-    "\nPolicy dir: ${POLICYDIR}"
+    "\nBro Script Path: ${BRO_SCRIPT_INSTALL_PATH}"
     "\nDebug mode: ${ENABLE_DEBUG}"
     "\n"
     "\nCC: ${CMAKE_C_COMPILER}"
VERSION (2 changed lines)

@@ -1 +1 @@
-1.6-dev-1061
+1.6-dev-1095
@@ -1 +1 @@
-Subproject commit 7cdd9c39d97c2984293fbe4a6dbe9ac0b33ecbfa
+Subproject commit a3a9410dedc842f6bb9859642f334ed354633b57

@@ -1 +1 @@
-Subproject commit 86990f1640d986e39d5bb1287dbeb03b59a464f0
+Subproject commit d68b98bb995a105b257f805ec4ff22c4929c7476

@@ -1 +1 @@
-Subproject commit 45f577240089d63dd0dc58be564280725a97acec
+Subproject commit 03e6d398edf422140ba9f50e6fabbec33ee2f3cb

@@ -1 +1 @@
-Subproject commit dbbe6c81ef40666338c950d8f69dc8597f2adc70
+Subproject commit c39622855e3c3a5cc94c7376f86184ed1db1939a

@@ -1 +1 @@
-Subproject commit ab78a66dd782f165ddf921faaf1f065b2f987481
+Subproject commit d1c620d98ce9d9c0b203314108b413784965d2ed
@@ -1,7 +1,7 @@
 #!/bin/sh
-# After configured by CMake, this file prints the absolute path to policy
-# files that come with the source distributions of Bro as well as policy
-# files that are generated by the BIF compiler at compile time.
+# After configured by CMake, this file prints the absolute path to Bro scripts
+# that come with the source distributions of Bro as well as scripts that are
+# generated by the BIF compiler at compile time.
 #
 # The intended use of this script is to make it easier to run Bro from
 # the build directory, avoiding the need to install it. This could be

@@ -10,10 +10,10 @@
 # BROPATH=`./bro-path-dev` ./src/bro
 #
 
-broPolicies=${PROJECT_SOURCE_DIR}/scripts:${PROJECT_SOURCE_DIR}/scripts/policy:${PROJECT_SOURCE_DIR}/scripts/site
+broPolicies=${BRO_SCRIPT_SOURCE_PATH}:${BRO_SCRIPT_SOURCE_PATH}/policy:${BRO_SCRIPT_SOURCE_PATH}/site
 
 broGenPolicies=${CMAKE_BINARY_DIR}/src
 
-installedPolicies=${POLICYDIR}:${POLICYDIR}/site
+installedPolicies=${BRO_SCRIPT_INSTALL_PATH}:${BRO_SCRIPT_INSTALL_PATH}/site
 
 echo .:$broPolicies:$broGenPolicies
configure (14 changed lines, vendored)

@@ -22,7 +22,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
 
 Installation Directories:
   --prefix=PREFIX        installation directory [/usr/local/bro]
-  --policydir=PATH       policy file installation directory
+  --scriptdir=PATH       root installation directory for Bro scripts
                          [PREFIX/share/bro]
 
 Optional Features:

@@ -85,7 +85,7 @@ CMakeCacheEntries=""
 append_cache_entry CMAKE_INSTALL_PREFIX PATH /usr/local/bro
 append_cache_entry BRO_ROOT_DIR PATH /usr/local/bro
 append_cache_entry PY_MOD_INSTALL_DIR PATH /usr/local/bro/lib/broctl
-append_cache_entry POLICYDIR STRING /usr/local/bro/share/bro
+append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING /usr/local/bro/share/bro
 append_cache_entry ENABLE_DEBUG BOOL false
 append_cache_entry BROv6 BOOL false
 append_cache_entry ENABLE_PERFTOOLS BOOL false

@@ -118,13 +118,13 @@ while [ $# -ne 0 ]; do
         append_cache_entry CMAKE_INSTALL_PREFIX PATH $optarg
         append_cache_entry BRO_ROOT_DIR PATH $optarg
         append_cache_entry PY_MOD_INSTALL_DIR PATH $optarg/lib/broctl
-        if [ "$user_set_policydir" != "true" ]; then
-            append_cache_entry POLICYDIR STRING $optarg/share/bro
+        if [ "$user_set_scriptdir" != "true" ]; then
+            append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg/share/bro
         fi
         ;;
-    --policydir=*)
-        append_cache_entry POLICYDIR STRING $optarg
-        user_set_policydir="true"
+    --scriptdir=*)
+        append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg
+        user_set_scriptdir="true"
         ;;
     --enable-debug)
         append_cache_entry ENABLE_DEBUG BOOL true
@@ -57,10 +57,12 @@ macro(REST_TARGET srcDir broInput)
     get_filename_component(relDstDir ${broInput} PATH)
 
     set(sumTextSrc ${absSrcPath})
+    set(ogSourceFile ${absSrcPath})
     if (${extension} STREQUAL ".bif.bro")
+        set(ogSourceFile ${BIF_SRC_DIR}/${basename})
         # the summary text is taken at configure time, but .bif.bro files
         # may not have been generated yet, so read .bif file instead
-        set(sumTextSrc ${BIF_SRC_DIR}/${basename})
+        set(sumTextSrc ${ogSourceFile})
     endif ()
 
     if (NOT relDstDir)

@@ -124,9 +126,9 @@ macro(REST_TARGET srcDir broInput)
         ARGS -E make_directory ${dstDir}
         COMMAND "${CMAKE_COMMAND}"
         ARGS -E copy ${restFile} ${restOutput}
-        # copy the bro policy script, too
+        # copy the bro or bif script, too
         COMMAND "${CMAKE_COMMAND}"
-        ARGS -E copy ${absSrcPath} ${dstDir}
+        ARGS -E copy ${ogSourceFile} ${dstDir}
         # clean up the build directory
         COMMAND rm
         ARGS -rf .state *.log *.rst

@@ -151,6 +153,8 @@ set(MASTER_PKG_INDEX_TEXT "")
 foreach (pkg ${MASTER_PKG_LIST})
     # strip of the trailing /index for the link name
     get_filename_component(lnktxt ${pkg} PATH)
+    # pretty-up the link name by removing common scripts/ prefix
+    string(REPLACE "scripts/" "" lnktxt "${lnktxt}")
     set(MASTER_PKG_INDEX_TEXT "${MASTER_PKG_INDEX_TEXT}\n ${lnktxt} <${pkg}>")
 endforeach ()
 file(WRITE ${MASTER_PACKAGE_INDEX} "${MASTER_PKG_INDEX_TEXT}")
@@ -13,22 +13,14 @@
 set(psd ${PROJECT_SOURCE_DIR}/scripts)
 
 rest_target(${CMAKE_CURRENT_SOURCE_DIR} example.bro internal)
-rest_target(${psd} base/bro.init internal)
-rest_target(${psd} base/all.bro internal)
+rest_target(${psd} base/init-default.bro internal)
+rest_target(${psd} base/init-bare.bro internal)
 
 rest_target(${CMAKE_BINARY_DIR}/src bro.bif.bro)
-rest_target(${CMAKE_BINARY_DIR}/src common-rw.bif.bro)
 rest_target(${CMAKE_BINARY_DIR}/src const.bif.bro)
-rest_target(${CMAKE_BINARY_DIR}/src dns-rw.bif.bro)
 rest_target(${CMAKE_BINARY_DIR}/src event.bif.bro)
-rest_target(${CMAKE_BINARY_DIR}/src finger-rw.bif.bro)
-rest_target(${CMAKE_BINARY_DIR}/src ftp-rw.bif.bro)
-rest_target(${CMAKE_BINARY_DIR}/src http-rw.bif.bro)
-rest_target(${CMAKE_BINARY_DIR}/src ident-rw.bif.bro)
 rest_target(${CMAKE_BINARY_DIR}/src logging.bif.bro)
 rest_target(${CMAKE_BINARY_DIR}/src reporter.bif.bro)
-rest_target(${CMAKE_BINARY_DIR}/src smb-rw.bif.bro)
-rest_target(${CMAKE_BINARY_DIR}/src smtp-rw.bif.bro)
 rest_target(${CMAKE_BINARY_DIR}/src strings.bif.bro)
 rest_target(${CMAKE_BINARY_DIR}/src types.bif.bro)
 rest_target(${psd} base/frameworks/cluster/main.bro)

@@ -42,9 +34,8 @@ rest_target(${psd} base/frameworks/dpd/main.bro)
 rest_target(${psd} base/frameworks/intel/main.bro)
 rest_target(${psd} base/frameworks/logging/main.bro)
 rest_target(${psd} base/frameworks/logging/writers/ascii.bro)
-rest_target(${psd} base/frameworks/metrics/conn-example.bro)
-rest_target(${psd} base/frameworks/metrics/http-example.bro)
 rest_target(${psd} base/frameworks/metrics/main.bro)
+rest_target(${psd} base/frameworks/notice/actions/add-geodata.bro)
 rest_target(${psd} base/frameworks/notice/actions/drop.bro)
 rest_target(${psd} base/frameworks/notice/actions/email_admin.bro)
 rest_target(${psd} base/frameworks/notice/actions/page.bro)

@@ -56,6 +47,7 @@ rest_target(${psd} base/frameworks/packet-filter/netstats.bro)
 rest_target(${psd} base/frameworks/reporter/main.bro)
 rest_target(${psd} base/frameworks/signatures/main.bro)
 rest_target(${psd} base/frameworks/software/main.bro)
+rest_target(${psd} base/frameworks/time-machine/notice.bro)
 rest_target(${psd} base/protocols/conn/contents.bro)
 rest_target(${psd} base/protocols/conn/inactivity.bro)
 rest_target(${psd} base/protocols/conn/main.bro)

@@ -72,6 +64,13 @@ rest_target(${psd} base/protocols/http/partial-content.bro)
 rest_target(${psd} base/protocols/http/utils.bro)
 rest_target(${psd} base/protocols/irc/dcc-send.bro)
 rest_target(${psd} base/protocols/irc/main.bro)
+rest_target(${psd} base/protocols/mime/base.bro)
+rest_target(${psd} base/protocols/mime/file-extract.bro)
+rest_target(${psd} base/protocols/mime/file-hash.bro)
+rest_target(${psd} base/protocols/mime/file-ident.bro)
+rest_target(${psd} base/protocols/rpc/base.bro)
+rest_target(${psd} base/protocols/smtp/entities-excerpt.bro)
+rest_target(${psd} base/protocols/smtp/entities.bro)
 rest_target(${psd} base/protocols/smtp/main.bro)
 rest_target(${psd} base/protocols/ssh/main.bro)
 rest_target(${psd} base/protocols/ssl/consts.bro)

@@ -95,6 +94,9 @@ rest_target(${psd} policy/frameworks/control/controllee.bro)
 rest_target(${psd} policy/frameworks/control/controller.bro)
 rest_target(${psd} policy/frameworks/dpd/detect-protocols.bro)
 rest_target(${psd} policy/frameworks/dpd/packet-segment-logging.bro)
+rest_target(${psd} policy/frameworks/metrics/conn-example.bro)
+rest_target(${psd} policy/frameworks/metrics/http-example.bro)
+rest_target(${psd} policy/frameworks/metrics/ssl-example.bro)
 rest_target(${psd} policy/frameworks/software/version-changes.bro)
 rest_target(${psd} policy/frameworks/software/vulnerable.bro)
 rest_target(${psd} policy/integration/barnyard2/base.bro)

@@ -102,10 +104,10 @@ rest_target(${psd} policy/integration/barnyard2/event.bro)
 rest_target(${psd} policy/integration/barnyard2/types.bro)
 rest_target(${psd} policy/misc/analysis-groups.bro)
 rest_target(${psd} policy/misc/loaded-scripts.bro)
+rest_target(${psd} policy/misc/profiling.bro)
 rest_target(${psd} policy/misc/trim-trace-file.bro)
 rest_target(${psd} policy/protocols/conn/known-hosts.bro)
 rest_target(${psd} policy/protocols/conn/known-services.bro)
-rest_target(${psd} policy/protocols/conn/scan.bro)
 rest_target(${psd} policy/protocols/dns/auth-addl.bro)
 rest_target(${psd} policy/protocols/dns/detect-external-names.bro)
 rest_target(${psd} policy/protocols/ftp/detect.bro)

@@ -118,8 +120,12 @@ rest_target(${psd} policy/protocols/http/headers.bro)
 rest_target(${psd} policy/protocols/http/software.bro)
 rest_target(${psd} policy/protocols/http/var-extraction-cookies.bro)
 rest_target(${psd} policy/protocols/http/var-extraction-uri.bro)
+rest_target(${psd} policy/protocols/smtp/blocklists.bro)
 rest_target(${psd} policy/protocols/smtp/detect-suspicious-orig.bro)
 rest_target(${psd} policy/protocols/smtp/software.bro)
+rest_target(${psd} policy/protocols/ssh/detect-bruteforcing.bro)
+rest_target(${psd} policy/protocols/ssh/geo-data.bro)
+rest_target(${psd} policy/protocols/ssh/interesting-hostnames.bro)
 rest_target(${psd} policy/protocols/ssh/software.bro)
 rest_target(${psd} policy/protocols/ssl/known-certs.bro)
 rest_target(${psd} policy/protocols/ssl/validate-certs.bro)

@@ -127,4 +133,8 @@ rest_target(${psd} policy/tuning/defaults/packet-fragments.bro)
 rest_target(${psd} policy/tuning/defaults/remove-high-volume-notices.bro)
 rest_target(${psd} policy/tuning/defaults/warnings.bro)
 rest_target(${psd} policy/tuning/track-all-assets.bro)
+rest_target(${psd} site/local-manager.bro)
+rest_target(${psd} site/local-proxy.bro)
+rest_target(${psd} site/local-worker.bro)
 rest_target(${psd} site/local.bro)
+rest_target(${psd} test-all-policy.bro)
@@ -44,7 +44,7 @@ of documentation targets. This script should be run after adding new
 Bro script source files, and the changes committed to git.
 
 If a script shouldn't have documentation generated for it, there's also a
-blacklist variable that can be maintained in the ``genDocSourcesList.sh``
+blacklist manifest that can be maintained in the ``genDocSourcesList.sh``
 script.
 
 The blacklist can also be used if you want to define a certain grouping for
@@ -11,8 +11,31 @@
 # Specific scripts can be blacklisted below when e.g. they currently aren't
 # parseable or they just aren't meant to be documented.
 
-blacklist="__load__.bro|test-all.bro|all.bro"
-blacklist_addl="hot.conn.bro"
+blacklist ()
+{
+    if [[ "$blacklist" == "" ]]; then
+        blacklist="$1"
+    else
+        blacklist="$blacklist|$1"
+    fi
+}
+
+# files passed into this function are meant to be temporary workarounds
+# because they're not finished or otherwise can't be loaded for some reason
+tmp_blacklist ()
+{
+    echo "Warning: temporarily blacklisted files named '$1'" 1>&2
+    blacklist $1
+}
+
+blacklist __load__.bro
+blacklist test-all.bro
+blacklist all.bro
+blacklist init-default.bro
+blacklist init-bare.bro
+
+tmp_blacklist hot.conn.bro
+tmp_blacklist scan.bro
 
 statictext="\
 # DO NOT EDIT

@@ -30,8 +53,8 @@ statictext="\
 set(psd \${PROJECT_SOURCE_DIR}/scripts)
 
 rest_target(\${CMAKE_CURRENT_SOURCE_DIR} example.bro internal)
-rest_target(\${psd} base/bro.init internal)
-rest_target(\${psd} base/all.bro internal)
+rest_target(\${psd} base/init-default.bro internal)
+rest_target(\${psd} base/init-bare.bro internal)
 "
 
 if [[ $# -ge 1 ]]; then

@@ -58,7 +81,7 @@ scriptfiles=`( cd ${sourcedir}/scripts && find . -name \*\.bro | sort )`
 for file in $scriptfiles
 do
     f=${file:2}
-    if [[ (! $f =~ $blacklist) && (! $f =~ $blacklist_addl) ]]; then
+    if [[ ! $f =~ $blacklist ]]; then
         echo "rest_target(\${psd} $f)" >> $outfile
     fi
 done
@@ -1,9 +1,8 @@
 #! /usr/bin/env python
 
 # This script automatically generates a reST document that lists
-# a collection of Bro policy scripts that are "grouped" together.
-# The summary text (##! comments) of the policy script is embedded
-# in the list.
+# a collection of Bro scripts that are "grouped" together.
+# The summary text (##! comments) of the script is embedded in the list.
 #
 # 1st argument is the file containing list of groups
 # 2nd argument is the directory containing ${group}_files lists of

@@ -57,6 +56,6 @@ with open(group_list, 'r') as f_group_list:
             f_group_file.write("\n");
 
         with open(group_file, 'a') as f_group_file:
-            f_group_file.write("\n:doc:`/policy/%s`\n" % doc_names[i])
+            f_group_file.write("\n:doc:`/scripts/%s`\n" % doc_names[i])
             for line in summary_comments:
                 f_group_file.write("    " + line)
@@ -14,7 +14,7 @@ Contents:
    internal
    bifs
    packages
-   policy/index
+   scripts/index
 
 Indices and tables
 ==================
@@ -1,5 +1,5 @@
 .. This is a stub doc to which the build process can append.
 
-Internal Policy Scripts
-=======================
+Internal Scripts
+================
 
@@ -1,11 +1,15 @@
 .. This is a stub doc to which the build process can append.
 
-Policy Script Packages
-======================
+Bro Script Packages
+===================
 
-Bro has the following policy script packages (e.g. collections of related
-policy scripts). If the package contains a ``__load__.bro`` script, it
-supports being loaded in mass as a whole directory for convenience.
+Bro has the following script packages (e.g. collections of related scripts in
+a common directory). If the package directory contains a ``__load__.bro``
+script, it supports being loaded in mass as a whole directory for convenience.
+
+Packages/scripts in the ``base/`` directory are all loaded by default, while
+ones in ``policy/`` provide functionality and customization options that are
+more appropriate for users to decide whether they'd like to load it or not.
 
 .. toctree::
    :maxdepth: 1
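As a quick illustration of the distinction described in that new paragraph, both loading styles already appear in scripts touched by this commit; a minimal Bro snippet contrasting them:

    # Load a whole package: its __load__.bro decides which scripts get pulled in.
    @load base/frameworks/metrics

    # Load a single script out of the policy/ tree (not loaded by default).
    @load frameworks/communication/listen-clear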
@@ -1,6 +0,0 @@
-Index of All Policy Script Documentation
-========================================
-
-.. toctree::
-   :maxdepth: 1
-

doc/scripts/source/scripts/index.rst (new file, 6 lines)

Index of All Bro Script Documentation
=====================================

.. toctree::
   :maxdepth: 1

@@ -1,6 +1,6 @@
 include(InstallPackageConfigFile)
 
-install(DIRECTORY ./ DESTINATION ${POLICYDIR} FILES_MATCHING
+install(DIRECTORY ./ DESTINATION ${BRO_SCRIPT_INSTALL_PATH} FILES_MATCHING
        PATTERN "site/local*" EXCLUDE
        PATTERN "*.bro"
        PATTERN "*.sig"

@@ -11,17 +11,17 @@ install(DIRECTORY ./ DESTINATION ${POLICYDIR} FILES_MATCHING
 # user modify-able.
 InstallPackageConfigFile(
     ${CMAKE_CURRENT_SOURCE_DIR}/site/local.bro
-    ${POLICYDIR}/site
+    ${BRO_SCRIPT_INSTALL_PATH}/site
     local.bro)
 InstallPackageConfigFile(
     ${CMAKE_CURRENT_SOURCE_DIR}/site/local-manager.bro
-    ${POLICYDIR}/site
+    ${BRO_SCRIPT_INSTALL_PATH}/site
     local-manager.bro)
 InstallPackageConfigFile(
     ${CMAKE_CURRENT_SOURCE_DIR}/site/local-proxy.bro
-    ${POLICYDIR}/site
+    ${BRO_SCRIPT_INSTALL_PATH}/site
     local-proxy.bro)
 InstallPackageConfigFile(
     ${CMAKE_CURRENT_SOURCE_DIR}/site/local-worker.bro
-    ${POLICYDIR}/site
+    ${BRO_SCRIPT_INSTALL_PATH}/site
     local-worker.bro)
@@ -19,7 +19,7 @@ redef peer_description = Cluster::node;
 
 @load ./setup-connections
 
-# Don't start the listening process until we're a bit more sure that the
+# Don't load the listening script until we're a bit more sure that the
 # cluster framework is actually being enabled.
 @load frameworks/communication/listen-clear
 
@@ -48,6 +48,25 @@ export {
         time_machine: string &optional;
     };
 
+    ## This function can be called at any time to determine if the cluster
+    ## framework is being enabled for this run.
+    global is_enabled: function(): bool;
+
+    ## This function can be called at any time to determine what type of
+    ## cluster node the current Bro instance is going to be acting as.
+    ## :bro:id:`is_enabled` should be called first to find out if this is
+    ## actually going to be a cluster node.
+    global local_node_type: function(): NodeType;
+
+    ## This gives the value for the number of workers currently connected to,
+    ## and it's maintained internally by the cluster framework. It's
+    ## primarily intended for use by managers to find out how many workers
+    ## should be responding to requests.
+    global worker_count: count = 0;
+
+    ## The cluster layout definition. This should be placed into a file
+    ## named cluster-layout.bro somewhere in the BROPATH. It will be
+    ## automatically loaded if the CLUSTER_NODE environment variable is set.
     const nodes: table[string] of Node = {} &redef;
 
     ## This is usually supplied on the command line for each instance

@@ -55,7 +74,29 @@ export {
     const node = getenv("CLUSTER_NODE") &redef;
 }
 
-event bro_init()
+function is_enabled(): bool
+    {
+    return (node != "");
+    }
+
+function local_node_type(): NodeType
+    {
+    return nodes[node]$node_type;
+    }
+
+
+event remote_connection_handshake_done(p: event_peer)
+    {
+    if ( nodes[p$descr]$node_type == WORKER )
+        ++worker_count;
+    }
+event remote_connection_closed(p: event_peer)
+    {
+    if ( nodes[p$descr]$node_type == WORKER )
+        --worker_count;
+    }
+
+event bro_init() &priority=5
     {
     # If a node is given, but it's an unknown name we need to fail.
     if ( node != "" && node !in nodes )
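For reference, a minimal sketch of what a cluster-layout.bro might look like, using only the Node fields referenced in this diff ($node_type, $ip, $p, $manager, $proxy); the node names, addresses, and ports are made up for illustration and the real Node record may require additional fields:

    # Hypothetical cluster-layout.bro, picked up from BROPATH when CLUSTER_NODE is set.
    redef Cluster::nodes = {
        ["manager"]  = [$node_type=Cluster::MANAGER, $ip=192.168.1.1, $p=47761/tcp],
        ["proxy-1"]  = [$node_type=Cluster::PROXY,   $ip=192.168.1.2, $p=47762/tcp, $manager="manager"],
        ["worker-1"] = [$node_type=Cluster::WORKER,  $ip=192.168.1.3, $p=47763/tcp, $manager="manager", $proxy="proxy-1"],
    };

With CLUSTER_NODE=worker-1 in the environment, is_enabled() would return T and local_node_type() would return Cluster::WORKER for that process.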
@@ -12,7 +12,7 @@
 
 @prefixes += cluster-manager
 
-## Load the script for local site configuration for the manager node.
+# Load the script for local site configuration for the manager node.
 @load site/local-manager
 
 ## Turn off remote logging since this is the manager and should only log here.

@@ -1,7 +1,7 @@
 
 @prefixes += cluster-proxy
 
-## Load the script for local site configuration for proxy nodes.
+# Load the script for local site configuration for proxy nodes.
 @load site/local-proxy
 
 ## The proxy only syncs state; does not forward events.

@@ -2,7 +2,7 @@
 
 @prefixes += cluster-worker
 
-## Load the script for local site configuration for the worker nodes.
+# Load the script for local site configuration for the worker nodes.
 @load site/local-worker
 
 ## Don't do any local logging.
@@ -62,13 +62,12 @@ event bro_init() &priority=9
                                                $connect=T, $retry=1mins,
                                                $class=node];
         }
 
     else if ( me$node_type == WORKER )
         {
         if ( n$node_type == MANAGER && me$manager == i )
             Communication::nodes["manager"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
                                                $connect=T, $retry=1mins,
-                                               $class=node];
+                                               $class=node, $events=manager_events];
 
         if ( n$node_type == PROXY && me$proxy == i )
             Communication::nodes["proxy"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
@@ -33,10 +33,12 @@ export {
     ##
     ## id: The log stream.
     ## path: A suggested path value, which may be either the filter's ``path``
     ##       if defined or a fall-back generated internally.
+    ## rec: An instance of the stream's ``columns`` type with its
+    ##      fields set to the values to be logged.
     ##
     ## Returns: The path to be used for the filter.
-    global default_path_func: function(id: ID, path: string) : string &redef;
+    global default_path_func: function(id: ID, path: string, rec: any) : string &redef;
 
     ## Filter customizing logging.
     type Filter: record {

@@ -71,7 +73,15 @@ export {
         ## different strings for separate calls, but be careful: it's
         ## easy to flood the disk by returning a new string for each
         ## connection ...
-        path_func: function(id: ID, path: string): string &optional;
+        ##
+        ## id: The log stream.
+        ## path: A suggested path value, which may be either the filter's ``path``
+        ##       if defined or a fall-back generated internally.
+        ## rec: An instance of the stream's ``columns`` type with its
+        ##      fields set to the values to be logged.
+        ##
+        ## Returns: The path to be used for the filter.
+        path_func: function(id: ID, path: string, rec: any): string &optional;
 
         ## Subset of column names to record. If not given, all
         ## columns are recorded.

@@ -160,7 +170,7 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool
     return default_rotation_postprocessors[info$writer](info);
     }
 
-function default_path_func(id: ID, path: string) : string
+function default_path_func(id: ID, path: string, rec: any) : string
     {
     # TODO for Seth: Do what you want. :)
     return path;
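To illustrate the new three-argument signature, here is a rough sketch of a filter whose path_func inspects the record being logged. The HTTP::LOG stream name, the HTTP::Info fields, and the filter name are assumptions for illustration only, not part of this change:

    # Hypothetical use of the path_func that now receives the log record.
    function http_path(id: Log::ID, path: string, rec: HTTP::Info): string
        {
        # Split the log based on a value taken from the record itself.
        return rec$id$resp_p == 80/tcp ? cat(path, "-80") : cat(path, "-other");
        }

    event bro_init()
        {
        Log::add_filter(HTTP::LOG, [$name="http-by-port", $path_func=http_path]);
        }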
@@ -1 +1,11 @@
 @load ./main
+
+# The cluster framework must be loaded first.
+@load base/frameworks/cluster
+
+# Load either the cluster support script or the non-cluster support script.
+@if ( Cluster::is_enabled() )
+@load ./cluster
+@else
+@load ./non-cluster
+@endif
scripts/base/frameworks/metrics/cluster.bro (new file, 146 lines)

##! This implements transparent cluster support for the metrics framework.
##! Do not load this file directly. It's only meant to be loaded automatically
##! and will be, depending on whether the cluster framework has been enabled.
##! The goal of this script is to make metric calculation completely and
##! transparently automated when running on a cluster.

@load base/frameworks/cluster

module Metrics;

export {
    ## This event is sent by the manager in a cluster to initiate the
    ## collection of metrics values.
    global cluster_collect: event(uid: string, id: ID, filter_name: string);

    ## This event is sent by nodes that are collecting metrics after receiving
    ## a request for the metric filter from the manager.
    global cluster_results: event(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool);

    ## This event is used internally by workers to send result chunks.
    global send_data: event(uid: string, id: ID, filter_name: string, data: MetricTable);

    ## This value allows a user to decide how large of result groups the
    ## workers should transmit values.
    const cluster_send_in_groups_of = 50 &redef;
}

# This is maintained by managers so they can know what data they requested and
# when they requested it.
global requested_results: table[string] of time = table() &create_expire=5mins;

# TODO: Both of the next variables make the assumption that a value never
# takes longer than 5 minutes to transmit from workers to manager. This needs to
# be tunable or self-tuning. These should also be restructured to be
# maintained within a single variable.

# This variable is maintained by manager nodes as they collect and aggregate
# results.
global collecting_results: table[string, ID, string] of MetricTable &create_expire=5mins;

# This variable is maintained by manager nodes to track how many "dones" they
# collected per collection unique id. Once the number of results for a uid
# matches the number of peer nodes that results should be coming from, the
# result is written out and deleted from here.
# TODO: add an &expire_func in case not all results are received.
global done_with: table[string] of count &create_expire=5mins &default=0;

# Add events to the cluster framework to make this work.
redef Cluster::manager_events += /Metrics::cluster_collect/;
redef Cluster::worker_events += /Metrics::cluster_results/;

# The metrics collection process can only be done by a manager.
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event Metrics::log_it(filter: Filter)
    {
    local uid = unique_id("");

    # Set some tracking variables.
    requested_results[uid] = network_time();
    collecting_results[uid, filter$id, filter$name] = table();

    # Request data from peers.
    event Metrics::cluster_collect(uid, filter$id, filter$name);
    # Schedule the log_it event for the next break period.
    schedule filter$break_interval { Metrics::log_it(filter) };
    }
@endif

@if ( Cluster::local_node_type() == Cluster::WORKER )

event Metrics::send_data(uid: string, id: ID, filter_name: string, data: MetricTable)
    {
    #print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid);

    local local_data: MetricTable;
    local num_added = 0;
    for ( index in data )
        {
        local_data[index] = data[index];
        delete data[index];

        # Only send cluster_send_in_groups_of at a time. Queue another
        # event to send the next group.
        if ( cluster_send_in_groups_of == ++num_added )
            break;
        }

    local done = F;
    # If data is empty, this metric is done.
    if ( |data| == 0 )
        done = T;

    event Metrics::cluster_results(uid, id, filter_name, local_data, done);
    if ( ! done )
        event Metrics::send_data(uid, id, filter_name, data);
    }

event Metrics::cluster_collect(uid: string, id: ID, filter_name: string)
    {
    #print fmt("WORKER %s: received the cluster_collect event.", Cluster::node);

    event Metrics::send_data(uid, id, filter_name, store[id, filter_name]);

    # Lookup the actual filter and reset it, the reference to the data
    # currently stored will be maintained internally by the send_data event.
    reset(filter_store[id, filter_name]);
    }
@endif


@if ( Cluster::local_node_type() == Cluster::MANAGER )

event Metrics::cluster_results(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool)
    {
    #print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);

    local local_data = collecting_results[uid, id, filter_name];
    for ( index in data )
        {
        if ( index !in local_data )
            local_data[index] = 0;
        local_data[index] += data[index];
        }

    # Mark another worker as being "done" for this uid.
    if ( done )
        ++done_with[uid];

    # If the data has been collected from all peers, we are done and ready to log.
    if ( Cluster::worker_count == done_with[uid] )
        {
        local ts = network_time();
        # Log the time this was initially requested if it's available.
        if ( uid in requested_results )
            ts = requested_results[uid];

        write_log(ts, filter_store[id, filter_name], local_data);
        if ( [uid, id, filter_name] in collecting_results )
            delete collecting_results[uid, id, filter_name];
        if ( uid in done_with )
            delete done_with[uid];
        if ( uid in requested_results )
            delete requested_results[uid];
        }
    }

@endif
@ -15,16 +15,7 @@ export {
|
||||||
## current value to the logging stream.
|
## current value to the logging stream.
|
||||||
const default_break_interval = 15mins &redef;
|
const default_break_interval = 15mins &redef;
|
||||||
|
|
||||||
type Info: record {
|
type Index: record {
|
||||||
ts: time &log;
|
|
||||||
metric_id: ID &log;
|
|
||||||
filter_name: string &log;
|
|
||||||
agg_subnet: string &log &optional;
|
|
||||||
index: string &log &optional;
|
|
||||||
value: count &log;
|
|
||||||
};
|
|
||||||
|
|
||||||
type Entry: record {
|
|
||||||
## Host is the value to which this metric applies.
|
## Host is the value to which this metric applies.
|
||||||
host: addr &optional;
|
host: addr &optional;
|
||||||
|
|
||||||
|
@ -35,11 +26,19 @@ export {
|
||||||
## value in a Host header. This is an example of a non-host based
|
## value in a Host header. This is an example of a non-host based
|
||||||
## metric since multiple IP addresses could respond for the same Host
|
## metric since multiple IP addresses could respond for the same Host
|
||||||
## header value.
|
## header value.
|
||||||
index: string &default="";
|
str: string &optional;
|
||||||
|
|
||||||
## The value by which the counter should be increased in each filter
|
## The CIDR block that this metric applies to. This is typically
|
||||||
## where this entry is accepted.
|
## only used internally for host based aggregation.
|
||||||
increment: count &default=1;
|
network: subnet &optional;
|
||||||
|
} &log;
|
||||||
|
|
||||||
|
type Info: record {
|
||||||
|
ts: time &log;
|
||||||
|
metric_id: ID &log;
|
||||||
|
filter_name: string &log;
|
||||||
|
index: Index &log;
|
||||||
|
value: count &log;
|
||||||
};
|
};
|
||||||
|
|
||||||
# TODO: configure a metrics filter logging stream to log the current
|
# TODO: configure a metrics filter logging stream to log the current
|
||||||
|
@ -54,11 +53,11 @@ export {
|
||||||
name: string &default="default";
|
name: string &default="default";
|
||||||
## A predicate so that you can decide per index if you would like
|
## A predicate so that you can decide per index if you would like
|
||||||
## to accept the data being inserted.
|
## to accept the data being inserted.
|
||||||
pred: function(entry: Entry): bool &optional;
|
pred: function(index: Index): bool &optional;
|
||||||
## Global mask by which you'd like to aggregate traffic.
|
## Global mask by which you'd like to aggregate traffic.
|
||||||
aggregation_mask: count &optional;
|
aggregation_mask: count &optional;
|
||||||
## This is essentially applying names to various subnets.
|
## This is essentially applying names to various subnets.
|
||||||
aggregation_table: table[subnet] of string &optional;
|
aggregation_table: table[subnet] of subnet &optional;
|
||||||
## The interval at which the metric should be "broken" and written
|
## The interval at which the metric should be "broken" and written
|
||||||
## to the logging stream.
|
## to the logging stream.
|
||||||
break_interval: interval &default=default_break_interval;
|
break_interval: interval &default=default_break_interval;
|
||||||
|
@ -70,6 +69,7 @@ export {
|
||||||
## A straight threshold for generating a notice.
|
## A straight threshold for generating a notice.
|
||||||
notice_threshold: count &optional;
|
notice_threshold: count &optional;
|
||||||
## A series of thresholds at which to generate notices.
|
## A series of thresholds at which to generate notices.
|
||||||
|
## TODO: This is not implemented yet!
|
||||||
notice_thresholds: vector of count &optional;
|
notice_thresholds: vector of count &optional;
|
||||||
## If this and a $notice_threshold value are set, this notice type
|
## If this and a $notice_threshold value are set, this notice type
|
||||||
## will be generated by the metrics framework.
|
## will be generated by the metrics framework.
|
||||||
|
@ -77,15 +77,23 @@ export {
|
||||||
};
|
};
|
||||||
|
|
||||||
global add_filter: function(id: ID, filter: Filter);
|
global add_filter: function(id: ID, filter: Filter);
|
||||||
global add_data: function(id: ID, entry: Entry);
|
global add_data: function(id: ID, index: Index, increment: count);
|
||||||
|
|
||||||
|
# This is the event that is used to "finish" metrics and adapt the metrics
|
||||||
|
# framework for clustered or non-clustered usage.
|
||||||
|
global log_it: event(filter: Filter);
|
||||||
|
|
||||||
global log_metrics: event(rec: Info);
|
global log_metrics: event(rec: Info);
|
||||||
}
|
}
|
||||||
|
|
||||||
global metric_filters: table[ID] of vector of Filter = table();
|
redef record Notice::Info += {
|
||||||
|
metric_index: Index &log &optional;
|
||||||
|
};
|
||||||
|
|
||||||
type MetricIndex: table[string] of count &default=0;
|
global metric_filters: table[ID] of vector of Filter = table();
|
||||||
type MetricTable: table[string] of MetricIndex;
|
global filter_store: table[ID, string] of Filter = table();
|
||||||
|
|
||||||
|
type MetricTable: table[Index] of count &default=0;
|
||||||
# This is indexed by metric ID and stream filter name.
|
# This is indexed by metric ID and stream filter name.
|
||||||
global store: table[ID, string] of MetricTable = table();
|
global store: table[ID, string] of MetricTable = table();
|
||||||
|
|
||||||
|
@ -98,62 +106,44 @@ event bro_init() &priority=5
|
||||||
Log::create_stream(METRICS, [$columns=Info, $ev=log_metrics]);
|
Log::create_stream(METRICS, [$columns=Info, $ev=log_metrics]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function write_log(ts: time, filter: Filter, data: MetricTable)
|
||||||
|
{
|
||||||
|
for ( index in data )
|
||||||
|
{
|
||||||
|
local val = data[index];
|
||||||
|
local m: Info = [$ts=ts,
|
||||||
|
$metric_id=filter$id,
|
||||||
|
$filter_name=filter$name,
|
||||||
|
$index=index,
|
||||||
|
$value=val];
|
||||||
|
|
||||||
|
if ( m$index?$host &&
|
||||||
|
filter?$notice_threshold &&
|
||||||
|
m$value >= filter$notice_threshold )
|
||||||
|
{
|
||||||
|
NOTICE([$note=filter$note,
|
||||||
|
$msg=fmt("Metrics threshold crossed by %s %d/%d", index$host, m$value, filter$notice_threshold),
|
||||||
|
$src=m$index$host, $n=m$value,
|
||||||
|
$metric_index=index]);
|
||||||
|
}
|
||||||
|
|
||||||
|
else if ( filter?$notice_thresholds &&
|
||||||
|
m$value >= filter$notice_thresholds[thresholds[cat(filter$id,filter$name)]] )
|
||||||
|
{
|
||||||
|
# TODO: implement this
|
||||||
|
}
|
||||||
|
|
||||||
|
if ( filter$log )
|
||||||
|
Log::write(METRICS, m);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
function reset(filter: Filter)
|
function reset(filter: Filter)
|
||||||
{
|
{
|
||||||
store[filter$id, filter$name] = table();
|
store[filter$id, filter$name] = table();
|
||||||
}
|
}
|
||||||
|
|
||||||
event log_it(filter: Filter)
|
|
||||||
{
|
|
||||||
# If this node is the manager in a cluster, this needs to request values
|
|
||||||
# for this metric from all of the workers.
|
|
||||||
|
|
||||||
local id = filter$id;
|
|
||||||
local name = filter$name;
|
|
||||||
for ( agg_subnet in store[id, name] )
|
|
||||||
{
|
|
||||||
local metric_values = store[id, name][agg_subnet];
|
|
||||||
for ( index in metric_values )
|
|
||||||
{
|
|
||||||
local val = metric_values[index];
|
|
||||||
local m: Info = [$ts=network_time(),
|
|
||||||
$metric_id=id,
|
|
||||||
$filter_name=name,
|
|
||||||
$agg_subnet=fmt("%s", agg_subnet),
|
|
||||||
$index=index,
|
|
||||||
$value=val];
|
|
||||||
|
|
||||||
if ( filter?$notice_threshold &&
|
|
||||||
m$value >= filter$notice_threshold )
|
|
||||||
{
|
|
||||||
print m;
|
|
||||||
NOTICE([$note=filter$note,
|
|
||||||
$msg=fmt("Metrics threshold crossed by %s %d/%d", m$agg_subnet, m$value, filter$notice_threshold),
|
|
||||||
$n=m$value]);
|
|
||||||
}
|
|
||||||
|
|
||||||
else if ( filter?$notice_thresholds &&
|
|
||||||
m$value >= filter$notice_thresholds[thresholds[cat(id,name)]] )
|
|
||||||
{
|
|
||||||
# TODO: implement this
|
|
||||||
}
|
|
||||||
|
|
||||||
# If there wasn't an index, remove the field.
|
|
||||||
if ( index == "" )
|
|
||||||
delete m$index;
|
|
||||||
|
|
||||||
# If there wasn't an aggregation subnet, remove the field.
|
|
||||||
if ( agg_subnet == "" )
|
|
||||||
delete m$agg_subnet;
|
|
||||||
|
|
||||||
Log::write(METRICS, m);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
reset(filter);
|
|
||||||
|
|
||||||
schedule filter$break_interval { log_it(filter) };
|
|
||||||
}
|
|
||||||
|
|
||||||
 function add_filter(id: ID, filter: Filter)
     {
     if ( filter?$aggregation_table && filter?$aggregation_mask )

@@ -179,13 +169,13 @@ function add_filter(id: ID, filter: Filter)
     metric_filters[id] = vector();
     metric_filters[id][|metric_filters[id]|] = filter;

+    filter_store[id, filter$name] = filter;
     store[id, filter$name] = table();

-    # Only do this on the manager if in a cluster.
-    schedule filter$break_interval { log_it(filter) };
+    schedule filter$break_interval { Metrics::log_it(filter) };
     }

-function add_data(id: ID, entry: Entry)
+function add_data(id: ID, index: Index, increment: count)
     {
     if ( id !in metric_filters )
         return;

@@ -198,38 +188,28 @@ function add_data(id: ID, entry: Entry)
         local filter = filters[filter_id];

         # If this filter has a predicate, run the predicate and skip this
-        # entry if the predicate return false.
+        # index if the predicate return false.
         if ( filter?$pred &&
-             ! filter$pred(entry) )
+             ! filter$pred(index) )
             next;

-        local agg_subnet = "";
         local filt_store = store[id, filter$name];
-        if ( entry?$host )
+        if ( index?$host )
             {
             if ( filter?$aggregation_mask )
                 {
-                local agg_mask = filter$aggregation_mask;
-                agg_subnet = fmt("%s", mask_addr(entry$host, agg_mask));
+                index$network = mask_addr(index$host, filter$aggregation_mask);
+                delete index$host;
                 }
             else if ( filter?$aggregation_table )
                 {
-                agg_subnet = fmt("%s", filter$aggregation_table[entry$host]);
-                # if an aggregation table is being used and the value isn't
-                # in the table, that means we aren't interested in it.
-                if ( agg_subnet == "" )
-                    next;
+                index$network = filter$aggregation_table[index$host];
+                delete index$host;
                 }
-            else
-                agg_subnet = fmt("%s", entry$host);
             }

-        if ( agg_subnet !in filt_store )
-            filt_store[agg_subnet] = table();
-
-        local fs = filt_store[agg_subnet];
-        if ( entry$index !in fs )
-            fs[entry$index] = 0;
-        fs[entry$index] = fs[entry$index] + entry$increment;
+        if ( index !in filt_store )
+            filt_store[index] = 0;
+        filt_store[index] += increment;
         }
     }
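For illustration only (not part of this commit): with the reworked API, measurements are keyed by a Metrics::Index record and carry an explicit increment. A minimal sketch of feeding the framework after this change; the MY_METRIC enum value is hypothetical, the calls mirror the conn example and tests later in this diff.

    redef enum Metrics::ID += { MY_METRIC };   # hypothetical metric ID for illustration

    event bro_init()
        {
        # One filter per metric; results are logged every break_interval.
        Metrics::add_filter(MY_METRIC, [$name="example", $break_interval=5mins]);
        }

    event connection_established(c: connection)
        {
        # Count one occurrence for the originating host.
        Metrics::add_data(MY_METRIC, [$host=c$id$orig_h], 1);
        }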
scripts/base/frameworks/metrics/non-cluster.bro (new file, 17 lines)
@@ -0,0 +1,17 @@
+module Metrics;
+
+export {
+
+}
+
+event Metrics::log_it(filter: Filter)
+    {
+    local id = filter$id;
+    local name = filter$name;
+
+    write_log(network_time(), filter, store[id, name]);
+    reset(filter);
+
+    schedule filter$break_interval { Metrics::log_it(filter) };
+    }
@@ -6,7 +6,8 @@
 @load ./actions/drop
 @load ./actions/email_admin
 @load ./actions/page
+@load ./actions/add-geodata

-# Load the script to add hostnames to emails by default.
-# NOTE: this exposes a memleak in async DNS lookups.
-#@load ./extend-email/hostnames
+# There shouldn't be any defaul toverhead from loading these since they
+# *should* only do anything when notices have the ACTION_EMAIL action applied.
+@load ./extend-email/hostnames
scripts/base/frameworks/notice/actions/add-geodata.bro (new file, 47 lines)
@@ -0,0 +1,47 @@
+##! This script adds geographic location data to notices for the "remote"
+##! host in a connection.  It does make the assumption that one of the
+##! addresses in a connection is "local" and one is "remote" which is
+##! probably a safe assumption to make in most cases.  If both addresses
+##! are remote, it will use the $src address.
+
+module Notice;
+
+export {
+    redef enum Action += {
+        ## Indicates that the notice should have geodata added for the
+        ## "remote" host.  :bro:id:`Site::local_nets` must be defined
+        ## in order for this to work.
+        ACTION_ADD_GEODATA
+    };
+
+    redef record Info += {
+        ## If libGeoIP support is built in, notices can have geographic
+        ## information attached to them.
+        remote_location: geo_location &log &optional;
+    };
+
+    ## Notice types which should have the "remote" location looked up.
+    ## If GeoIP support is not built in, this does nothing.
+    const lookup_location_types: set[Notice::Type] = {} &redef;
+
+    ## Add a helper to the notice policy for looking up GeoIP data.
+    redef Notice::policy += {
+        [$pred(n: Notice::Info) = { return (n$note in Notice::lookup_location_types); },
+         $priority = 10],
+    };
+}
+
+# This is handled at a high priority in case other notice handlers
+# want to use the data.
+event notice(n: Notice::Info) &priority=10
+    {
+    if ( ACTION_ADD_GEODATA in n$actions &&
+         |Site::local_nets| > 0 &&
+         ! n?$remote_location )
+        {
+        if ( n?$src && ! Site::is_local_addr(n$src) )
+            n$remote_location = lookup_location(n$src);
+        else if ( n?$dst && ! Site::is_local_addr(n$dst) )
+            n$remote_location = lookup_location(n$dst);
+        }
+    }
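For illustration only (not part of this commit): a minimal sketch of opting a notice type into the new geodata lookup. The Suspicious_Connection notice type is hypothetical; the redef targets the lookup_location_types set declared in the script above.

    @load base/frameworks/notice/actions/add-geodata

    redef enum Notice::Type += { Suspicious_Connection };   # hypothetical notice type

    # Ask the notice framework to attach remote_location for this type.
    redef Notice::lookup_location_types += { Suspicious_Connection };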
@@ -23,11 +23,11 @@
 @load base/frameworks/signatures
 @load base/frameworks/packet-filter
 @load base/frameworks/software
-@load base/frameworks/intel
-@load base/frameworks/metrics
 @load base/frameworks/communication
 @load base/frameworks/control
 @load base/frameworks/cluster
+@load base/frameworks/metrics
+@load base/frameworks/intel
 @load base/frameworks/reporter

 @load base/protocols/conn
@@ -17,9 +17,11 @@ export {
     ## Networks that are considered "local".
     const local_nets: set[subnet] &redef;

-    ## This is used for mapping between local networks and string
-    ## values for the CIDRs represented.
-    global local_nets_table: table[subnet] of string = {};
+    ## This is used for retrieving the subnet when you multiple
+    ## :bro:id:`local_nets`.  A membership query can be done with an
+    ## :bro:type:`addr` and the table will yield the subnet it was found
+    ## within.
+    global local_nets_table: table[subnet] of subnet = {};

     ## Networks that are considered "neighbors".
     const neighbor_nets: set[subnet] &redef;

@@ -145,6 +147,6 @@ event bro_init() &priority=10

     # Create the local_nets mapping table.
     for ( cidr in Site::local_nets )
-        local_nets_table[cidr] = fmt("%s", cidr);
+        local_nets_table[cidr] = cidr;

     }
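For illustration only (not part of this commit): the new table maps each configured local subnet to itself, so a script can both test membership with an address and recover the matching CIDR. A small usage sketch; the 10.0.0.0/8 entry and host address are examples only.

    redef Site::local_nets += { 10.0.0.0/8 };

    event bro_init()
        {
        local h = 10.1.2.3;
        if ( h in Site::local_nets_table )
            # Yields the subnet the address was found within, e.g. 10.0.0.0/8.
            print Site::local_nets_table[h];
        }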
@@ -16,7 +16,7 @@ event bro_init()

 event connection_established(c: connection)
     {
-    Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h]);
-    Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h]);
+    Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h], 1);
+    Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h], 1);
     }
@@ -9,16 +9,26 @@ redef enum Metrics::ID += {

 event bro_init()
     {
-    Metrics::add_filter(HTTP_REQUESTS_BY_HOST_HEADER, [$break_interval=5mins]);
-    # Site::local_nets must be defined in order for this to actually do anything.
-    Metrics::add_filter(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_table=Site::local_nets_table, $break_interval=5mins]);
+    # TODO: these are waiting on a fix with table vals + records before they will work.
+    #Metrics::add_filter(HTTP_REQUESTS_BY_HOST_HEADER,
+    #                    [$pred(index: Index) = { return Site:is_local_addr(index$host) },
+    #                     $aggregation_mask=24,
+    #                     $break_interval=5mins]);
+    #
+    ## Site::local_nets must be defined in order for this to actually do anything.
+    #Metrics::add_filter(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_table=Site::local_nets_table,
+    #                     $break_interval=5mins]);
     }

 event HTTP::log_http(rec: HTTP::Info)
     {
     if ( rec?$host )
-        Metrics::add_data(HTTP_REQUESTS_BY_HOST_HEADER, [$index=rec$host]);
+        Metrics::add_data(HTTP_REQUESTS_BY_HOST_HEADER, [$str=rec$host]);
     if ( rec?$status_code )
+<<<<<<< HEAD
         Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $index=fmt("%d", rec$status_code)]);
     }
+=======
+        Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $str=fmt("%d", rec$status_code)]);
+    }
+>>>>>>> master
@@ -9,8 +9,8 @@ event bro_init()
     {
     Metrics::add_filter(SSL_SERVERNAME,
                         [$name="no-google-ssl-servers",
-                         $pred(entry: Metrics::Entry) = {
-                             return (/google\.com$/ !in entry$index);
+                         $pred(index: Metrics::Index) = {
+                             return (/google\.com$/ !in index$str);
                          },
                          $break_interval=10secs
                         ]);

@@ -19,5 +19,5 @@ event bro_init()
 event SSL::log_ssl(rec: SSL::Info)
     {
     if ( rec?$server_name )
-        Metrics::add_data(SSL_SERVERNAME, [$index=rec$server_name]);
+        Metrics::add_data(SSL_SERVERNAME, [$str=rec$server_name], 1);
     }
@@ -20,8 +20,8 @@ BroDoc::BroDoc(const std::string& rel, const std::string& abs)

     if ( rel[0] == '/' || rel[0] == '.' )
         {
-        // The Bro script must not be on a subpath of the policy/ dir of
-        // BROPATH, so just use the basename as the document title.
+        // The Bro script isn't being loaded via BROPATH, so just use basename
+        // as the document title.
         doc_title = source_filename;
         }
     else

@@ -33,8 +33,14 @@ BroDoc::BroDoc(const std::string& rel, const std::string& abs)
         doc_title = rel + "/" + source_filename;
         }

+    downloadable_filename = source_filename;
+
+    size_t ext_pos = downloadable_filename.find(".bif.bro");
+    if ( std::string::npos != ext_pos )
+        downloadable_filename.erase(ext_pos + 4);
+
     reST_filename = doc_title;
-    size_t ext_pos = reST_filename.find(".bro");
+    ext_pos = reST_filename.find(".bro");

     if ( std::string::npos == ext_pos )
         reST_filename += ".rst";

@@ -103,14 +109,14 @@ void BroDoc::AddImport(const std::string& s)
     {
     if ( subpath[0] == '/' || subpath[0] == '.' )
         {
-        // it's not a subpath of policy/, so just add the name of it
+        // it's not a subpath of scripts/, so just add the name of it
         // as it's given in the @load directive
         imports.push_back(lname);
         }
     else
         {
         // combine the base file name of script in the @load directive
-        // with the subpath of BROPATH's policy/ directory
+        // with the subpath of BROPATH's scripts/ directory
         string fname(subpath);
         char* othertmp = copy_string(lname.c_str());
         fname.append("/").append(basename(othertmp));

@@ -167,7 +173,7 @@ void BroDoc::WriteDocFile() const
     WriteSectionHeading(doc_title.c_str(), '=');

     WriteToDoc("\n:download:`Original Source File <%s>`\n\n",
-        source_filename.c_str());
+        downloadable_filename.c_str());

     WriteSectionHeading("Overview", '-');
     WriteStringList("%s\n", "%s\n\n", summary);

@@ -185,7 +191,7 @@ void BroDoc::WriteDocFile() const
         size_t pos = pretty.find("/index");
         if ( pos != std::string::npos && pos + 6 == pretty.size() )
             pretty = pretty.substr(0, pos);
-        WriteToDoc(":doc:`%s </policy/%s>`", pretty.c_str(), it->c_str());
+        WriteToDoc(":doc:`%s </scripts/%s>`", pretty.c_str(), it->c_str());
         }
     WriteToDoc("\n");
     }
@@ -24,10 +24,9 @@ public:
     * If the filename doesn't end in ".bro", then ".rst" is just appended.
     * Any '/' characters in the reST file name that result from choice of
     * the 'rel' parameter are replaced with '^'.
-    * @param subpath A string representing a subpath of BROPATH's policy/
-    *        directory in which the source file is located.  It can
-    *        also be full path to the file or a full path that's in BROPATH,
-    *        but in either of those cases, the parameter is essentially
+    * @param rel A string representing a subpath of the root Bro script
+    *        source/install directory in which the source file is located.
+    *        It can also be an absolute path, but then the parameter is
     *        ignored and the document title is just derived from file name
     * @param abs The absolute path to the Bro script for which to generate
     *        documentation.

@@ -211,6 +210,7 @@ protected:
    FILE* reST_file;
    std::string reST_filename;
    std::string source_filename; // points to the basename of source file
+   std::string downloadable_filename; // file that will be linked for download
    std::string doc_title;
    std::string packet_filter;
@@ -415,7 +415,8 @@ set(bro_SRCS

 collect_headers(bro_HEADERS ${bro_SRCS})

-add_definitions(-DPOLICYDEST="${POLICYDIR}")
+add_definitions(-DBRO_SCRIPT_INSTALL_PATH="${BRO_SCRIPT_INSTALL_PATH}")
+add_definitions(-DBRO_SCRIPT_SOURCE_PATH="${BRO_SCRIPT_SOURCE_PATH}")

 add_executable(bro ${bro_SRCS} ${bro_HEADERS})

@@ -430,7 +431,7 @@ set(brolibs
 target_link_libraries(bro ${brolibs})

 install(TARGETS bro DESTINATION bin)
-install(FILES ${INSTALL_BIF_OUTPUTS} DESTINATION ${POLICYDIR}/base)
+install(FILES ${INSTALL_BIF_OUTPUTS} DESTINATION ${BRO_SCRIPT_INSTALL_PATH}/base)

 set(BRO_EXE bro
     CACHE STRING "Bro executable binary" FORCE)
@@ -866,15 +866,10 @@ void ConnCompressor::Event(const PendingConn* pending, double t,

     if ( ConnSize_Analyzer::Available() )
         {
+        // Fill in optional fields if ConnSize_Analyzer is on.
         orig_endp->Assign(2, new Val(pending->num_pkts, TYPE_COUNT));
         orig_endp->Assign(3, new Val(pending->num_bytes_ip, TYPE_COUNT));
         }
-    else
-        {
-        orig_endp->Assign(2, new Val(0, TYPE_COUNT));
-        orig_endp->Assign(3, new Val(0, TYPE_COUNT));
-        }

     resp_endp->Assign(0, new Val(0, TYPE_COUNT));
     resp_endp->Assign(1, new Val(resp_state, TYPE_COUNT));

@@ -900,14 +895,10 @@ void ConnCompressor::Event(const PendingConn* pending, double t,

     if ( ConnSize_Analyzer::Available() )
         {
+        // Fill in optional fields if ConnSize_Analyzer is on
         resp_endp->Assign(2, new Val(pending->num_pkts, TYPE_COUNT));
         resp_endp->Assign(3, new Val(pending->num_bytes_ip, TYPE_COUNT));
         }
-    else
-        {
-        resp_endp->Assign(2, new Val(0, TYPE_COUNT));
-        resp_endp->Assign(3, new Val(0, TYPE_COUNT));
-        }

     DBG_LOG(DBG_COMPRESSOR, "%s swapped direction", fmt_conn_id(pending));
     }
@@ -1071,7 +1071,7 @@ void DNS_Mgr::Process()
     int status = nb_dns_activity(nb_dns, &r, err);

     if ( status < 0 )
-        reporter->InternalError("NB-DNS error in DNS_Mgr::Process (%s)", err);
+        reporter->Warning("NB-DNS error in DNS_Mgr::Process (%s)", err);

     else if ( status > 0 )
         {
@@ -5020,8 +5020,9 @@ Val* ListExpr::InitVal(const BroType* t, Val* aggr) const
     loop_over_list(exprs, i)
         {
         Expr* e = exprs[i];
+        check_and_promote_expr(e, vec->Type()->AsVectorType()->YieldType());
         Val* v = e->Eval(0);
-        if ( ! vec->Assign(i, v, e) )
+        if ( ! vec->Assign(i, v->RefCnt() == 1 ? v->Ref() : v, e) )
             {
             e->Error(fmt("type mismatch at index %d", i));
             return 0;
@@ -89,7 +89,7 @@ bool LogField::Write(SerializationFormat* fmt) const

 LogVal::~LogVal()
     {
-    if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE)
+    if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE || type == TYPE_FUNC)
          && present )
         delete val.string_val;

@@ -130,6 +130,7 @@ bool LogVal::IsCompatibleType(BroType* t, bool atomic_only)
     case TYPE_ENUM:
     case TYPE_STRING:
     case TYPE_FILE:
+    case TYPE_FUNC:
         return true;

     case TYPE_RECORD:

@@ -231,6 +232,7 @@ bool LogVal::Read(SerializationFormat* fmt)
     case TYPE_ENUM:
     case TYPE_STRING:
     case TYPE_FILE:
+    case TYPE_FUNC:
         {
         val.string_val = new string;
         return fmt->Read(val.string_val, "string");

@@ -343,6 +345,7 @@ bool LogVal::Write(SerializationFormat* fmt) const
     case TYPE_ENUM:
     case TYPE_STRING:
     case TYPE_FILE:
+    case TYPE_FUNC:
         return fmt->Write(*val.string_val, "string");

     case TYPE_TABLE:

@@ -648,6 +651,11 @@ bool LogMgr::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt,
         // That's ok, we handle it below.
         }

+    else if ( t->Tag() == TYPE_FUNC )
+        {
+        // That's ok, we handle it below.
+        }
+
     else
         {
         reporter->Error("unsupported field type for log column");
|
||||||
|
|
||||||
if ( filter->path_func )
|
if ( filter->path_func )
|
||||||
{
|
{
|
||||||
val_list vl(2);
|
val_list vl(3);
|
||||||
vl.append(id->Ref());
|
vl.append(id->Ref());
|
||||||
vl.append(filter->path_val->Ref());
|
vl.append(filter->path_val->Ref());
|
||||||
|
vl.append(columns->Ref());
|
||||||
Val* v = filter->path_func->Call(&vl);
|
Val* v = filter->path_func->Call(&vl);
|
||||||
|
|
||||||
if ( ! v->Type()->Tag() == TYPE_STRING )
|
if ( ! v->Type()->Tag() == TYPE_STRING )
|
||||||
|
@ -907,6 +916,7 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns)
|
||||||
}
|
}
|
||||||
|
|
||||||
path = v->AsString()->CheckString();
|
path = v->AsString()->CheckString();
|
||||||
|
Unref(v);
|
||||||
|
|
||||||
#ifdef DEBUG
|
#ifdef DEBUG
|
||||||
DBG_LOG(DBG_LOGGING, "Path function for filter '%s' on stream '%s' return '%s'",
|
DBG_LOG(DBG_LOGGING, "Path function for filter '%s' on stream '%s' return '%s'",
|
||||||
|
@ -1074,6 +1084,15 @@ LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
case TYPE_FUNC:
|
||||||
|
{
|
||||||
|
ODesc d;
|
||||||
|
const Func* f = val->AsFunc();
|
||||||
|
f->Describe(&d);
|
||||||
|
lval->val.string_val = new string(d.Description());
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
case TYPE_TABLE:
|
case TYPE_TABLE:
|
||||||
{
|
{
|
||||||
ListVal* set = val->AsTableVal()->ConvertToPureList();
|
ListVal* set = val->AsTableVal()->ConvertToPureList();
|
||||||
|
|
|
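For illustration only (not part of this commit): with TYPE_FUNC handled here and in the ASCII writer below, a record field holding a function can be logged; its full body is recorded as a string. A minimal sketch patterned on the logging test at the end of this diff (stream and record names are hypothetical):

    module Demo;

    export {
        redef enum Log::ID += { DEMO };         # hypothetical stream
        type Info: record {
            msg: string;
            handler: function(i: count) : string;
        } &log;
    }

    function pick(i: count) : string
        {
        return i > 0 ? "Foo" : "Bar";
        }

    event bro_init()
        {
        Log::create_stream(DEMO, [$columns=Info]);
        # The body of pick() ends up in the log column as a string.
        Log::write(DEMO, [$msg="functions can be logged", $handler=pick]);
        }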
@@ -155,6 +155,7 @@ bool LogWriterAscii::DoWriteOne(ODesc* desc, LogVal* val, const LogField* field)
     case TYPE_ENUM:
     case TYPE_STRING:
     case TYPE_FILE:
+    case TYPE_FUNC:
         {
         int size = val->val.string_val->size();
         if ( size )
@@ -127,6 +127,7 @@ void BroObj::BadTag(const char* msg, const char* t1, const char* t2) const
     ODesc d;
     DoMsg(&d, out);
     reporter->FatalError("%s", d.Description());
+    reporter->PopLocation();
     }

 void BroObj::Internal(const char* msg) const

@@ -134,6 +135,7 @@ void BroObj::Internal(const char* msg) const
     ODesc d;
     DoMsg(&d, msg);
     reporter->InternalError("%s", d.Description());
+    reporter->PopLocation();
     }

 void BroObj::InternalWarning(const char* msg) const

@@ -141,6 +143,7 @@ void BroObj::InternalWarning(const char* msg) const
     ODesc d;
     DoMsg(&d, msg);
     reporter->InternalWarning("%s", d.Description());
+    reporter->PopLocation();
     }

 void BroObj::AddLocation(ODesc* d) const

@@ -302,7 +302,7 @@ void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out, Conne
     s += buffer;
     s += "\n";

-    fprintf(out, s.c_str());
+    fprintf(out, "%s", s.c_str());
     }

 if ( alloced )
src/bro.bif (17 lines changed)
@@ -3624,17 +3624,28 @@ function NFS3::mode2string%(mode: count%): string
 function piped_exec%(program: string, to_write: string%): bool
     %{
     const char* prog = program->CheckString();

     FILE* f = popen(prog, "w");
     if ( ! f )
         {
         reporter->Error("Failed to popen %s", prog);
-        return new Val(false, TYPE_BOOL);
+        return new Val(0, TYPE_BOOL);
         }

-    fprintf(f, "%s", to_write->CheckString());
+    const u_char* input_data = to_write->Bytes();
+    int input_data_len = to_write->Len();
+
+    int bytes_written = fwrite(input_data, 1, input_data_len, f);
+
     pclose(f);

-    return new Val(true, TYPE_BOOL);
+    if ( bytes_written != input_data_len )
+        {
+        reporter->Error("Failed to write all given data to %s", prog);
+        return new Val(0, TYPE_BOOL);
+        }
+
+    return new Val(1, TYPE_BOOL);
     %}

 ## Enables the communication system. Note that by default,
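For illustration only (not part of this commit): with the switch from fprintf() to fwrite(), the data handed to piped_exec() no longer stops at the first NUL byte. A small Bro-level usage sketch that mirrors the new test case; the output file name is arbitrary.

    event bro_init()
        {
        # The embedded \x00 bytes now survive the trip through the pipe.
        piped_exec("cat > piped_output.bin", "\x00\x00hello\x00\x00");
        }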
@@ -1070,10 +1070,10 @@ decl:
     }

 | TOK_REDEF TOK_RECORD global_id TOK_ADD_TO
-    '{' { do_doc_token_start(); } type_decl_list '}' opt_attr ';'
+    '{' { ++in_record; do_doc_token_start(); }
+    type_decl_list
+    { --in_record; do_doc_token_stop(); } '}' opt_attr ';'
     {
-    do_doc_token_stop();

     if ( ! $3->Type() )
         $3->Error("unknown identifier");
     else

@@ -1083,7 +1083,7 @@ decl:
         $3->Error("not a record type");
     else
         {
-        const char* error = add_to->AddFields($7, $9);
+        const char* error = add_to->AddFields($7, $10);
         if ( error )
             $3->Error(error);
         else if ( generate_documentation )
src/util.cc (84 lines changed)
@@ -757,9 +757,9 @@ const char* bro_path()
     const char* path = getenv("BROPATH");
     if ( ! path )
         path = ".:"
-            POLICYDEST ":"
-            POLICYDEST "/policy" ":"
-            POLICYDEST "/site";
+            BRO_SCRIPT_INSTALL_PATH ":"
+            BRO_SCRIPT_INSTALL_PATH "/policy" ":"
+            BRO_SCRIPT_INSTALL_PATH "/site";

     return path;
     }

@@ -891,60 +891,36 @@ const char* normalize_path(const char* path)
     return copy_string(new_path.c_str());
     }

-// Returns the subpath of BROPATH's policy/ directory in which the loaded
-// file in located.  If it's not under a subpath of policy/ then the full
-// path is returned, else the subpath of policy/ concatentated with any
-// directory prefix of the file is returned.
-void get_policy_subpath(const char* dir, const char* file, const char** subpath)
+// Returns the subpath of the root Bro script install/source directory in
+// which the loaded file is located.  If it's not under a subpath of that
+// directory (e.g. cwd or custom path) then the full path is returned.
+void get_script_subpath(const std::string& full_filename, const char** subpath)
     {
-    // first figure out if this is a subpath of policy/
-    const char* ploc = strstr(dir, "policy");
-    if ( ploc )
-        if ( ploc[6] == '\0' )
-            *subpath = copy_string(ploc + 6);
-        else if ( ploc[6] == '/' )
-            *subpath = copy_string(ploc + 7);
-        else
-            *subpath = copy_string(dir);
-    else
-        *subpath = copy_string(dir);
+    size_t p;
+    std::string my_subpath(full_filename);

-    // and now add any directory parts of the filename
-    char full_filename_buf[1024];
-    safe_snprintf(full_filename_buf, sizeof(full_filename_buf),
-                  "%s/%s", dir, file);
-    char* tmp = copy_string(file);
-    const char* fdir = 0;
-
-    if ( is_dir(full_filename_buf) )
-        fdir = file;
-
-    if ( ! fdir )
-        fdir = dirname(tmp);
-
-    if ( ! streq(fdir, ".") )
+    // get the parent directory of file (if not already a directory)
+    if ( ! is_dir(full_filename.c_str()) )
         {
-        size_t full_subpath_len = strlen(*subpath) + strlen(fdir) + 1;
-        bool needslash = false;
-        if ( strlen(*subpath) != 0 && (*subpath)[strlen(*subpath) - 1] != '/' )
-            {
-            ++full_subpath_len;
-            needslash = true;
-            }
-
-        char* full_subpath = new char[full_subpath_len];
-        strcpy(full_subpath, *subpath);
-        if ( needslash )
-            strcat(full_subpath, "/");
-        strcat(full_subpath, fdir);
-        delete [] *subpath;
-        *subpath = full_subpath;
+        char* tmp = copy_string(full_filename.c_str());
+        my_subpath = dirname(tmp);
+        delete [] tmp;
         }

-    const char* normalized_subpath = normalize_path(*subpath);
-    delete [] tmp;
-    delete [] *subpath;
-    *subpath = normalized_subpath;
+    // first check if this is some subpath of the installed scripts root path,
+    // if not check if it's a subpath of the script source root path,
+    // if neither, will just use the given directory
+    if ( (p=my_subpath.find(BRO_SCRIPT_INSTALL_PATH)) != std::string::npos )
+        my_subpath.erase(0, strlen(BRO_SCRIPT_INSTALL_PATH));
+    else if ( (p=my_subpath.find(BRO_SCRIPT_SOURCE_PATH)) != std::string::npos )
+        my_subpath.erase(0, strlen(BRO_SCRIPT_SOURCE_PATH));
+
+    // if root path found, remove path separators until next path component
+    if ( p != std::string::npos )
+        while ( my_subpath.size() && my_subpath[0] == '/' )
+            my_subpath.erase(0, 1);
+
+    *subpath = normalize_path(my_subpath.c_str());
     }

 extern string current_scanned_file_path;

@@ -1001,7 +977,7 @@ FILE* search_for_file(const char* filename, const char* ext,
          ! is_dir(full_filename_buf) )
         {
         if ( bropath_subpath )
-            get_policy_subpath(dir_beginning, filename, bropath_subpath);
+            get_script_subpath(full_filename_buf, bropath_subpath);
         return open_file(full_filename_buf, full_filename, load_pkgs);
         }

@@ -1010,7 +986,7 @@ FILE* search_for_file(const char* filename, const char* ext,
     if ( access(full_filename_buf, R_OK) == 0 )
         {
         if ( bropath_subpath )
-            get_policy_subpath(dir_beginning, filename, bropath_subpath);
+            get_script_subpath(full_filename_buf, bropath_subpath);
         return open_file(full_filename_buf, full_filename, load_pkgs);
         }
@@ -179,7 +179,7 @@ extern const char* bro_path();
 extern const char* bro_prefixes();
 std::string dot_canon(std::string path, std::string file, std::string prefix = "");
 const char* normalize_path(const char* path);
-void get_policy_subpath(const char* dir, const char* file, const char** subpath);
+void get_script_subpath(const std::string& full_filename, const char** subpath);
 extern FILE* search_for_file(const char* filename, const char* ext,
     const char** full_filename, bool load_pkgs, const char** bropath_subpath);

testing/btest/Baseline/bifs.piped_exec/test.txt (new binary baseline, not shown)
@@ -1,4 +1,4 @@
-loaded lcl2.site.bro
-loaded lcl.site.bro
-loaded lcl2.protocols.http.bro
-loaded lcl.protocols.http.bro
+loaded lcl2.base.utils.site.bro
+loaded lcl.base.utils.site.bro
+loaded lcl2.base.protocols.http.bro
+loaded lcl.base.protocols.http.bro
testing/btest/Baseline/core.reporter-fmt-strings/output (new file, 1 line)
@@ -0,0 +1 @@
+error in /Users/jsiwek/tmp/bro/testing/btest/.tmp/core.reporter-fmt-strings/reporter-fmt-strings.bro, line 9: not an event (dont_interpret_this(%s))
@@ -29,7 +29,7 @@ each of "columns", "event", "filter" depending on exactly what it's doing.

 :Author: Jon Siwek <jsiwek@ncsa.illinois.edu>

-:Imports: :doc:`frameworks/notice </policy/frameworks/notice/index>`
+:Imports: :doc:`policy/frameworks/software/vulnerable </scripts/policy/frameworks/software/vulnerable>`

 Summary
 ~~~~~~~

testing/btest/Baseline/doc.blacklist-reminder/.stderr (new empty file)
@@ -1,2 +1,10 @@
-[a=21, b=<uninitialized>, c=42, d=<uninitialized>]
-[a=21, b=<uninitialized>, c=42, d=XXX]
+[a=21, b=<uninitialized>, myset={
+
+}, c=42, d=<uninitialized>, anotherset={
+
+}]
+[a=21, b=<uninitialized>, myset={
+
+}, c=42, d=XXX, anotherset={
+
+}]
@@ -0,0 +1,3 @@
+element 0 = [s=bar, o=check]
+element 1 = [s=baz, o=<uninitialized>]
+[[s=bar, o=check], [s=baz, o=<uninitialized>]]
@@ -0,0 +1,4 @@
+# ts metric_id filter_name index.host index.str index.network value
+1313429477.091485 TEST_METRIC foo-bar 6.5.4.3 - - 4
+1313429477.091485 TEST_METRIC foo-bar 1.2.3.4 - - 6
+1313429477.091485 TEST_METRIC foo-bar 7.2.1.5 - - 2

@@ -0,0 +1,4 @@
+# ts metric_id filter_name index.host index.str index.network value
+1313430544.678529 TEST_METRIC foo-bar 6.5.4.3 - - 2
+1313430544.678529 TEST_METRIC foo-bar 1.2.3.4 - - 3
+1313430544.678529 TEST_METRIC foo-bar 7.2.1.5 - - 1

@@ -0,0 +1,4 @@
+# ts uid id.orig_h id.orig_p id.resp_h id.resp_p note msg sub src dst p n peer_descr actions policy_items dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network
+1313432466.662314 - - - - - Test_Notice Metrics threshold crossed by 6.5.4.3 2/1 - 6.5.4.3 - - 2 bro Notice::ACTION_LOG 4 - - - - - - 6.5.4.3 - -
+1313432466.662314 - - - - - Test_Notice Metrics threshold crossed by 1.2.3.4 3/1 - 1.2.3.4 - - 3 bro Notice::ACTION_LOG 4 - - - - - - 1.2.3.4 - -
+1313432466.662314 - - - - - Test_Notice Metrics threshold crossed by 7.2.1.5 1/1 - 7.2.1.5 - - 1 bro Notice::ACTION_LOG 4 - - - - - - 7.2.1.5 - -

@@ -1,3 +1,3 @@
 # t id.orig_h id.orig_p id.resp_h id.resp_p status country
-1299718503.40319 1.2.3.4 1234 2.3.4.5 80 success unknown
-1299718503.40319 1.2.3.4 1234 2.3.4.5 80 failure US
+1313212563.234939 1.2.3.4 1234 2.3.4.5 80 success unknown
+1313212563.234939 1.2.3.4 1234 2.3.4.5 80 failure US
@@ -1,13 +1,21 @@
-static-prefix-0.log
-static-prefix-1.log
-static-prefix-2.log
+static-prefix-0-BR.log
+static-prefix-0-MX3.log
+static-prefix-0-unknown.log
+static-prefix-1-MX.log
+static-prefix-1-US.log
+static-prefix-2-MX2.log
+static-prefix-2-UK.log
 # t id.orig_h id.orig_p id.resp_h id.resp_p status country
-1299718503.05867 1.2.3.4 1234 2.3.4.5 80 success unknown
-1299718503.05867 1.2.3.4 1234 2.3.4.5 80 success BR
-1299718503.05867 1.2.3.4 1234 2.3.4.5 80 failure MX3
+1313212701.542245 1.2.3.4 1234 2.3.4.5 80 success BR
 # t id.orig_h id.orig_p id.resp_h id.resp_p status country
-1299718503.05867 1.2.3.4 1234 2.3.4.5 80 failure US
-1299718503.05867 1.2.3.4 1234 2.3.4.5 80 failure MX
+1313212701.542245 1.2.3.4 1234 2.3.4.5 80 failure MX3
 # t id.orig_h id.orig_p id.resp_h id.resp_p status country
-1299718503.05867 1.2.3.4 1234 2.3.4.5 80 failure UK
-1299718503.05867 1.2.3.4 1234 2.3.4.5 80 failure MX2
+1313212701.542245 1.2.3.4 1234 2.3.4.5 80 success unknown
+# t id.orig_h id.orig_p id.resp_h id.resp_p status country
+1313212701.542245 1.2.3.4 1234 2.3.4.5 80 failure MX
+# t id.orig_h id.orig_p id.resp_h id.resp_p status country
+1313212701.542245 1.2.3.4 1234 2.3.4.5 80 failure US
+# t id.orig_h id.orig_p id.resp_h id.resp_p status country
+1313212701.542245 1.2.3.4 1234 2.3.4.5 80 failure MX2
+# t id.orig_h id.orig_p id.resp_h id.resp_p status country
+1313212701.542245 1.2.3.4 1234 2.3.4.5 80 failure UK

(binary baseline file updated, not shown)
@@ -27,6 +27,8 @@
 2 scripts/base/frameworks/notice/./actions/drop.bro
 2 scripts/base/frameworks/notice/./actions/email_admin.bro
 2 scripts/base/frameworks/notice/./actions/page.bro
+2 scripts/base/frameworks/notice/./actions/add-geodata.bro
+2 scripts/base/frameworks/notice/./extend-email/hostnames.bro
 1 scripts/base/frameworks/dpd/__load__.bro
 2 scripts/base/frameworks/dpd/./main.bro
 1 scripts/base/frameworks/signatures/__load__.bro
@@ -1,6 +1,12 @@
 # @TEST-EXEC: bro %INPUT >output
 # @TEST-EXEC: btest-diff output
+# @TEST-EXEC: btest-diff test.txt

 global cmds = "print \"hello world\";";
 cmds = string_cat(cmds, "\nprint \"foobar\";");
 piped_exec("bro", cmds);
+
+# Test null output.
+piped_exec("cat > test.txt", "\x00\x00hello\x00\x00");
@@ -9,11 +9,11 @@
 @TEST-END-FILE

 @TEST-START-FILE lcl.base.utils.site.bro
-print "loaded lcl.base.site.bro";
+print "loaded lcl.base.utils.site.bro";
 @TEST-END-FILE

 @TEST-START-FILE lcl2.base.utils.site.bro
-print "loaded lcl2.base.site.bro";
+print "loaded lcl2.base.utils.site.bro";
 @TEST-END-FILE

 @TEST-START-FILE lcl.base.protocols.http.bro

testing/btest/core/reporter-fmt-strings.bro (new file, 10 lines)
@@ -0,0 +1,10 @@
+# The format string below should end up as a literal part of the reporter's
+# error message to stderr and shouldn't be replaced internally.
+#
+# @TEST-EXEC-FAIL: bro %INPUT >output 2>&1
+# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output
+
+event bro_init()
+    {
+    event dont_interpret_this("%s");
+    }

testing/btest/doc/blacklist-reminder.test (new file, 8 lines)
@@ -0,0 +1,8 @@
+# This test will fail if there are Bro scripts that have been temporarily
+# blacklisted from the documentation generation process for some reason
+# (e.g. they're a work-in-progress or otherwise fail to parse).  It's meant
+# to serve as a reminder that some future action may be needed to generate
+# documentation for the blacklisted scripts.
+#
+# @TEST-EXEC: $DIST/doc/scripts/genDocSourcesList.sh
+# @TEST-EXEC: btest-diff .stderr
@@ -4,11 +4,13 @@
 type Foo: record {
     a: count;
     b: count &optional;
+    myset: set[count] &default=set();
 };

 redef record Foo += {
     c: count &default=42;
     d: count &optional;
+    anotherset: set[count] &default=set();
 };

 global f1: Foo = [$a=21];

testing/btest/language/vector-list-init-records.bro (new file, 20 lines)
@@ -0,0 +1,20 @@
+# Initializing a vector with a list of records should promote elements as
+# necessary to match the vector's yield type.
+
+# @TEST-EXEC: bro %INPUT >output
+# @TEST-EXEC: btest-diff output
+
+type Foo: record {
+    s: string;
+    o: string &optional;
+};
+
+const v: vector of Foo = {
+    [$s="bar", $o="check"],
+    [$s="baz"]
+};
+
+for ( i in v )
+    print fmt("element %d = %s", i, v[i]);
+
+print v;

testing/btest/policy/frameworks/metrics/basic-cluster.bro (new file, 38 lines)
@@ -0,0 +1,38 @@
+# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
+# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT
+# @TEST-EXEC: sleep 1
+# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
+# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
+# @TEST-EXEC: btest-bg-wait -k 6
+# @TEST-EXEC: btest-diff manager-1/metrics.log
+
+@TEST-START-FILE cluster-layout.bro
+redef Cluster::nodes = {
+    ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1")],
+    ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1")],
+    ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"],
+    ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"],
+};
+@TEST-END-FILE
+
+redef enum Metrics::ID += {
+    TEST_METRIC,
+};
+
+event bro_init() &priority=5
+    {
+    Metrics::add_filter(TEST_METRIC,
+                        [$name="foo-bar",
+                         $break_interval=3secs]);
+    }
+
+@if ( Cluster::local_node_type() == Cluster::WORKER )
+
+event bro_init()
+    {
+    Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
+    Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
+    Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
+    }
+
+@endif

testing/btest/policy/frameworks/metrics/basic.bro (new file, 16 lines)
@@ -0,0 +1,16 @@
+# @TEST-EXEC: bro %INPUT
+# @TEST-EXEC: btest-diff metrics.log
+
+redef enum Metrics::ID += {
+    TEST_METRIC,
+};
+
+event bro_init() &priority=5
+    {
+    Metrics::add_filter(TEST_METRIC,
+                        [$name="foo-bar",
+                         $break_interval=3secs]);
+    Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
+    Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
+    Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
+    }

testing/btest/policy/frameworks/metrics/notice.bro (new file, 23 lines)
@@ -0,0 +1,23 @@
+# @TEST-EXEC: bro %INPUT
+# @TEST-EXEC: btest-diff notice.log
+
+redef enum Notice::Type += {
+    Test_Notice,
+};
+
+redef enum Metrics::ID += {
+    TEST_METRIC,
+};
+
+event bro_init() &priority=5
+    {
+    Metrics::add_filter(TEST_METRIC,
+                        [$name="foo-bar",
+                         $break_interval=3secs,
+                         $note=Test_Notice,
+                         $notice_threshold=1,
+                         $log=F]);
+    Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
+    Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
+    Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
+    }
@@ -21,11 +21,11 @@ export {

 global c = -1;

-function path_func(id: Log::ID, path: string) : string
+function path_func(id: Log::ID, path: string, rec: Log) : string
     {
     c = (c + 1) % 3;

-    return fmt("%s-%d", path, c);
+    return fmt("%s-%d-%s", path, c, rec$country);
     }

 event bro_init()
@@ -29,9 +29,18 @@ export {
         se: set[string];
         vc: vector of count;
         ve: vector of string;
+        f: function(i: count) : string;
     } &log;
 }

+function foo(i : count) : string
+    {
+    if ( i > 0 )
+        return "Foo";
+    else
+        return "Bar";
+    }
+
 event bro_init()
     {
     Log::create_stream(SSH, [$columns=Log]);

@@ -56,7 +65,8 @@ event bro_init()
         $ss=set("AA", "BB", "CC"),
         $se=empty_set,
         $vc=vector(10, 20, 30),
-        $ve=empty_vector
+        $ve=empty_vector,
+        $f=foo
         ]);
     }