Mirror of https://github.com/zeek/zeek.git

commit b1dbd757a6
Merge remote-tracking branch 'origin/master' into topic/johanna/tls-more-data

1468 changed files with 41493 additions and 19065 deletions
.gitignore (vendored, 1 change)

@@ -1,2 +1,3 @@
build
tmp
*.gcov
.gitmodules (vendored, 6 changes)

@@ -19,12 +19,12 @@
[submodule "src/3rdparty"]
    path = src/3rdparty
    url = git://git.bro.org/bro-3rdparty
[submodule "aux/plugins"]
    path = aux/plugins
    url = git://git.bro.org/bro-plugins
[submodule "aux/broker"]
    path = aux/broker
    url = git://git.bro.org/broker
[submodule "aux/netcontrol-connectors"]
    path = aux/netcontrol-connectors
    url = git://git.bro.org/bro-netcontrol
[submodule "aux/bifcl"]
    path = aux/bifcl
    url = git://git.bro.org/bifcl
.travis.yml (new file, 34 lines)

@@ -0,0 +1,34 @@
language: cpp

services:
  - docker

addons:
  apt:
    packages:
      - libpcap-dev
      - libssl-dev

branches:
  only:
    - master

notifications:
  email:
    recipients:
      - bro-commits-internal@bro.org

# Build Bro and run tests in the following Linux distros (specifying "travis"
# builds bro in Travis without using docker).
env:
  - distro: centos_7
  - distro: debian_9
  - distro: fedora_28
  - distro: ubuntu_16.04
  - distro: ubuntu_18.04

install: sh testing/scripts/travis-job install $distro

before_script: sh testing/scripts/travis-job build $distro

script: sh testing/scripts/travis-job run $distro
115
CMakeLists.txt
115
CMakeLists.txt
|
@ -2,13 +2,25 @@ project(Bro C CXX)
|
|||
|
||||
# When changing the minimum version here, also adapt
|
||||
# aux/bro-aux/plugin-support/skeleton/CMakeLists.txt
|
||||
cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
|
||||
cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR)
|
||||
|
||||
include(cmake/CommonCMakeConfig.cmake)
|
||||
|
||||
########################################################################
|
||||
## Project/Build Configuration
|
||||
|
||||
if ( ENABLE_CCACHE )
|
||||
find_program(CCACHE_PROGRAM ccache)
|
||||
|
||||
if ( NOT CCACHE_PROGRAM )
|
||||
message(FATAL_ERROR "ccache not found")
|
||||
endif ()
|
||||
|
||||
message(STATUS "Using ccache: ${CCACHE_PROGRAM}")
|
||||
set(CMAKE_C_COMPILER_LAUNCHER ${CCACHE_PROGRAM})
|
||||
set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_PROGRAM})
|
||||
endif ()
|
||||
|
||||
set(BRO_ROOT_DIR ${CMAKE_INSTALL_PREFIX})
|
||||
if (NOT BRO_SCRIPT_INSTALL_PATH)
|
||||
# set the default Bro script installation path (user did not specify one)
|
||||
|
@ -40,12 +52,26 @@ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev.csh
|
|||
"setenv PATH \"${CMAKE_CURRENT_BINARY_DIR}/src\":$PATH\n")
|
||||
|
||||
file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/VERSION" VERSION LIMIT_COUNT 1)
|
||||
execute_process(COMMAND grep "^#define *BRO_PLUGIN_API_VERSION"
|
||||
INPUT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/src/plugin/Plugin.h
|
||||
OUTPUT_VARIABLE API_VERSION
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
string(REGEX REPLACE "^#define.*VERSION *" "" API_VERSION "${API_VERSION}")
|
||||
|
||||
string(REPLACE "." " " version_numbers ${VERSION})
|
||||
separate_arguments(version_numbers)
|
||||
list(GET version_numbers 0 VERSION_MAJOR)
|
||||
list(GET version_numbers 1 VERSION_MINOR)
|
||||
set(VERSION_MAJ_MIN "${VERSION_MAJOR}.${VERSION_MINOR}")
|
||||
|
||||
set(VERSION_C_IDENT "${VERSION}_plugin_${API_VERSION}")
|
||||
string(REGEX REPLACE "-[0-9]*$" "_git" VERSION_C_IDENT "${VERSION_C_IDENT}")
|
||||
string(REGEX REPLACE "[^a-zA-Z0-9_\$]" "_" VERSION_C_IDENT "${VERSION_C_IDENT}")
|
||||
|
||||
if(${ENABLE_DEBUG})
|
||||
set(VERSION_C_IDENT "${VERSION_C_IDENT}_debug")
|
||||
endif()
|
||||
|
||||
########################################################################
|
||||
## Dependency Configuration
|
||||
|
||||
|
@ -69,16 +95,34 @@ FindRequiredPackage(OpenSSL)
|
|||
FindRequiredPackage(BIND)
|
||||
FindRequiredPackage(ZLIB)
|
||||
|
||||
if (NOT BinPAC_ROOT_DIR AND
|
||||
find_package(CAF COMPONENTS core io openssl)
|
||||
if (CAF_FOUND)
|
||||
# e.g. if not using embedded CAF, then need to know where to look
|
||||
# for CAF headers since that may differ from where Broker headers
|
||||
# are found (and including a Broker header may pull in CAF headers).
|
||||
include_directories(BEFORE ${CAF_INCLUDE_DIRS})
|
||||
endif ()
|
||||
|
||||
if (NOT BINPAC_EXE_PATH AND
|
||||
EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/aux/binpac/CMakeLists.txt)
|
||||
add_subdirectory(aux/binpac)
|
||||
endif ()
|
||||
FindRequiredPackage(BinPAC)
|
||||
|
||||
if ( NOT BIFCL_EXE_PATH )
|
||||
add_subdirectory(aux/bifcl)
|
||||
endif ()
|
||||
|
||||
if (ENABLE_JEMALLOC)
|
||||
find_package(JeMalloc)
|
||||
endif ()
|
||||
|
||||
if ( BISON_VERSION AND BISON_VERSION VERSION_LESS 2.5 )
|
||||
set(MISSING_PREREQS true)
|
||||
list(APPEND MISSING_PREREQ_DESCS
|
||||
" Could not find prerequisite package Bison >= 2.5, found: ${BISON_VERSION}")
|
||||
endif ()
|
||||
|
||||
if (MISSING_PREREQS)
|
||||
foreach (prereq ${MISSING_PREREQ_DESCS})
|
||||
message(SEND_ERROR ${prereq})
|
||||
|
@ -88,7 +132,6 @@ endif ()
|
|||
|
||||
include_directories(BEFORE
|
||||
${PCAP_INCLUDE_DIR}
|
||||
${OPENSSL_INCLUDE_DIR}
|
||||
${BIND_INCLUDE_DIR}
|
||||
${BinPAC_INCLUDE_DIR}
|
||||
${ZLIB_INCLUDE_DIR}
|
||||
|
@ -98,11 +141,20 @@ include_directories(BEFORE
|
|||
# Optional Dependencies
|
||||
|
||||
set(USE_GEOIP false)
|
||||
find_package(LibGeoIP)
|
||||
if (LIBGEOIP_FOUND)
|
||||
find_package(LibMMDB)
|
||||
if (LibMMDB_FOUND)
|
||||
set(USE_GEOIP true)
|
||||
include_directories(BEFORE ${LibGeoIP_INCLUDE_DIR})
|
||||
list(APPEND OPTLIBS ${LibGeoIP_LIBRARY})
|
||||
include_directories(BEFORE ${LibMMDB_INCLUDE_DIR})
|
||||
list(APPEND OPTLIBS ${LibMMDB_LIBRARY})
|
||||
endif ()
|
||||
|
||||
set(USE_KRB5 false)
|
||||
if ( ${CMAKE_SYSTEM_NAME} MATCHES Linux )
|
||||
find_package(LibKrb5)
|
||||
if (LibKrb5_FOUND)
|
||||
set(USE_KRB5 true)
|
||||
list(APPEND OPTLIBS ${LibKrb5_LIBRARY})
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
set(HAVE_PERFTOOLS false)
|
||||
|
@ -138,6 +190,12 @@ if (ENABLE_PERFTOOLS_DEBUG OR ENABLE_PERFTOOLS)
|
|||
# perftools weren't found
|
||||
endif ()
|
||||
|
||||
# Making sure any non-standard OpenSSL includes get searched earlier
|
||||
# than other dependencies which tend to be in standard system locations
|
||||
# and thus cause the system OpenSSL headers to still be picked up even
|
||||
# if one specifies --with-openssl (which may be common).
|
||||
include_directories(BEFORE ${OPENSSL_INCLUDE_DIR})
|
||||
|
||||
set(brodeps
|
||||
${BinPAC_LIBRARY}
|
||||
${PCAP_LIBRARY}
|
||||
|
@ -167,6 +225,10 @@ include(CheckNameserCompat)
|
|||
include(GetArchitecture)
|
||||
include(RequireCXX11)
|
||||
|
||||
if ( (OPENSSL_VERSION VERSION_EQUAL "1.1.0") OR (OPENSSL_VERSION VERSION_GREATER "1.1.0") )
|
||||
set(BRO_HAVE_OPENSSL_1_1 true CACHE INTERNAL "" FORCE)
|
||||
endif()
|
||||
|
||||
# Tell the plugin code that we're building as part of the main tree.
|
||||
set(BRO_PLUGIN_INTERNAL_BUILD true CACHE INTERNAL "" FORCE)
|
||||
|
||||
|
@ -178,22 +240,40 @@ endif ()
|
|||
|
||||
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/bro-config.h.in
|
||||
${CMAKE_CURRENT_BINARY_DIR}/bro-config.h)
|
||||
include_directories(${CMAKE_CURRENT_BINARY_DIR})
|
||||
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/bro-config.h DESTINATION include/bro)
|
||||
|
||||
if ( CAF_ROOT_DIR )
|
||||
set(BRO_CONFIG_CAF_ROOT_DIR ${CAF_ROOT_DIR})
|
||||
else ()
|
||||
set(BRO_CONFIG_CAF_ROOT_DIR ${BRO_ROOT_DIR})
|
||||
endif ()
|
||||
|
||||
if ( BinPAC_ROOT_DIR )
|
||||
set(BRO_CONFIG_BINPAC_ROOT_DIR ${BinPAC_ROOT_DIR})
|
||||
else ()
|
||||
set(BRO_CONFIG_BINPAC_ROOT_DIR ${BRO_ROOT_DIR})
|
||||
endif ()
|
||||
|
||||
set(BRO_CONFIG_BROKER_ROOT_DIR ${BRO_ROOT_DIR})
|
||||
|
||||
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/bro-config.in
|
||||
${CMAKE_CURRENT_BINARY_DIR}/bro-config @ONLY)
|
||||
install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/bro-config DESTINATION bin)
|
||||
|
||||
include_directories(${CMAKE_CURRENT_BINARY_DIR})
|
||||
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/cmake DESTINATION share/bro
|
||||
USE_SOURCE_PERMISSIONS)
|
||||
|
||||
########################################################################
|
||||
## Recurse on sub-directories
|
||||
|
||||
if ( ENABLE_BROKER )
|
||||
add_subdirectory(aux/broker)
|
||||
set(brodeps ${brodeps} broker)
|
||||
add_definitions(-DENABLE_BROKER)
|
||||
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/broker)
|
||||
endif ()
|
||||
add_subdirectory(aux/broker)
|
||||
set(brodeps ${brodeps} broker)
|
||||
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/broker
|
||||
${CMAKE_CURRENT_BINARY_DIR}/aux/broker)
|
||||
include_directories(BEFORE ${CAF_INCLUDE_DIR_CORE})
|
||||
include_directories(BEFORE ${CAF_INCLUDE_DIR_IO})
|
||||
include_directories(BEFORE ${CAF_INCLUDE_DIR_OPENSSL})
|
||||
|
||||
add_subdirectory(src)
|
||||
add_subdirectory(scripts)
|
||||
|
@ -232,6 +312,8 @@ endif ()
|
|||
message(
|
||||
"\n====================| Bro Build Summary |====================="
|
||||
"\n"
|
||||
"\nBuild type: ${CMAKE_BUILD_TYPE}"
|
||||
"\nBuild dir: ${CMAKE_BINARY_DIR}"
|
||||
"\nInstall prefix: ${CMAKE_INSTALL_PREFIX}"
|
||||
"\nBro Script Path: ${BRO_SCRIPT_INSTALL_PATH}"
|
||||
"\nDebug mode: ${ENABLE_DEBUG}"
|
||||
|
@ -242,13 +324,12 @@ message(
|
|||
"\nCXXFLAGS: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${BuildType}}"
|
||||
"\nCPP: ${CMAKE_CXX_COMPILER}"
|
||||
"\n"
|
||||
"\nBroker: ${ENABLE_BROKER}"
|
||||
"\nBroker Python: ${BROKER_PYTHON_BINDINGS}"
|
||||
"\nBroccoli: ${INSTALL_BROCCOLI}"
|
||||
"\nBroctl: ${INSTALL_BROCTL}"
|
||||
"\nAux. Tools: ${INSTALL_AUX_TOOLS}"
|
||||
"\n"
|
||||
"\nGeoIP: ${USE_GEOIP}"
|
||||
"\nlibmaxminddb: ${USE_GEOIP}"
|
||||
"\nKerberos: ${USE_KRB5}"
|
||||
"\ngperftools found: ${HAVE_PERFTOOLS}"
|
||||
"\n tcmalloc: ${USE_PERFTOOLS_TCMALLOC}"
|
||||
"\n debugging: ${USE_PERFTOOLS_DEBUG}"
|
||||
|
|
495
NEWS
495
NEWS
|
@ -4,6 +4,501 @@ release. For an exhaustive list of changes, see the ``CHANGES`` file
|
|||
(note that submodules, such as BroControl and Broccoli, come with
|
||||
their own ``CHANGES``.)
|
||||
|
||||
|
||||
Bro 2.6 (in progress)
|
||||
=====================
|
||||
|
||||
New Functionality
|
||||
-----------------
|
||||
|
||||
- Bro has switched to using the new Broker library for all its
|
||||
communication. Broker's API has been completely redesigned (compared
|
||||
to the version in 2.5), and much of its implementation has been
|
||||
redone. There's a new script-level "broker" framework that
|
||||
supersedes the old "communication" framework, which is now
|
||||
deprecated. The "cluster" and "control" frameworks have been ported
|
||||
to Broker; same for BroControl. For more about the new Broker
|
||||
framework, see doc/frameworks/broker.rst (there's also a guide there
|
||||
for porting existing Bro scripts to Broker). For more about Broker
|
||||
itself, including its API for external applications, see
|
||||
aux/broker/doc.
|
||||
|
||||
TODO: Replace documentation paths with URLs once these are available
|
||||
online.
|
||||
|
||||
  When using BroControl, the meaning of proxies has changed with
  Broker. If you are upgrading and have configured more than one proxy
  currently, we recommend going back down to a single proxy node now.
  Unless you are using custom scripts doing significant data
  distribution themselves through the new cluster framework, that
  should be fine.
|
||||
|
||||
- Bro now has new "is" and "as" script operators for dynamic
|
||||
type-checking and casting.
|
||||
|
||||
- "v as T" casts a value v into a value of type T, assuming that's
|
||||
possible (if not, it triggers a runtime error).
|
||||
|
||||
- "v is T" returns a boolean indicating whether value v can be
|
||||
casted into type T (i.e., if true then "v as T" will succeed).
|
||||
|
||||
This casting supports three cases currently: (1) a value of
|
||||
declared type "any" can be casted to its actual underlying type;
|
||||
(2) Broker values can be casted to their corresponding script
|
||||
types; and (3) all values can be casted to their declared types
|
||||
(i.e., a no-op).
|
||||
|
||||
Example for "any"::
|
||||
|
||||
# cat a.bro
|
||||
function check(a: any)
|
||||
{
|
||||
local s: string = "default";
|
||||
|
||||
if ( a is string )
|
||||
s = (a as string);
|
||||
|
||||
print fmt("s=%s", s);
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
check("Foo");
|
||||
check(1);
|
||||
}
|
||||
|
||||
# bro a.bro
|
||||
s=Foo
|
||||
s=default
|
||||
|
||||
- The existing "switch" got extended to now also support switching by
|
||||
type rather than value. The new syntax supports two type-based versions
|
||||
of "case":
|
||||
|
||||
- "case type T: ...": Take branch if operand can be casted to type T.
|
||||
|
||||
- "case type T as x: ... ": Take branch if operand can be casted
|
||||
to type T, and make the casted value available through ID "x".
|
||||
|
||||
Multiple types can be listed per branch, separated by commas.
|
||||
However, one cannot mix cases with expressions and types inside a
|
||||
single switch statement.
|
||||
|
||||
Example::
|
||||
|
||||
function switch_one(v: any)
|
||||
{
|
||||
switch (v) {
|
||||
case type string:
|
||||
print "It's a string!";
|
||||
break;
|
||||
|
||||
case type count as c:
|
||||
print "It's a count!", c;
|
||||
break;
|
||||
|
||||
case type bool, type addr:
|
||||
print "It's a bool or address!";
|
||||
break;
|
||||
|
||||
default:
|
||||
print "Something else!";
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
- Bro now comes with a new "configuration framework" that allows
|
||||
updating script options dynamically at runtime. This functionality
|
||||
consists of three larger pieces working together:
|
||||
|
||||
- Option variables: The new "option" keyword allows variables to be
|
||||
declared as runtime options. Such variables cannot be changed
|
||||
using normal assignments. Instead, they can be changed using the
|
||||
new function Option::set.
|
||||
|
||||
It is possible to "subscribe" to an option through
|
||||
Option::set_change_handler, which will trigger a handler callback
|
||||
when an option changes. Change handlers can optionally modify
|
||||
values before they are applied by returning the desired value, or
|
||||
reject updates by returning the old value. Priorities can be
|
||||
specified if there are several handlers for one option.
|
||||
|
||||
Example script::
|
||||
|
||||
option testbool: bool = T;
|
||||
|
||||
function option_changed(ID: string, new_value: bool): bool
|
||||
{
|
||||
print fmt("Value of %s changed from %s to %s", ID, testbool, new_value);
|
||||
return new_value;
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
print "Old value", testbool;
|
||||
Option::set_change_handler("testbool", option_changed);
|
||||
Option::set("testbool", F);
|
||||
print "New value", testbool;
|
||||
}
|
||||
|
||||
- Script-level configuration framework: The new script framework
|
||||
base/framework/config facilitates reading in new option values
|
||||
from external files at runtime. The format for these files looks
|
||||
like this:
|
||||
|
||||
[option name][tab/spaces][new variable value]
|
||||
|
||||
Configuration files to read can be specified by adding them to
|
||||
Config::config_files.
|
||||
|
||||
Usage example::
|
||||
|
||||
redef Config::config_files += { "/path/to/config.dat" };
|
||||
|
||||
module TestConfig;
|
||||
|
||||
export {
|
||||
option testbool: bool = F;
|
||||
}
|
||||
|
||||
The specified file will now be monitored continuously for changes, so
|
||||
that writing "testbool T" into /path/to/config.dat will
|
||||
automatically update the option's value accordingly.
|
||||
|
||||
The configuration framework creates a config.log that shows all
|
||||
value changes that took place.
|
||||
|
||||
- Config reader: Internally, the configuration framework uses a new
|
||||
type of input reader to read such configuration files into Bro.
|
||||
The reader uses the option name to look up the type that variable
|
||||
has, converts the read value to the correct type, and then updates
|
||||
the option's value. Example script use::
|
||||
|
||||
type Idx: record {
|
||||
option_name: string;
|
||||
};
|
||||
|
||||
type Val: record {
|
||||
option_val: string;
|
||||
};
|
||||
|
||||
global currconfig: table[string] of string = table();
|
||||
|
||||
event InputConfig::new_value(name: string, source: string, id: string, value: any)
|
||||
{
|
||||
print id, value;
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Input::add_table([$reader=Input::READER_CONFIG, $source="../configfile", $name="configuration", $idx=Idx, $val=Val, $destination=currconfig, $want_record=F]);
|
||||
}
|
||||
|
||||
- Support for OCSP and Signed Certificate Timestamp. This adds the
|
||||
following events and BIFs:
|
||||
|
||||
- Events: ocsp_request, ocsp_request_certificate,
|
||||
ocsp_response_status, ocsp_response_bytes
|
||||
ocsp_response_certificate ocsp_extension
|
||||
x509_ocsp_ext_signed_certificate_timestamp
|
||||
ssl_extension_signed_certificate_timestamp
|
||||
|
||||
- Functions: sct_verify, x509_subject_name_hash,
|
||||
x509_issuer_name_hash x509_spki_hash
|
||||
|
||||
- The SSL scripts provide a new hook "ssl_finishing(c: connection)"
|
||||
to trigger actions after the handshake has concluded.
|
||||
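
  For illustration, a minimal handler might look like this (assuming the
  hook lives in the SSL module namespace)::

      hook SSL::ssl_finishing(c: connection)
          {
          print "TLS handshake finished", c$id$resp_h;
          }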
|
||||
- New functionality has been added to the TLS parser, adding several
|
||||
events. These events mostly extract information from the server and client
|
||||
key exchange messages. The new events are:
|
||||
|
||||
ssl_ecdh_server_params, ssl_dh_server_params, ssl_server_signature,
|
||||
ssl_ecdh_client_params, ssl_dh_client_params, ssl_rsa_client_pms
|
||||
|
||||
Since ssl_ecdh_server_params contains more information than the old
|
||||
ssl_server_curve event, ssl_server_curve is now marked as deprecated.
|
||||
|
||||
- Functions for retrieving files by their ID have been added:

  Files::file_exists, Files::lookup_file
|
||||
|
||||
- New functions in the logging API: Log::get_filter_names, Log::enable_stream
|
||||
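
  For illustration, a hypothetical sketch using the conn stream as an
  example (assuming Log::enable_stream takes a stream ID and
  Log::get_filter_names returns the set of attached filter names)::

      event bro_init()
          {
          # Re-enable a stream that was previously disabled ...
          Log::enable_stream(Conn::LOG);

          # ... and list the filters currently attached to it.
          for ( name in Log::get_filter_names(Conn::LOG) )
              print "conn.log filter", name;
          }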
|
||||
- HTTP now recognizes and skips upgraded/websocket connections. A new event,
|
||||
http_connection_upgrade, is raised in such cases.
|
||||
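
  For illustration, a minimal handler (assuming the event carries the
  connection and the name of the protocol being upgraded to)::

      event http_connection_upgrade(c: connection, protocol: string)
          {
          print "HTTP connection upgraded", c$uid, protocol;
          }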
|
||||
- Added a MOUNT3 protocol parser
|
||||
|
||||
- This is not enabled by default (no ports are registered and no
|
||||
DPD signatures exist, so no connections will end up attaching the
|
||||
new Mount analyzer). If it were to be activated by users, the
|
||||
following events are available: mount_proc_null, mount_proc_mnt,
|
||||
mount_proc_umnt, mount_proc_umnt_all, mount_proc_not_implemented,
|
||||
mount_reply_status.
|
||||
|
||||
- Added new NFS events: nfs_proc_symlink, nfs_proc_link, nfs_proc_sattr
|
||||
|
||||
- The SMB scripts in policy/protocols/smb are now moved into base/protocols/smb
|
||||
and loaded/enabled by default.
|
||||
|
||||
- Added new SMB events: smb1_transaction_secondary_request,
|
||||
smb1_transaction2_secondary_request, smb1_transaction_response
|
||||
|
||||
- Bro can now decrypt Kerberos tickets, and retrieve the authentication from
|
||||
them, given a suitable keytab file.
|
||||
|
||||
- Added support for bitwise operations on "count" values. '&', '|' and
  '^' are binary "and", "or" and "xor" operators, and '~' is a unary
  ones-complement operator.
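
  For illustration, a minimal hypothetical snippet exercising the new
  operators (the values are arbitrary)::

      event bro_init()
          {
          local flags: count = 0xF0;
          local mask: count = 0x1F;

          print flags & mask;   # bitwise and -> 16
          print flags | mask;   # bitwise or  -> 255
          print flags ^ mask;   # bitwise xor -> 239
          print ~mask;          # ones-complement of the 64-bit count
          }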
|
||||
- The '&' and '|' operators can apply to patterns, too. p1 & p2 yields
  a pattern that represents matching p1 followed by p2, and p1 | p2 yields
  a pattern representing matching p1 or p2. The p1 | p2 functionality was
  semi-present in previous versions of Bro, but required constants as
  its operands; now you can use any pattern-valued expressions.
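
  For illustration, a small hypothetical example composing patterns from
  pattern-valued expressions (the patterns themselves are arbitrary)::

      event bro_init()
          {
          local p1 = /foo/;
          local p2 = /bar/;

          print (p1 & p2) == "foobar";   # T: p1 followed by p2
          print (p1 | p2) == "bar";      # T: either p1 or p2
          }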
|
||||
- You can now specify that a pattern matches in a case-insensitive
|
||||
fashion by adding 'i' to the end of its specification. So for example
|
||||
/fOO/i == "Foo" yields T, as does /fOO/i in "xFoObar". Characters
|
||||
enclosed in quotes however keep their casing, so /"fOO"/i in "xFoObar"
|
||||
yields F, though it yields T for "xfOObar".
|
||||
|
||||
You can achieve the same functionality for a subpattern enclosed in
|
||||
parentheses by adding "?i:" to the open parenthesis. So for example
|
||||
"/foo|(?i:bar)/" will match "BaR", but not "FoO".
|
||||
|
||||
For both ways of specifying case-insensitivity, characters enclosed in
|
||||
double quotes maintain their case-sensitivity. So for example /"foo"/i
|
||||
will not match "Foo", but it will match "foo".
|
||||
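
  For illustration, a few hypothetical checks showing both forms of
  case-insensitive matching described above::

      event bro_init()
          {
          print /foo/i == "FOO";           # T: whole pattern case-insensitive
          print /foo|(?i:bar)/ == "BAR";   # T: only the group is case-insensitive
          print /"foo"/i == "FOO";         # F: quoted characters keep their case
          }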
|
||||
- "make install" now installs Bro's include headers (and more) into
|
||||
--prefix so that compiling plugins does no longer need access to a
|
||||
source/build tree. For OS distributions, this also facilitates
|
||||
creating "bro-devel" packages providing all files necessary to build
|
||||
plugins.
|
||||
|
||||
- Bro now supports PPPoE over QinQ.
|
||||
|
||||
- Bro now supports OpenSSL 1.1.
|
||||
|
||||
- The new connection/conn.log history character 'W' indicates that
|
||||
the originator ('w' = responder) advertised a TCP zero window
|
||||
(instructing the peer to not send any data until receiving a
|
||||
non-zero window).
|
||||
|
||||
- The connection/conn.log history characters 'C' (checksum error seen),
|
||||
'T' (retransmission seen), and 'W' (zero window advertised) are now
|
||||
repeated in a logarithmic fashion upon seeing multiple instances
|
||||
of the corresponding behavior. Thus a connection with 2 C's in its
|
||||
history means that the originator sent >= 10 packets with checksum
|
||||
errors; 3 C's means >= 100, etc.
|
||||
|
||||
- The above connection history behaviors occurring multiple times
  (i.e., starting at 10 instances, then again for 100 instances,
  etc.) generate corresponding events: tcp_multiple_checksum_errors,
  udp_multiple_checksum_errors, tcp_multiple_zero_windows, and
  tcp_multiple_retransmissions. Each has the same form, e.g.
|
||||
|
||||
event tcp_multiple_retransmissions(c: connection, is_orig: bool,
|
||||
threshold: count);
|
||||
|
||||
- Added support for set union, intersection, difference, and comparison
|
||||
operations. The corresponding operators for the first three are
|
||||
"s1 | s2", "s1 & s2", and "s1 - s2". Relationals are in terms
|
||||
of subsets, so "s1 < s2" yields true if s1 is a proper subset of s2
|
||||
and "s1 == s2" if the two sets have exactly the same elements.
|
||||
"s1 <= s2" holds for subsets or equality, and similarly "s1 != s2",
|
||||
"s1 > s2", and "s1 >= s2" have the expected meanings in terms
|
||||
of non-equality, proper superset, and superset-or-equal.
|
||||
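
  For illustration, a brief hypothetical example of the new set operators
  (the element values are arbitrary)::

      event bro_init()
          {
          local s1 = set(1, 2, 3);
          local s2 = set(2, 3, 4);

          print s1 | s2;          # union: 1, 2, 3, 4 (in some order)
          print s1 & s2;          # intersection: 2, 3
          print s1 - s2;          # difference: 1
          print set(2, 3) < s1;   # T: proper subset
          }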
|
||||
- An expression of the form "v += e" will append the value of the expression
  "e" to the end of the vector "v" (of course assuming type-compatibility).
|
||||
|
||||
Changed Functionality
|
||||
---------------------
|
||||
|
||||
- All communication is now handled through Broker, requiring changes
|
||||
to existing scripts to port them over to the new API. The Broker
|
||||
framework documentation comes with a porting guide.
|
||||
|
||||
- The DHCP analyzer and its script-layer interface have been rewritten.
|
||||
|
||||
- Supports more DHCP options than before.
|
||||
|
||||
- The DHCP log now represents DHCP sessions based on transaction ID
|
||||
and works on Bro cluster deployments.
|
||||
|
||||
- Removed the policy/protocols/dhcp/known-devices-and-hostnames.bro
|
||||
script since it's generally less relevant now with the updated log.
|
||||
|
||||
- Removed the base/protocols/dhcp/utils.bro script and thus the
|
||||
'reverse_ip' function.
|
||||
|
||||
- Replaced all DHCP events with the single 'dhcp_message' event.
|
||||
The list of removed events includes:
|
||||
|
||||
- dhcp_discover
|
||||
- dhcp_offer
|
||||
- dhcp_request
|
||||
- dhcp_decline
|
||||
- dhcp_ack
|
||||
- dhcp_nak
|
||||
- dhcp_release
|
||||
- dhcp_inform
|
||||
|
||||
- Removed policy/misc/known-devices.bro script and thus
|
||||
known_devices.log will no longer be created.
|
||||
|
||||
- The --with-binpac= configure option has changed to mean "path
|
||||
to the binpac executable" instead of "path to binpac installation root".
|
||||
|
||||
- The MIME types used to identify X.509 certificates in SSL
|
||||
connections changed from "application/pkix-cert" to
|
||||
"application/x-x509-user-cert" for host certificates and
|
||||
"application/x-x509-ca-cert" for CA certificates.
|
||||
|
||||
- With the new ssl_ecdh_server_params event, the ssl_server_curve
|
||||
event is considered deprecated and will be removed in a future
|
||||
version of Bro.
|
||||
|
||||
- The Socks analyzer no longer logs passwords by default. This
|
||||
brings its behavior in line with the FTP/HTTP analyzers which also
|
||||
do not log passwords by default.
|
||||
|
||||
To restore the previous behavior and log Socks passwords, use:
|
||||
|
||||
redef SOCKS::default_capture_password = T;
|
||||
|
||||
- The DNS base scripts no longer generate some noisy and annoying
|
||||
weirds (dns_unmatched_msg, dns_unmatched_msg_quantity, dns_unmatched_reply)
|
||||
|
||||
- The 'tunnel_parents' field of conn.log is now marked &optional, so, for
|
||||
the default configuration of logs, this field will show "-" instead of
|
||||
"(empty)" for connections that lack any tunneling.
|
||||
|
||||
- SMB event argument changes:
|
||||
|
||||
- smb1_transaction_request now has two additional arguments, "parameters"
|
||||
and "data" strings
|
||||
|
||||
- smb1_transaction2_request now has an additional "args" record argument
|
||||
|
||||
- The SMB::write_cmd_log option has been removed and the corresponding
  logic moved to policy/protocols/smb/log-cmds.bro, which can simply
  be loaded to produce the same effect as toggling the old flag on.
|
||||
|
||||
- SSL event argument changes:
|
||||
|
||||
- event ssl_server_signature now has an additional argument
|
||||
"signature_and_hashalgorithm".
|
||||
|
||||
- The "dnp3_header_block" event no longer has the "start" parameter
|
||||
|
||||
- The string_to_pattern() built-in (and the now-deprecated merge_pattern()
|
||||
built-in) is no longer restricted to only be called at initialization time.
|
||||
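
  For illustration, a hypothetical handler building a pattern at runtime
  (the pattern text is arbitrary, and the second argument of
  string_to_pattern() is shown as-is; see the BIF documentation for its
  exact meaning)::

      event connection_established(c: connection)
          {
          local p = string_to_pattern("foo|bar", F);

          if ( p in c$uid )
              print "connection UID matches", c$uid;
          }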
|
||||
- GeoIP Legacy Database support has been replaced with GeoIP2 MaxMind DB
|
||||
format support.
|
||||
|
||||
- This updates the "lookup_location" and "lookup_asn" BIFs to use
|
||||
libmaxminddb. The motivation for this is that MaxMind is discontinuing
|
||||
GeoLite Legacy databases: no updates after April 1, 2018, no downloads
|
||||
after January 2, 2019. It's also noted that all GeoIP Legacy databases
|
||||
may be discontinued as they are superseded by GeoIP2.
|
||||
|
||||
Removed Functionality
|
||||
---------------------
|
||||
|
||||
- We no longer maintain any Bro plugins as part of the Bro
|
||||
distribution. Most of the plugins that used to be in aux/plugins have
|
||||
been moved over to use the Bro Package Manager instead. See
|
||||
https://github.com/bro/packages for a list of Bro packages currently
|
||||
available.
|
||||
|
||||
- BroControl: The 'IPv6Comm' and 'ZoneID' options are no longer
  available (though Broker should be able to handle IPv6 automatically).
|
||||
|
||||
- The "ocsp_request" event no longer has "requestorName" parameter.
|
||||
|
||||
Deprecated Functionality
|
||||
------------------------
|
||||
|
||||
- The old communication system is now deprecated and scheduled for
  removal with the next Bro release. This includes the "communication"
  framework, the &synchronized attribute, and the existing
  communication-related BiFs. Use Broker instead.
|
||||
|
||||
- The infrastructure for serializing Bro values into a binary
|
||||
representation is now deprecated and scheduled for removal with the
|
||||
next Bro release. This includes the &persistent attribute, as well
|
||||
as BiFs like send_id(). Use Broker data stores and the new
|
||||
configuration framework instead.
|
||||
|
||||
- BroControl: The 'update' command is deprecated and scheduled for
|
||||
removal with the next Bro release. Bro's new configuration framework
|
||||
is taking its place.
|
||||
|
||||
- Mixing of scalars and vectors, such as "v + e" yielding a vector
|
||||
corresponding to the vector v with the scalar e added to each of
|
||||
its elements, has been deprecated.
|
||||
|
||||
- The built-in function merge_pattern() has been deprecated. It will
|
||||
be replaced by the '&' operator for patterns.
|
||||
|
||||
- The undocumented feature of using "&&" and "||" operators for patterns
|
||||
has been deprecated.
|
||||
|
||||
Bro 2.5.1
|
||||
=========
|
||||
|
||||
New Functionality
|
||||
-----------------
|
||||
|
||||
- Bro now includes bifs for rename, unlink, and rmdir.
|
||||
|
||||
- Bro now includes events for two extensions used by TLS 1.3:
|
||||
ssl_extension_supported_versions and ssl_extension_psk_key_exchange_modes
|
||||
|
||||
- Bro now includes hooks that can be used to interact with log processing
|
||||
on the C++ level.
|
||||
|
||||
- Bro now supports ERSPAN. Currently this ignores the Ethernet header that is
  carried over the tunnel; if a MAC is logged, only the outer MAC
  is returned.
|
||||
|
||||
- Added a new BroControl option CrashExpireInterval to enable
|
||||
"broctl cron" to remove crash directories that are older than the
|
||||
specified number of days (the default value is 0, which means crash
|
||||
directories never expire).
|
||||
|
||||
- Added a new BroControl option MailReceivingPackets to control
|
||||
whether or not "broctl cron" will mail a warning when it notices
|
||||
that no packets were seen on an interface.
|
||||
|
||||
- There is a new broctl command-line option "--version" which outputs
|
||||
the BroControl version.
|
||||
|
||||
Changed Functionality
|
||||
---------------------
|
||||
|
||||
- The input framework's Ascii reader is now more resilient. If an input
  is marked to reread a file when it changes and the file didn't exist
  during a check, previous versions of Bro would stop watching the file.
  The same could happen with bad data in a line of a file. These
  situations no longer cause Bro to stop watching input files. The
  old behavior is available through settings in the Ascii reader.
|
||||
|
||||
- The RADIUS scripts have been reworked. Requests are now logged even if
|
||||
there is no response. The new framed_addr field in the log indicates
|
||||
if the radius server is hinting at an address for the client. The ttl
|
||||
field indicates how quickly the server is replying to the network access
|
||||
server.
|
||||
|
||||
- With the introduction of the Bro package manager, the Bro plugin repository
|
||||
is considered deprecated. The af_packet, postgresql, and tcprs plugins have
|
||||
already been removed and are available via bro-pkg.
|
||||
|
||||
Bro 2.5
|
||||
=======
|
||||
|
||||
|
|
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
|||
2.5-39
|
||||
2.5-850
|
||||
|
|
1
aux/bifcl
Submodule
1
aux/bifcl
Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit e99152c00aad8f81c684a01bc4d40790a295f85c
|
|
@ -1 +1 @@
|
|||
Subproject commit a0990e61ad4a3705bda4cc5a20059af2d1bda4c3
|
||||
Subproject commit 74cf55ace0de2bf061bbbf285ccf47cba122955f
|
|
@ -1 +1 @@
|
|||
Subproject commit 7660b5f4c5be40aa5f3a7c8746fdcf68331f9b93
|
||||
Subproject commit 53aae820242c02790089e384a9fe2d3174799ab1
|
|
@ -1 +1 @@
|
|||
Subproject commit 765eab50f7796fdb3c308fe9232cd7891f098c67
|
||||
Subproject commit edf754ea6e89a84ad74eff69a454c5e285c4b81b
|
|
@ -1 +1 @@
|
|||
Subproject commit f6d451520eaaaae97aab6df2bb4e0aecb6b63e66
|
||||
Subproject commit 70a8b2e15105f4c238765a882151718162e46208
|
|
@ -1 +1 @@
|
|||
Subproject commit 68a36ed81480ba935268bcaf7b6f2249d23436da
|
||||
Subproject commit e0f9f6504db9285a48e0be490abddf959999a404
|
|
@ -1 +1 @@
|
|||
Subproject commit 32e582514ae044befa8e0511083bf11a51408a1d
|
||||
Subproject commit 99ec0e1ea89e166af4cb6ebc2d923d123424123d
|
|
@ -1 +1 @@
|
|||
Subproject commit 9f3d6fce49cad3b45b5ddd0fe1f3c79186e1d2e7
|
||||
Subproject commit a432ae2f9a06e7b1664df5fc4ce1b694acb7b099
|
|
@ -1 +0,0 @@
|
|||
Subproject commit 0a2f0215270e6ceaf9c1312f705b95d2cce1b530
|
|
@ -108,11 +108,8 @@
|
|||
/* GeoIP geographic lookup functionality */
|
||||
#cmakedefine USE_GEOIP
|
||||
|
||||
/* Whether the found GeoIP API supports IPv6 Country Edition */
|
||||
#cmakedefine HAVE_GEOIP_COUNTRY_EDITION_V6
|
||||
|
||||
/* Whether the found GeoIP API supports IPv6 City Edition */
|
||||
#cmakedefine HAVE_GEOIP_CITY_EDITION_REV0_V6
|
||||
/* Define if KRB5 is available */
|
||||
#cmakedefine USE_KRB5
|
||||
|
||||
/* Use Google's perftools */
|
||||
#cmakedefine USE_PERFTOOLS_DEBUG
|
||||
|
@ -229,3 +226,14 @@
|
|||
#ifndef BRO_PLUGIN_INTERNAL_BUILD
|
||||
#define BRO_PLUGIN_INTERNAL_BUILD @BRO_PLUGIN_INTERNAL_BUILD@
|
||||
#endif
|
||||
|
||||
/* A C function that has the Bro version encoded into its name. */
|
||||
#define BRO_VERSION_FUNCTION bro_version_@VERSION_C_IDENT@
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
extern const char* BRO_VERSION_FUNCTION();
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -7,11 +7,16 @@ site_dir=@BRO_SCRIPT_INSTALL_PATH@/site
|
|||
plugin_dir=@BRO_PLUGIN_INSTALL_PATH@
|
||||
config_dir=@BRO_ETC_INSTALL_DIR@
|
||||
python_dir=@PY_MOD_INSTALL_DIR@
|
||||
cmake_dir=@CMAKE_INSTALL_PREFIX@/share/bro/cmake
|
||||
include_dir=@CMAKE_INSTALL_PREFIX@/include/bro
|
||||
bropath=@DEFAULT_BROPATH@
|
||||
bro_dist=@BRO_DIST@
|
||||
binpac_root=@BRO_CONFIG_BINPAC_ROOT_DIR@
|
||||
caf_root=@BRO_CONFIG_CAF_ROOT_DIR@
|
||||
broker_root=@BRO_CONFIG_BROKER_ROOT_DIR@
|
||||
|
||||
usage="\
|
||||
Usage: bro-config [--version] [--prefix] [--script_dir] [--site_dir] [--plugin_dir] [--config_dir] [--python_dir] [--bropath] [--bro_dist]"
|
||||
Usage: bro-config [--version] [--prefix] [--script_dir] [--site_dir] [--plugin_dir] [--config_dir] [--python_dir] [--include_dir] [--cmake_dir] [--bropath] [--bro_dist] [--binpac_root] [--caf_root] [--broker_root]"
|
||||
|
||||
if [ $# -eq 0 ] ; then
|
||||
echo "${usage}" 1>&2
|
||||
|
@ -46,12 +51,27 @@ while [ $# -ne 0 ]; do
|
|||
--python_dir)
|
||||
echo $python_dir
|
||||
;;
|
||||
--cmake_dir)
|
||||
echo $cmake_dir
|
||||
;;
|
||||
--include_dir)
|
||||
echo $include_dir
|
||||
;;
|
||||
--bropath)
|
||||
echo $bropath
|
||||
;;
|
||||
--bro_dist)
|
||||
echo $bro_dist
|
||||
;;
|
||||
--binpac_root)
|
||||
echo $binpac_root
|
||||
;;
|
||||
--caf_root)
|
||||
echo $caf_root
|
||||
;;
|
||||
--broker_root)
|
||||
echo $broker_root
|
||||
;;
|
||||
*)
|
||||
echo "${usage}" 1>&2
|
||||
exit 1
|
||||
|
|
2
cmake
2
cmake
|
@ -1 +1 @@
|
|||
Subproject commit d29fbf6152e54fbb536910af02a80874b1917311
|
||||
Subproject commit 4cc3e344cf2698010a46684d32a2907a943430e3
|
103
configure
vendored
103
configure
vendored
|
@ -18,7 +18,17 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
|
|||
|
||||
Build Options:
|
||||
--builddir=DIR place build files in directory [build]
|
||||
--build-type=TYPE set CMake build type [RelWithDebInfo]:
|
||||
- Debug: optimizations off, debug symbols + flags
|
||||
- MinSizeRel: size optimizations, debugging off
|
||||
- Release: optimizations on, debugging off
|
||||
- RelWithDebInfo: optimizations on,
|
||||
debug symbols on, debug flags off
|
||||
--generator=GENERATOR CMake generator to use (see cmake --help)
|
||||
--ccache use ccache to speed up recompilation (requires
|
||||
ccache installation and CMake 3.10+)
|
||||
--toolchain=PATH path to a CMAKE_TOOLCHAIN_FILE
|
||||
(useful for cross-compiling)
|
||||
|
||||
Installation Directories:
|
||||
--prefix=PREFIX installation directory [/usr/local/bro]
|
||||
|
@ -34,42 +44,41 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
|
|||
--conf-files-dir=PATH config files installation directory [PREFIX/etc]
|
||||
|
||||
Optional Features:
|
||||
--enable-debug compile in debugging mode
|
||||
--enable-debug compile in debugging mode (like --build-type=Debug)
|
||||
--enable-coverage compile with code coverage support (implies debugging mode)
|
||||
--enable-mobile-ipv6 analyze mobile IPv6 features defined by RFC 6275
|
||||
--enable-perftools force use of Google perftools on non-Linux systems
|
||||
(automatically on when perftools is present on Linux)
|
||||
--enable-perftools-debug use Google's perftools for debugging
|
||||
--enable-jemalloc link against jemalloc
|
||||
--enable-ruby build ruby bindings for broccoli (deprecated)
|
||||
--enable-broker enable use of the Broker communication library
|
||||
(requires C++ Actor Framework)
|
||||
--disable-broccoli don't build or install the Broccoli library
|
||||
--enable-broccoli build or install the Broccoli library (deprecated)
|
||||
--disable-broctl don't install Broctl
|
||||
--disable-auxtools don't build or install auxiliary tools
|
||||
--disable-perftools don't try to build with Google Perftools
|
||||
--disable-python don't try to build python bindings for broccoli
|
||||
--disable-pybroker don't try to build python bindings for broker
|
||||
--disable-python don't try to build python bindings for broker
|
||||
--disable-broker-tests don't try to build Broker unit tests
|
||||
|
||||
Required Packages in Non-Standard Locations:
|
||||
--with-openssl=PATH path to OpenSSL install root
|
||||
--with-bind=PATH path to BIND install root
|
||||
--with-pcap=PATH path to libpcap install root
|
||||
--with-binpac=PATH path to BinPAC install root
|
||||
--with-binpac=PATH path to BinPAC executable
|
||||
(useful for cross-compiling)
|
||||
--with-bifcl=PATH path to Bro BIF compiler executable
|
||||
(useful for cross-compiling)
|
||||
--with-flex=PATH path to flex executable
|
||||
--with-bison=PATH path to bison executable
|
||||
--with-python=PATH path to Python executable
|
||||
--with-caf=PATH path to C++ Actor Framework installation for using external version
|
||||
(a required Broker dependency)
|
||||
|
||||
Optional Packages in Non-Standard Locations:
|
||||
--with-caf=PATH path to C++ Actor Framework installation
|
||||
(a required Broker dependency)
|
||||
--with-geoip=PATH path to the libGeoIP install root
|
||||
--with-geoip=PATH path to the libmaxminddb install root
|
||||
--with-krb5=PATH path to krb5 install root
|
||||
--with-perftools=PATH path to Google Perftools install root
|
||||
--with-jemalloc=PATH path to jemalloc install root
|
||||
--with-python-lib=PATH path to libpython
|
||||
--with-python-inc=PATH path to Python headers
|
||||
--with-ruby=PATH path to ruby interpreter
|
||||
--with-ruby-lib=PATH path to ruby library
|
||||
--with-ruby-inc=PATH path to ruby headers
|
||||
--with-swig=PATH path to SWIG executable
|
||||
--with-rocksdb=PATH path to RocksDB installation
|
||||
(an optional Broker dependency)
|
||||
|
@ -122,21 +131,19 @@ append_cache_entry BRO_ROOT_DIR PATH $prefix
|
|||
append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/broctl
|
||||
append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro
|
||||
append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc
|
||||
append_cache_entry BROKER_PYTHON_BINDINGS BOOL false
|
||||
append_cache_entry ENABLE_DEBUG BOOL false
|
||||
append_cache_entry ENABLE_PERFTOOLS BOOL false
|
||||
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false
|
||||
append_cache_entry ENABLE_JEMALLOC BOOL false
|
||||
append_cache_entry ENABLE_BROKER BOOL false
|
||||
append_cache_entry BinPAC_SKIP_INSTALL BOOL true
|
||||
append_cache_entry BUILD_SHARED_LIBS BOOL true
|
||||
append_cache_entry INSTALL_BROCCOLI BOOL false
|
||||
append_cache_entry INSTALL_AUX_TOOLS BOOL true
|
||||
append_cache_entry INSTALL_BROCCOLI BOOL true
|
||||
append_cache_entry INSTALL_BROCTL BOOL true
|
||||
append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING
|
||||
append_cache_entry ENABLE_MOBILE_IPV6 BOOL false
|
||||
append_cache_entry DISABLE_PERFTOOLS BOOL false
|
||||
append_cache_entry DISABLE_RUBY_BINDINGS BOOL true
|
||||
append_cache_entry ENABLE_COVERAGE BOOL false
|
||||
|
||||
# parse arguments
|
||||
while [ $# -ne 0 ]; do
|
||||
|
@ -153,9 +160,22 @@ while [ $# -ne 0 ]; do
|
|||
--builddir=*)
|
||||
builddir=$optarg
|
||||
;;
|
||||
--build-type=*)
|
||||
append_cache_entry CMAKE_BUILD_TYPE STRING $optarg
|
||||
|
||||
if [ $(echo "$optarg" | tr [:upper:] [:lower:]) = "debug" ]; then
|
||||
append_cache_entry ENABLE_DEBUG BOOL true
|
||||
fi
|
||||
;;
|
||||
--generator=*)
|
||||
CMakeGenerator="$optarg"
|
||||
;;
|
||||
--ccache)
|
||||
append_cache_entry ENABLE_CCACHE BOOL true
|
||||
;;
|
||||
--toolchain=*)
|
||||
append_cache_entry CMAKE_TOOLCHAIN_FILE PATH $optarg
|
||||
;;
|
||||
--prefix=*)
|
||||
prefix=$optarg
|
||||
append_cache_entry CMAKE_INSTALL_PREFIX PATH $optarg
|
||||
|
@ -179,6 +199,10 @@ while [ $# -ne 0 ]; do
|
|||
--logdir=*)
|
||||
append_cache_entry BRO_LOG_DIR PATH $optarg
|
||||
;;
|
||||
--enable-coverage)
|
||||
append_cache_entry ENABLE_COVERAGE BOOL true
|
||||
append_cache_entry ENABLE_DEBUG BOOL true
|
||||
;;
|
||||
--enable-debug)
|
||||
append_cache_entry ENABLE_DEBUG BOOL true
|
||||
;;
|
||||
|
@ -195,15 +219,10 @@ while [ $# -ne 0 ]; do
|
|||
--enable-jemalloc)
|
||||
append_cache_entry ENABLE_JEMALLOC BOOL true
|
||||
;;
|
||||
--enable-broker)
|
||||
append_cache_entry ENABLE_BROKER BOOL true
|
||||
--enable-broccoli)
|
||||
append_cache_entry INSTALL_BROCCOLI BOOL yes
|
||||
;;
|
||||
--disable-broker)
|
||||
;;
|
||||
--disable-broccoli)
|
||||
append_cache_entry INSTALL_BROCCOLI BOOL false
|
||||
;;
|
||||
--disable-broctl)
|
||||
--disable-broctl)
|
||||
append_cache_entry INSTALL_BROCTL BOOL false
|
||||
;;
|
||||
--disable-auxtools)
|
||||
|
@ -215,11 +234,8 @@ while [ $# -ne 0 ]; do
|
|||
--disable-python)
|
||||
append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
|
||||
;;
|
||||
--disable-pybroker)
|
||||
append_cache_entry DISABLE_PYBROKER BOOL true
|
||||
;;
|
||||
--enable-ruby)
|
||||
append_cache_entry DISABLE_RUBY_BINDINGS BOOL false
|
||||
--disable-broker-tests)
|
||||
append_cache_entry BROKER_DISABLE_TESTS BOOL true
|
||||
;;
|
||||
--with-openssl=*)
|
||||
append_cache_entry OPENSSL_ROOT_DIR PATH $optarg
|
||||
|
@ -231,7 +247,11 @@ while [ $# -ne 0 ]; do
|
|||
append_cache_entry PCAP_ROOT_DIR PATH $optarg
|
||||
;;
|
||||
--with-binpac=*)
|
||||
append_cache_entry BinPAC_ROOT_DIR PATH $optarg
|
||||
append_cache_entry BINPAC_EXE_PATH PATH $optarg
|
||||
append_cache_entry BinPAC_ROOT_DIR PATH "$(dirname $optarg)/.."
|
||||
;;
|
||||
--with-bifcl=*)
|
||||
append_cache_entry BIFCL_EXE_PATH PATH $optarg
|
||||
;;
|
||||
--with-flex=*)
|
||||
append_cache_entry FLEX_EXECUTABLE PATH $optarg
|
||||
|
@ -240,7 +260,10 @@ while [ $# -ne 0 ]; do
|
|||
append_cache_entry BISON_EXECUTABLE PATH $optarg
|
||||
;;
|
||||
--with-geoip=*)
|
||||
append_cache_entry LibGeoIP_ROOT_DIR PATH $optarg
|
||||
append_cache_entry LibMMDB_ROOT_DIR PATH $optarg
|
||||
;;
|
||||
--with-krb5=*)
|
||||
append_cache_entry LibKrb5_ROOT_DIR PATH $optarg
|
||||
;;
|
||||
--with-perftools=*)
|
||||
append_cache_entry GooglePerftools_ROOT_DIR PATH $optarg
|
||||
|
@ -259,26 +282,12 @@ while [ $# -ne 0 ]; do
|
|||
append_cache_entry PYTHON_INCLUDE_DIR PATH $optarg
|
||||
append_cache_entry PYTHON_INCLUDE_PATH PATH $optarg
|
||||
;;
|
||||
--with-ruby=*)
|
||||
append_cache_entry RUBY_EXECUTABLE PATH $optarg
|
||||
;;
|
||||
--with-ruby-lib=*)
|
||||
append_cache_entry RUBY_LIBRARY PATH $optarg
|
||||
;;
|
||||
--with-ruby-inc=*)
|
||||
append_cache_entry RUBY_INCLUDE_DIRS PATH $optarg
|
||||
append_cache_entry RUBY_INCLUDE_PATH PATH $optarg
|
||||
;;
|
||||
--with-swig=*)
|
||||
append_cache_entry SWIG_EXECUTABLE PATH $optarg
|
||||
;;
|
||||
--with-caf=*)
|
||||
append_cache_entry CAF_ROOT_DIR PATH $optarg
|
||||
;;
|
||||
--with-libcaf=*)
|
||||
echo "warning: --with-libcaf deprecated, use --with-caf instead"
|
||||
append_cache_entry CAF_ROOT_DIR PATH $optarg
|
||||
;;
|
||||
--with-rocksdb=*)
|
||||
append_cache_entry ROCKSDB_ROOT_DIR PATH $optarg
|
||||
;;
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
set(BROCCOLI_DOCS_SRC ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html)
|
||||
set(BROCCOLI_DOCS_DST ${CMAKE_BINARY_DIR}/html/broccoli-api)
|
||||
set(SPHINX_INPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_input)
|
||||
set(SPHINX_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_output)
|
||||
set(BROXYGEN_SCRIPT_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/broxygen_script_output)
|
||||
set(BROXYGEN_CACHE_DIR ${CMAKE_CURRENT_BINARY_DIR}/broxygen_cache)
|
||||
set(BROCCOLI_DOCS_SRC ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html)
|
||||
set(BROCCOLI_DOCS_DST ${CMAKE_BINARY_DIR}/html/broccoli-api)
|
||||
|
||||
# Find out what BROPATH to use when executing bro.
|
||||
execute_process(COMMAND ${CMAKE_BINARY_DIR}/bro-path-dev
|
||||
|
@ -61,10 +61,6 @@ add_custom_target(sphinxdoc
|
|||
COMMAND "${CMAKE_COMMAND}" -E create_symlink
|
||||
${SPHINX_OUTPUT_DIR}/html
|
||||
${CMAKE_BINARY_DIR}/html
|
||||
# Copy Broccoli API reference into output dir if it exists.
|
||||
COMMAND test -d ${BROCCOLI_DOCS_SRC} &&
|
||||
( rm -rf ${BROCCOLI_DOCS_DST} &&
|
||||
cp -r ${BROCCOLI_DOCS_SRC} ${BROCCOLI_DOCS_DST} ) || true
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
|
||||
COMMENT "[Sphinx] Generate HTML documentation in ${CMAKE_BINARY_DIR}/html")
|
||||
|
||||
|
@ -77,7 +73,10 @@ add_custom_target(sphinxdoc_clean
|
|||
COMMAND "${CMAKE_COMMAND}" -E remove_directory ${BROXYGEN_CACHE_DIR}
|
||||
VERBATIM)
|
||||
|
||||
add_custom_target(doc)
|
||||
if (NOT TARGET doc)
|
||||
add_custom_target(doc)
|
||||
endif ()
|
||||
|
||||
add_custom_target(docclean)
|
||||
add_dependencies(doc sphinxdoc)
|
||||
add_dependencies(docclean sphinxdoc_clean)
|
||||
|
|
12
doc/_static/broxygen.css
vendored
12
doc/_static/broxygen.css
vendored
|
@ -152,12 +152,10 @@ sup, sub {
|
|||
|
||||
pre, code {
|
||||
white-space: pre;
|
||||
overflow: auto;
|
||||
margin-left: 2em;
|
||||
margin-right: 2em;
|
||||
margin-top: .5em;
|
||||
margin-bottom: 1.5em;
|
||||
word-wrap: normal;
|
||||
overflow: auto;
|
||||
margin-left: 0.25em;
|
||||
margin-right: 0.25em;
|
||||
word-wrap: normal;
|
||||
}
|
||||
|
||||
pre, code, tt {
|
||||
|
@ -482,4 +480,4 @@ li {
|
|||
|
||||
.btest-cmd .code pre, .btest-include .code pre {
|
||||
margin-left: 0px;
|
||||
}
|
||||
}
|
||||
|
|
4
doc/_templates/layout.html
vendored
4
doc/_templates/layout.html
vendored
|
@ -10,7 +10,7 @@
|
|||
{% endblock %}
|
||||
|
||||
{% block header %}
|
||||
<iframe src="//www.bro.org/frames/header-no-logo.html" width="100%" height="100px" frameborder="0" marginheight="0" scrolling="no" marginwidth="0">
|
||||
<iframe src="https://www.bro.org/frames/header-no-logo.html" width="100%" height="100px" frameborder="0" marginheight="0" scrolling="no" marginwidth="0">
|
||||
</iframe>
|
||||
{% endblock %}
|
||||
|
||||
|
@ -108,6 +108,6 @@
|
|||
{% endblock %}
|
||||
|
||||
{% block footer %}
|
||||
<iframe src="//www.bro.org/frames/footer.html" width="100%" height="420px" frameborder="0" marginheight="0" scrolling="no" marginwidth="0">
|
||||
<iframe src="https://www.bro.org/frames/footer.html" width="100%" height="420px" frameborder="0" marginheight="0" scrolling="no" marginwidth="0">
|
||||
</iframe>
|
||||
{% endblock %}
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
../../../aux/plugins/README
|
|
@ -1 +0,0 @@
|
|||
../../../../aux/plugins/af_packet/README
|
|
@ -1 +0,0 @@
|
|||
../../../../aux/plugins/elasticsearch/README
|
|
@ -1 +0,0 @@
|
|||
../../../../aux/plugins/kafka/README
|
|
@ -1 +0,0 @@
|
|||
../../../../aux/plugins/myricom/README
|
|
@ -1 +0,0 @@
|
|||
../../../../aux/plugins/netmap/README
|
|
@ -1 +0,0 @@
|
|||
../../../../aux/plugins/pf_ring/README
|
|
@ -1 +0,0 @@
|
|||
../../../../aux/plugins/postgresql/README
|
|
@ -1 +0,0 @@
|
|||
../../../../aux/plugins/redis/README
|
|
@ -1 +0,0 @@
|
|||
../../../../aux/plugins/tcprs/README
|
|
@ -1 +0,0 @@
|
|||
../../../aux/broccoli/bindings/broccoli-python/README
|
|
@ -1 +0,0 @@
|
|||
../../../aux/broccoli/bindings/broccoli-ruby/README
|
|
@ -1 +0,0 @@
|
|||
../../../aux/broccoli/README
|
|
@ -1 +0,0 @@
|
|||
../../../aux/broccoli/doc/broccoli-manual.rst
|
|
@ -1 +0,0 @@
|
|||
../../../aux/broker/broker-manual.rst
|
|
@ -13,19 +13,10 @@ current, independent component releases.
|
|||
:maxdepth: 1
|
||||
|
||||
BinPAC - A protocol parser generator <binpac/README>
|
||||
Broccoli - The Bro Client Communication Library (README) <broccoli/README>
|
||||
Broccoli - User Manual <broccoli/broccoli-manual>
|
||||
Broccoli Python Bindings <broccoli-python/README>
|
||||
Broccoli Ruby Bindings <broccoli-ruby/README>
|
||||
Broker - Bro's (New) Messaging Library (README) <broker/README>
|
||||
Broker - User Manual <broker/broker-manual.rst>
|
||||
Broker - Bro's (New) Messaging Library <broker/README>
|
||||
BroControl - Interactive Bro management shell <broctl/README>
|
||||
Bro-Aux - Small auxiliary tools for Bro <bro-aux/README>
|
||||
Bro-Plugins - A collection of plugins for Bro <bro-plugins/README>
|
||||
BTest - A unit testing framework <btest/README>
|
||||
Capstats - Command-line packet statistic tool <capstats/README>
|
||||
PySubnetTree - Python module for CIDR lookups<pysubnettree/README>
|
||||
trace-summary - Script for generating break-downs of network traffic <trace-summary/README>
|
||||
|
||||
The `Broccoli API Reference <../broccoli-api/index.html>`_ may also be of
|
||||
interest.
|
||||
|
|
|
@ -195,8 +195,6 @@ html_sidebars = {
|
|||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'Broxygen'
|
||||
|
||||
html_add_permalinks = None
|
||||
|
||||
# -- Options for LaTeX output --------------------------------------------------
|
||||
|
||||
# The paper size ('letter' or 'a4').
|
||||
|
|
|
@ -259,9 +259,14 @@ class BroDomain(Domain):
|
|||
}
|
||||
|
||||
def clear_doc(self, docname):
|
||||
to_delete = []
|
||||
|
||||
for (typ, name), doc in self.data['objects'].items():
|
||||
if doc == docname:
|
||||
del self.data['objects'][typ, name]
|
||||
to_delete.append((typ, name))
|
||||
|
||||
for (typ, name) in to_delete:
|
||||
del self.data['objects'][typ, name]
|
||||
|
||||
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
|
||||
contnode):
|
||||
|
|
|
@ -1,174 +1,360 @@
|
|||
.. _CAF: https://github.com/actor-framework/actor-framework
|
||||
|
||||
.. _brokercomm-framework:
|
||||
|
||||
======================================
|
||||
Broker-Enabled Communication Framework
|
||||
======================================
|
||||
==============================================
|
||||
Broker-Enabled Communication/Cluster Framework
|
||||
==============================================
|
||||
|
||||
.. rst-class:: opening
|
||||
|
||||
Bro can now use the `Broker Library
|
||||
Bro now uses the `Broker Library
|
||||
<../components/broker/README.html>`_ to exchange information with
|
||||
other Bro processes.
|
||||
other Bro processes. Broker itself uses CAF_ (C++ Actor Framework)
|
||||
internally for connecting nodes and exchanging arbitrary data over
|
||||
networks. Broker then introduces, on top of CAF, a topic-based
|
||||
publish/subscribe communication pattern using a data model that is
|
||||
compatible with Bro's. Broker itself can be utilized outside the
|
||||
context of Bro, with Bro itself making use of only a few predefined
|
||||
Broker message formats that represent Bro events, log entries, etc.
|
||||
|
||||
In summary, Bro's Broker framework provides basic facilities for
|
||||
connecting broker-enabled peers (e.g. Bro instances) to each other
|
||||
and exchanging messages (e.g. events and logs). With this comes
|
||||
changes in how clusters operate and, since Broker significantly
|
||||
differs from the previous communication framework, there are several
|
||||
changes in the set of scripts that Bro ships with that may break
|
||||
your own customizations. This document aims to describe the changes
|
||||
that have been made, making it easier to port your own scripts. It
|
||||
also gives examples of Broker and the new cluster framework that
|
||||
show off all the new features and capabilities.
|
||||
|
||||
.. contents::
|
||||
|
||||
Porting Guide
|
||||
=============
|
||||
|
||||
Review and use the points below as a guide to port your own scripts
|
||||
to the latest version of Bro, which uses the new cluster and Broker
|
||||
communication framework.
|
||||
|
||||
General Porting Tips
|
||||
--------------------
|
||||
|
||||
- ``@load policy/frameworks/communication/listen`` and
|
||||
  ``@load base/frameworks/communication`` indicate use of the
  old communication framework; consider porting to
|
||||
``@load base/frameworks/broker`` and using the Broker API:
|
||||
:doc:`/scripts/base/frameworks/broker/main.bro`
|
||||
|
||||
- The ``&synchronized`` and ``&persistent`` attributes are deprecated,
|
||||
consider using `Data Stores`_ instead.
|
||||
|
||||
- Usages of the old communications system features are all deprecated,
|
||||
however, they also do not work in the default Bro configuration unless
|
||||
you manually take action to set up the old communication system.
|
||||
To aid in porting, such usages will default to raising a fatal error
|
||||
unless you explicitly acknowledge that such usages of the old system
|
||||
are ok. Set the :bro:see:`old_comm_usage_is_ok` flag in this case.
|
||||
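
  For example, a minimal sketch of acknowledging intentional use of the
  old system while porting (assuming the flag is a redef-able constant)::

      redef old_comm_usage_is_ok = T;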
|
||||
- Instead of using e.g. ``Cluster::manager2worker_events`` (and all
|
||||
permutations for every node type), what you'd now use is either
|
||||
:bro:see:`Broker::publish` or :bro:see:`Broker::auto_publish` with
|
||||
either the topic associated with a specific node or class of nodes,
|
||||
like :bro:see:`Cluster::node_topic` or
|
||||
:bro:see:`Cluster::worker_topic`.
|
||||
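
  For example, a hypothetical script-level sketch (``my_event`` and its
  string argument are just placeholders)::

      global my_event: event(msg: string);

      event bro_init()
          {
          # Publish a single event to all workers ...
          Broker::publish(Cluster::worker_topic, my_event, "hello workers");

          # ... or automatically forward every local raise of my_event:
          Broker::auto_publish(Cluster::worker_topic, my_event);
          }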
|
||||
- Instead of using the ``send_id`` BIF, use :bro:see:`Broker::publish_id`.
|
||||
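
  For example (``Site::local_nets`` is just an arbitrary identifier here)::

      event bro_init()
          {
          Broker::publish_id(Cluster::worker_topic, "Site::local_nets");
          }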
|
||||
- Use :bro:see:`terminate` instead of :bro:see:`terminate_communication`.
|
||||
The latter refers to the old communication system and no longer affects
|
||||
the new Broker-based system.
|
||||
|
||||
- For replacing :bro:see:`remote_connection_established` and
|
||||
:bro:see:`remote_connection_closed`, consider :bro:see:`Broker::peer_added`
|
||||
or :bro:see:`Broker::peer_lost`. There's also :bro:see:`Cluster::node_up`
|
||||
and :bro:see:`Cluster::node_down`.
|
||||
|
||||
Notable / Specific Script API Changes
|
||||
-------------------------------------
|
||||
|
||||
- :bro:see:`Software::tracked` is now partitioned among proxy nodes
|
||||
instead of synchronized in its entirety to all nodes.
|
||||
|
||||
- ``Known::known_hosts`` is renamed to :bro:see:`Known::host_store` and
|
||||
implemented via the new Broker data store interface.
|
||||
|
||||
- ``Known::known_services`` is renamed to :bro:see:`Known::service_store`
|
||||
and implemented via the new Broker data store interface.
|
||||
|
||||
- ``Known::certs`` is renamed to :bro:see:`Known::cert_store`
|
||||
and implemented via the new Broker data store interface.
|
||||
|
||||
New Cluster Layout / API
|
||||
========================
|
||||
|
||||
Layout / Topology
|
||||
-----------------
|
||||
|
||||
The cluster topology has changed.
|
||||
|
||||
- Proxy nodes no longer connect with each other.
|
||||
|
||||
- Each worker node connects to all proxies.
|
||||
|
||||
- All node types connect to all logger nodes and the manager node.
|
||||
|
||||
This looks like:
|
||||
|
||||
.. figure:: broker/cluster-layout.png
|
||||
|
||||
Some general suggestions as to the purpose/utilization of each node type:
|
||||
|
||||
- Workers: are a good first choice for doing the brunt of any work you need
|
||||
done. They should be spending a lot of time performing the actual job
|
||||
of parsing/analyzing incoming data from packets, so you might choose
|
||||
to look at them as doing a "first pass" analysis and then deciding how
|
||||
the results should be shared with other nodes in the cluster.
|
||||
|
||||
- Proxies: serve as intermediaries for data storage and work/calculation
|
||||
offloading. Good for helping offload work or data in a scalable and
|
||||
distributed way. Since any given worker is connected to all
|
||||
proxies and can agree on an "arbitrary key -> proxy node" mapping
|
||||
(more on that later), you can partition work or data amongst them in a
|
||||
uniform manner. e.g. you might choose to use proxies as a method of
|
||||
sharing non-persistent state or as a "second pass" analysis for any
|
||||
work that you don't want interfering with the workers' capacity to
|
||||
keep up with capturing and parsing packets. Note that the default scripts
|
||||
that come with Bro don't utilize proxies themselves, so if you are coming
|
||||
from a previous BroControl deployment, you may want to try reducing to
a single proxy node. If you later come to rely on custom/community scripts
|
||||
that utilize proxies, that would be the time to start considering scaling
|
||||
up the number of proxies to meet demands.
|
||||
|
||||
- Manager: this node will be good at making decisions that require a
|
||||
global view of things since it is in a centralized location, connected
|
||||
to everything. However, that also makes it easy to overload, so try
|
||||
to use it sparingly and only for tasks that must be done in a
|
||||
centralized or authoritative location. Optionally, for some
|
||||
deployments, the Manager can also serve as the sole Logger.
|
||||
|
||||
- Loggers: these nodes should simply be spending their time writing out
logs to disk and not be used for much else. In the default cluster
|
||||
configuration, logs get distributed among available loggers in a
|
||||
round-robin fashion, providing failover capability should any given
|
||||
logger temporarily go offline.
|
||||
|
||||
Data Management/Sharing Strategies
|
||||
==================================
|
||||
|
||||
There is no single best approach or pattern to use when you need a
Bro script to store or share long-term state and data. Previously, the
two common approaches were either using the ``&synchronized``
attribute on tables/sets or explicitly sending events to specific
nodes on which you wanted data to be stored. The former is no longer
possible, but the new Broker/Cluster framework offers several
alternatives, namely distributed data store and data partitioning
APIs.
|
||||
|
||||
Data Stores
|
||||
-----------
|
||||
|
||||
Broker provides a distributed key-value store interface with optional
|
||||
choice of using a persistent backend. For more detail, see
|
||||
:ref:`this example <data_store_example>`.
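As a minimal sketch of the interface (a single process creating its own
master store; the clustered variants are shown in the linked example):

.. code:: bro

    global h: opaque of Broker::Store;

    event bro_init()
        {
        h = Broker::create_master("mystore");
        Broker::put(h, "one", 110);

        when ( local res = Broker::get(h, "one") )
            { print "lookup", res; }
        timeout 5sec
            { print "timeout"; }
        }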
|
||||
|
||||
Some ideas/considerations/scenarios when deciding whether to use
|
||||
a data store for your use-case:
|
||||
|
||||
* If you need the full data set locally in order to achieve low-latency
queries, data store "clones" can provide that.
|
||||
|
||||
* If you need data that persists across restarts of Bro processes, then
|
||||
data stores can also provide that.
|
||||
|
||||
* If the data you want to store is complex (tables, sets, records) or
|
||||
you expect to read, modify, and store back, then data stores may not
|
||||
be able to provide simple, race-free methods of performing the pattern
|
||||
of logic that you want.
|
||||
|
||||
* If the data set you want to store is excessively large, that's still
|
||||
problematic even for stores that use a persistent backend as they are
|
||||
implemented in a way that requires a full snapshot of the store's
|
||||
contents to fit in memory (this limitation may change in the future).
|
||||
|
||||
Data Partitioning
|
||||
-----------------
|
||||
|
||||
New data partitioning strategies are available using the API in
|
||||
:doc:`/scripts/base/frameworks/cluster/pools.bro`. Using that API, developers
|
||||
of custom Bro scripts can define a custom pool of nodes that best fits the
|
||||
needs of their script.
|
||||
|
||||
One example strategy is to use Highest Random Weight (HRW) hashing to
partition data tables amongst the pool of all proxy nodes, e.g. using
:bro:see:`Cluster::publish_hrw`. This can allow clusters to be scaled
more easily than the approach of "the entire data set gets synchronized
to all nodes", as the solution to memory limitations becomes "just add
another proxy node". It may also take away some of the messaging load
that used to be required to synchronize data sets across all nodes.
|
||||
|
||||
The tradeoff of this approach is that nodes which leave the pool (due
to crashing, etc.) cause a temporary gap in the total data set until
workers start hashing those keys to a different proxy node that is
still alive, at which point the data is located and updated there.
|
||||
|
||||
If the developer of a script expects its workload to be particularly
|
||||
intensive, wants to ensure that their operations get exclusive
|
||||
access to nodes, or otherwise set constraints on the number of nodes within
|
||||
a pool utilized by their script, then the :bro:see:`Cluster::PoolSpec`
|
||||
structure will allow them to do that while still allowing users of that script
|
||||
to override the default suggestions made by the original developer.
|
||||
|
||||
Broker Framework Examples
|
||||
=========================
|
||||
|
||||
The broker framework provides basic facilities for connecting Bro instances
|
||||
to each other and exchanging messages, like events or logs.
|
||||
|
||||
See :doc:`/scripts/base/frameworks/broker/main.bro` for an overview
|
||||
of the main Broker API.
|
||||
|
||||
.. _broker_topic_naming:
|
||||
|
||||
Topic Naming Conventions
|
||||
------------------------
|
||||
|
||||
All Broker-based messaging involves two components: the information you
|
||||
want to send (e.g. an event with its arguments) along with an associated
|
||||
topic name string. The topic strings are used as a filtering mechanism:
|
||||
Broker uses a publish/subscribe communication pattern where peers
|
||||
advertise interest in topic **prefixes** and only receive messages which
|
||||
match one of their prefix subscriptions.
|
||||
|
||||
Broker itself supports arbitrary topic strings; however, Bro generally
|
||||
follows certain conventions in choosing these topics to help avoid
|
||||
conflicts and generally make them easier to remember.
|
||||
|
||||
As a reminder of how topic subscriptions work, subscribers advertise
interest in a topic **prefix** and then receive any messages published
by a peer to a topic name that starts with that prefix. E.g. if Alice
subscribes to the "alice/dogs" prefix, she would receive the following
message topics published by Bob:
|
||||
|
||||
- topic "alice/dogs/corgi"
|
||||
- topic "alice/dogs"
|
||||
- topic "alice/dogsarecool/oratleastilikethem"
|
||||
|
||||
Alice would **not** receive the following message topics published by Bob:
|
||||
|
||||
- topic "alice/cats/siamese"
|
||||
- topic "alice/cats"
|
||||
- topic "alice/dog"
|
||||
- topic "alice"
|
||||
|
||||
Note that topics are not required to form a slash-delimited hierarchy;
the subscription matching is purely a byte-by-byte prefix comparison.
|
||||
|
||||
However, Bro scripts generally will follow a topic naming hierarchy and
|
||||
any given script will make the topic names it uses apparent via some
|
||||
redef'able constant in its export section. Generally topics that Bro
|
||||
scripts use will be along the lines of "bro/<namespace>/<specifics>"
|
||||
with "<namespace>" being the script's module name (in all-undercase).
|
||||
For example, you might expect an imaginary "Pretend" framework to
|
||||
publish/subscribe using topic names like "bro/pretend/my_cool_event".
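As a small, hypothetical sketch of that convention (the "Pretend" module,
its topic constant, and the event name are all made up for illustration):

.. code:: bro

    module Pretend;

    export {
        ## Topic prefix used by this script; users may redef it.
        const topic_prefix = "bro/pretend" &redef;

        global my_cool_event: event(msg: string);
    }

    event bro_init()
        {
        # Receive anything published under this script's prefix.
        Broker::subscribe(topic_prefix);
        }

    event Pretend::my_cool_event(msg: string)
        {
        print "got my_cool_event", msg;
        }

A peer could then reach it with something like
``Broker::publish(Pretend::topic_prefix + "/my_cool_event", Pretend::my_cool_event, "hi")``.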
|
||||
|
||||
For cluster operation, see :doc:`/scripts/base/frameworks/cluster/main.bro`
|
||||
for a list of topics that are useful for steering published events to
|
||||
the various node classes. E.g. you have the ability to broadcast to all
|
||||
directly-connected nodes, only those of a given class (e.g. just workers),
|
||||
or to a specific node within a class.
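For instance, a brief sketch of steering the same event to different
audiences (``my_event`` and the node name "worker-1" are illustrative):

.. code:: bro

    global my_event: event(msg: string);

    event bro_init()
        {
        # All workers (any node subscribed to Cluster::worker_topic).
        Broker::publish(Cluster::worker_topic, my_event, "to all workers");

        # One specific node, addressed by its cluster node name.
        Broker::publish(Cluster::node_topic("worker-1"), my_event, "just worker-1");
        }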
|
||||
|
||||
The topic names that logs get published under are a bit nuanced. In the
|
||||
default cluster configuration, they are round-robin published to
|
||||
explicit topic names that identify a single logger. In standalone Bro
|
||||
processes, logs get published to the topic indicated by
|
||||
:bro:see:`Broker::default_log_topic_prefix`.
|
||||
|
||||
If you are writing your own scripts that need new topic names, avoid
prefixing them with "bro/": scripts shipping with Bro will use that
prefix, and it's better not to risk unintended conflicts.
|
||||
|
||||
Connecting to Peers
|
||||
===================
|
||||
-------------------
|
||||
|
||||
Communication via Broker must first be turned on via
|
||||
:bro:see:`Broker::enable`.
|
||||
|
||||
Bro can accept incoming connections by calling :bro:see:`Broker::listen`
|
||||
and then monitor connection status updates via the
|
||||
:bro:see:`Broker::incoming_connection_established` and
|
||||
:bro:see:`Broker::incoming_connection_broken` events.
|
||||
Bro can accept incoming connections by calling :bro:see:`Broker::listen`.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-listener.bro
|
||||
|
||||
Bro can initiate outgoing connections by calling :bro:see:`Broker::connect`
|
||||
and then monitor connection status updates via the
|
||||
:bro:see:`Broker::outgoing_connection_established`,
|
||||
:bro:see:`Broker::outgoing_connection_broken`, and
|
||||
:bro:see:`Broker::outgoing_connection_incompatible` events.
|
||||
Bro can initiate outgoing connections by calling :bro:see:`Broker::peer`.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-connector.bro
|
||||
|
||||
Remote Printing
|
||||
===============
|
||||
|
||||
To receive remote print messages, first use the
|
||||
:bro:see:`Broker::subscribe_to_prints` function to advertise to peers a
|
||||
topic prefix of interest and then create an event handler for
|
||||
:bro:see:`Broker::print_handler` to handle any print messages that are
|
||||
received.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-listener.bro
|
||||
|
||||
To send remote print messages, just call :bro:see:`Broker::send_print`.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-connector.bro
|
||||
|
||||
Notice that the subscriber only used the prefix "bro/print/", but is
|
||||
able to receive messages with full topics of "bro/print/hi",
|
||||
"bro/print/stuff", and "bro/print/bye". The model here is that the
|
||||
publisher of a message checks for all subscribers who advertised
|
||||
interest in a prefix of that message's topic and sends it to them.
|
||||
|
||||
Message Format
|
||||
--------------
|
||||
|
||||
For other applications that want to exchange print messages with Bro,
|
||||
the Broker message format is simply:
|
||||
|
||||
.. code:: c++
|
||||
|
||||
broker::message{std::string{}};
|
||||
In either case, connection status updates are monitored via the
|
||||
:bro:see:`Broker::peer_added` and :bro:see:`Broker::peer_lost` events.
|
||||
|
||||
Remote Events
|
||||
=============
|
||||
-------------
|
||||
|
||||
Receiving remote events is similar to remote prints. Just use the
|
||||
:bro:see:`Broker::subscribe_to_events` function and possibly define any
|
||||
new events along with handlers that peers may want to send.
|
||||
To receive remote events, you need to first subscribe to a "topic" to which
|
||||
the events are being sent. A topic is just a string chosen by the sender,
|
||||
and named in a way that helps organize events into various categories.
|
||||
See the :ref:`topic naming conventions section <broker_topic_naming>` for
|
||||
more on how topics work and are chosen.
|
||||
|
||||
Use the :bro:see:`Broker::subscribe` function to subscribe to topics and
|
||||
define any event handlers for events that peers will send.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-listener.bro
|
||||
|
||||
There are two different ways to send events. The first is to call the
|
||||
:bro:see:`Broker::send_event` function directly. The second option is to call
|
||||
the :bro:see:`Broker::auto_event` function where you specify a
|
||||
particular event that will be automatically sent to peers whenever the
|
||||
event is called locally via the normal event invocation syntax.
|
||||
There are two different ways to send events.
|
||||
|
||||
The first is to call the :bro:see:`Broker::publish` function which you can
|
||||
supply directly with the event and its arguments or give it the return value of
|
||||
:bro:see:`Broker::make_event` in case you need to send the same event/args
|
||||
multiple times. When publishing events like this, local event handlers for
|
||||
the event are not called.
|
||||
|
||||
The second option is to call the :bro:see:`Broker::auto_publish` function where
|
||||
you specify a particular event that will be automatically sent to peers
|
||||
whenever the event is called locally via the normal event invocation syntax.
|
||||
When auto-publishing events, local event handlers for the event are called
|
||||
in addition to sending the event to any subscribed peers.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-connector.bro
|
||||
|
||||
Again, the subscription model is prefix-based.
|
||||
|
||||
Message Format
|
||||
--------------
|
||||
|
||||
For other applications that want to exchange event messages with Bro,
|
||||
the Broker message format is:
|
||||
|
||||
.. code:: c++
|
||||
|
||||
broker::message{std::string{}, ...};
|
||||
|
||||
The first parameter is the name of the event and the remaining ``...``
|
||||
are its arguments, which are any of the supported Broker data types as
|
||||
they correspond to the Bro types for the event named in the first
|
||||
parameter of the message.
|
||||
Note that the subscription model is prefix-based, meaning that if you subscribe
|
||||
to the "bro/events" topic prefix you would receive events that are published
|
||||
to topic names "bro/events/foo" and "bro/events/bar" but not "bro/misc".
|
||||
|
||||
Remote Logging
|
||||
==============
|
||||
--------------
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/testlog.bro
|
||||
|
||||
Use the :bro:see:`Broker::subscribe_to_logs` function to advertise interest
|
||||
in logs written by peers. The topic names that Bro uses are implicitly of the
|
||||
form "bro/log/<stream-name>".
|
||||
To toggle remote logs, redef :bro:see:`Log::enable_remote_logging`.
|
||||
Use the :bro:see:`Broker::subscribe` function to advertise interest
|
||||
in logs written by peers. The topic names that Bro uses are determined by
|
||||
:bro:see:`Broker::log_topic`.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-listener.bro
|
||||
|
||||
To send remote logs either redef :bro:see:`Log::enable_remote_logging` or
|
||||
use the :bro:see:`Broker::enable_remote_logs` function. The former
|
||||
allows any log stream to be sent to peers while the latter enables remote
|
||||
logging for particular streams.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-connector.bro
|
||||
|
||||
Message Format
|
||||
--------------
|
||||
Note that logging events are only raised locally on the node that performs
|
||||
the :bro:see:`Log::write` and not automatically published to peers.
|
||||
|
||||
For other applications that want to exchange log messages with Bro,
|
||||
the Broker message format is:
|
||||
|
||||
.. code:: c++
|
||||
|
||||
broker::message{broker::enum_value{}, broker::record{}};
|
||||
|
||||
The enum value corresponds to the stream's :bro:see:`Log::ID` value, and
|
||||
the record corresponds to a single entry of that log's columns record,
|
||||
in this case a ``Test::Info`` value.
|
||||
|
||||
Tuning Access Control
|
||||
=====================
|
||||
|
||||
By default, endpoints do not restrict the message topics that they send
|
||||
to peers and do not restrict what message topics and data store
|
||||
identifiers get advertised to peers. These are the default
|
||||
:bro:see:`Broker::EndpointFlags` supplied to :bro:see:`Broker::enable`.
|
||||
|
||||
If not using the ``auto_publish`` flag, one can use the
|
||||
:bro:see:`Broker::publish_topic` and :bro:see:`Broker::unpublish_topic`
|
||||
functions to manipulate the set of message topics (must match exactly)
|
||||
that are allowed to be sent to peer endpoints. These settings take
|
||||
precedence over the per-message ``peers`` flag supplied to functions
|
||||
that take a :bro:see:`Broker::SendFlags` such as :bro:see:`Broker::send_print`,
|
||||
:bro:see:`Broker::send_event`, :bro:see:`Broker::auto_event` or
|
||||
:bro:see:`Broker::enable_remote_logs`.
|
||||
|
||||
If not using the ``auto_advertise`` flag, one can use the
|
||||
:bro:see:`Broker::advertise_topic` and
|
||||
:bro:see:`Broker::unadvertise_topic` functions
|
||||
to manipulate the set of topic prefixes that are allowed to be
|
||||
advertised to peers. If an endpoint does not advertise a topic prefix, then
|
||||
the only way peers can send messages to it is via the ``unsolicited``
|
||||
flag of :bro:see:`Broker::SendFlags` and choosing a topic with a matching
|
||||
prefix (i.e. the full topic may be longer than the receiver's prefix;
just the prefix needs to match).
|
||||
.. _data_store_example:
|
||||
|
||||
Distributed Data Stores
|
||||
=======================
|
||||
-----------------------
|
||||
|
||||
There are three flavors of key-value data store interfaces: master,
|
||||
clone, and frontend.
|
||||
See :doc:`/scripts/base/frameworks/broker/store.bro` for an overview
|
||||
of the Broker data store API.
|
||||
|
||||
A frontend is the common interface to query and modify data stores.
|
||||
That is, a clone is a specific type of frontend and a master is also a
|
||||
specific type of frontend, but a standalone frontend can also exist to
|
||||
e.g. query and modify the contents of a remote master store without
|
||||
actually "owning" any of the contents itself.
|
||||
There are two flavors of key-value data store interfaces: master and clone.
|
||||
|
||||
A master data store can be cloned from remote peers which may then
|
||||
perform lightweight, local queries against the clone, which
|
||||
|
@ -177,24 +363,217 @@ modify their content directly, instead they send modifications to the
|
|||
centralized master store which applies them and then broadcasts them to
|
||||
all clones.
|
||||
|
||||
Master and clone stores get to choose what type of storage backend to
|
||||
use. E.g. In-memory versus SQLite for persistence. Note that if clones
|
||||
are used, then data store sizes must be able to fit within memory
|
||||
regardless of the storage backend as a single snapshot of the master
|
||||
store is sent in a single chunk to initialize the clone.
|
||||
Master stores get to choose what type of storage backend to
|
||||
use. E.g. In-memory versus SQLite for persistence.
|
||||
|
||||
Data stores also support expiration on a per-key basis either using an
|
||||
absolute point in time or a relative amount of time since the entry's
|
||||
last modification time.
|
||||
Data stores also support expiration on a per-key basis using an amount of
|
||||
time relative to the entry's last modification time.
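For example, a minimal sketch (assuming the optional expiry argument of
:bro:see:`Broker::put`, an interval measured from the last modification):

.. code:: bro

    global h: opaque of Broker::Store;

    event bro_init()
        {
        h = Broker::create_master("mystore");

        # "session" is removed 30 seconds after its last modification.
        Broker::put(h, "session", "some value", 30sec);
        }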
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-listener.bro
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-connector.bro
|
||||
|
||||
In the above example, if a local copy of the store contents isn't
|
||||
needed, just replace the :bro:see:`Broker::create_clone` call with
|
||||
:bro:see:`Broker::create_frontend`. Queries will then be made against
|
||||
the remote master store instead of the local clone.
|
||||
|
||||
Note that all data store queries must be made within Bro's asynchronous
|
||||
``when`` statements and must specify a timeout block.
|
||||
|
||||
Cluster Framework Examples
|
||||
==========================
|
||||
|
||||
This section contains a few brief examples of how various communication
|
||||
patterns one might use when developing Bro scripts that are to operate in
|
||||
the context of a cluster.
|
||||
|
||||
A Reminder About Events and Module Namespaces
|
||||
---------------------------------------------
|
||||
|
||||
For simplicity, the following examples do not use any modules/namespaces.
|
||||
If you choose to use them within your own code, it's important to
|
||||
remember that the ``event`` and ``schedule`` dispatching statements
|
||||
should always use the fully-qualified event name.
|
||||
|
||||
For example, this will likely not work as expected:
|
||||
|
||||
.. code:: bro
|
||||
|
||||
module MyModule;
|
||||
|
||||
export {
|
||||
global my_event: event();
|
||||
}
|
||||
|
||||
event my_event()
|
||||
{
|
||||
print "got my event";
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
event my_event();
|
||||
schedule 10sec { my_event() };
|
||||
}
|
||||
|
||||
This code runs without errors; however, the local ``my_event`` handler
will never be called, and neither will any remote handlers, even if
:bro:see:`Broker::auto_publish` was used elsewhere for it. Instead, at
a minimum you would need to change the ``bro_init()`` handler:
|
||||
|
||||
.. code:: bro
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
event MyModule::my_event();
|
||||
schedule 10sec { MyModule::my_event() };
|
||||
}
|
||||
|
||||
An easy rule of thumb to remember: always use explicit module
namespace scoping and you can't go wrong:
|
||||
|
||||
.. code:: bro
|
||||
|
||||
module MyModule;
|
||||
|
||||
export {
|
||||
global MyModule::my_event: event();
|
||||
}
|
||||
|
||||
event MyModule::my_event()
|
||||
{
|
||||
print "got my event";
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
event MyModule::my_event();
|
||||
schedule 10sec { MyModule::my_event() };
|
||||
}
|
||||
|
||||
Note that other identifiers in Bro do not have this inconsistency
related to module namespacing; it's just events that require the
explicit scoping.
|
||||
|
||||
Manager Sending Events To Workers
|
||||
---------------------------------
|
||||
|
||||
This is fairly straightforward: we just need a topic name to which we
know all workers are subscribed, combined with the event we want to
send them.
|
||||
|
||||
.. code:: bro
|
||||
|
||||
event manager_to_workers(s: string)
|
||||
{
|
||||
print "got event from manager", s;
|
||||
}
|
||||
|
||||
event some_event_handled_on_manager()
|
||||
{
|
||||
Broker::publish(Cluster::worker_topic, manager_to_workers,
|
||||
"hello v0");
|
||||
|
||||
# If you know this event is only handled on the manager, you don't
|
||||
# need any of the following conditions, they're just here as an
|
||||
# example of how you can further discriminate based on node identity.
|
||||
|
||||
# Can check based on the name of the node.
|
||||
if ( Cluster::node == "manager" )
|
||||
Broker::publish(Cluster::worker_topic, manager_to_workers,
|
||||
"hello v1");
|
||||
|
||||
# Can check based on the type of the node.
|
||||
if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
Broker::publish(Cluster::worker_topic, manager_to_workers,
|
||||
"hello v2");
|
||||
|
||||
# The run-time overhead of the above conditions can even be
|
||||
# eliminated by using the following conditional directives.
|
||||
# It's evaluated once per node at parse-time and, if false,
|
||||
# any code within is just ignored / treated as not existing at all.
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
Broker::publish(Cluster::worker_topic, manager_to_workers,
|
||||
"hello v3");
|
||||
@endif
|
||||
}
|
||||
|
||||
Worker Sending Events To Manager
|
||||
--------------------------------
|
||||
|
||||
This should look almost identical to the previous case of sending an
event from the manager to workers, except it simply changes the topic
name to one to which the manager is subscribed.
|
||||
|
||||
.. code:: bro
|
||||
|
||||
event worker_to_manager(worker_name: string)
|
||||
{
|
||||
print "got event from worker", worker_name;
|
||||
}
|
||||
|
||||
event some_event_handled_on_worker()
|
||||
{
|
||||
Broker::publish(Cluster::manager_topic, worker_to_manager,
|
||||
Cluster::node);
|
||||
}
|
||||
|
||||
Worker Sending Events To All Workers
|
||||
------------------------------------
|
||||
|
||||
Since workers are not directly connected to each other in the cluster
topology, this type of communication is a bit different from what we
did before. Instead of using :bro:see:`Broker::publish`, we use "relay"
calls to hop the message through an intermediary node that *is*
connected to the other workers.
|
||||
|
||||
.. code:: bro
|
||||
|
||||
event worker_to_workers(worker_name: string)
|
||||
{
|
||||
print "got event from worker", worker_name;
|
||||
}
|
||||
|
||||
event some_event_handled_on_worker()
|
||||
{
|
||||
# We know the manager is connected to all workers, so we could
|
||||
# choose to relay the event across it. Note that sending the event
|
||||
# this way will not allow the manager to handle it, even if it
|
||||
# does have an event handler.
|
||||
Broker::relay(Cluster::manager_topic, Cluster::worker_topic,
|
||||
worker_to_workers, Cluster::node + " (via manager)");
|
||||
|
||||
# We also know that any given proxy is connected to all workers,
|
||||
# though now we have a choice of which proxy to use. If we
|
||||
# want to distribute the work associated with relaying uniformly,
|
||||
# we can use a round-robin strategy. The key used here is simply
|
||||
# used by the cluster framework internally to keep track of
|
||||
# which node is up next in the round-robin.
|
||||
Cluster::relay_rr(Cluster::proxy_pool, "example_key",
|
||||
Cluster::worker_topic, worker_to_workers,
|
||||
Cluster::node + " (via a proxy)");
|
||||
}
|
||||
|
||||
Worker Distributing Events Uniformly Across Proxies
|
||||
---------------------------------------------------
|
||||
|
||||
If you want to offload some data/work from a worker to your proxies,
you can make use of a `Highest Random Weight (HRW) hashing
<https://en.wikipedia.org/wiki/Rendezvous_hashing>`_ distribution strategy
to uniformly map an arbitrary key space across all available proxies.
|
||||
|
||||
.. code:: bro
|
||||
|
||||
event worker_to_proxies(worker_name: string)
|
||||
{
|
||||
print "got event from worker", worker_name;
|
||||
}
|
||||
|
||||
global my_counter = 0;
|
||||
|
||||
event some_event_handled_on_worker()
|
||||
{
|
||||
# The key here is used to choose which proxy shall receive
|
||||
# the event. Different keys may map to different nodes, but
|
||||
# any given key always maps to the same node provided the
|
||||
# pool of nodes remains consistent. If a proxy goes offline,
|
||||
# that key maps to a different node until the original comes
|
||||
# back up.
|
||||
Cluster::publish_hrw(Cluster::proxy_pool,
|
||||
cat("example_key", ++my_counter),
|
||||
worker_to_proxies, Cluster::node);
|
||||
}
|
||||
|
|
BIN
doc/frameworks/broker/cluster-layout.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 55 KiB |
2
doc/frameworks/broker/cluster-layout.xml
Normal file
|
@ -0,0 +1,2 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<mxfile userAgent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36" version="9.0.3-1" editor="www.draw.io" type="device"><diagram name="Page-1" id="42789a77-a242-8287-6e28-9cd8cfd52e62">7VxLc6M4EP41Po4LSUjAcZJJZg+7VVOVrd3ZowwKZgYjFyaxvb9+hZEwEhA/eITs2JdYrZYE0ve1ultyZuh+tfua0vXyDx6weAatYDdDX2ZQfDxL/Mkl+0Li2G4hCNMoKETgKHiK/mVSKNuFL1HANppixnmcRWtd6PMkYX6myWia8q2u9sxjfdQ1DVlN8OTTuC79OwqypZQCyzpW/MaicCmHdrGsWFD/Z5jyl0SON4Po+fApqldU9SX1N0sa8G1FhB5m6D7lPCu+rXb3LM7nVk1b0e6xpbZ87pQl2VkNGEXAcTwrgD71XPQJoKKLVxq/MPUOhyfN9mp2WCAmSxYTnog/d4dXZnmnQJSW2SqWX2O6YPFdOSv3PObpsdkmo2n2OV8wQ/YYxXkPlipLiGBRZkmgWvgx3Wwi/89llBQVshkoSpVGP1iW7WWZvmRciHiaLXnIExr/zvlattpkKf/J1FOK1fOIgz6TskahIdd95kn2SFdRnIP8L5YGNKFSLEcCUJYrHVqHj5An/KEyj4dH3kXZ9/yt51iW/pFzcHxWqVpfZ7n0G/6S+qxtcSWVaBqyrEXHK3TyNa50LNHzlfEVy9K9UEhZTLPoVScMlbwLSz3ZVKwY3VcU1jxKsk2l52+5QCgoE6KYJg0IcqCOY1MfQTFp1Ra2bVdbiC/FM6hS5WWOogM7zmWKfWPKL80UNAmmAHIZUwB0RmdKjSgrAYCQpTNIYjG7d4v8W5iVq1VlUByLTT9H+3YZZexpTQ9rthV+h06fI68OVFD7al7l81Xky4pTLGsANW0BtWBQRZOBADOnO9hpHIVJzliBVzFDbwD4laUZ270JPVWL9BVHyrpuj86NctmWFbdGQasJrBW4XIYG2GA2Cxhs1jRRQFinfLf/BJsQUkjEuFX9qQEncLyFZZ0DnOdnRnx/msCBUAcOwHXgOLgOHDwEcJo80zpwYh4Ky5LbnI+BE7Ig+CwDI4IIOFWcePoeZJN3hIlXg4mERIN7dlv7HjYXD7/b4t+CVQ1QXxzvrm3T6Qac0uGuuNvFg4sV+14tKEe87rT35JrDM1xzMIhrXvOlbYLmnmMjDCxgI9dBjsYED84RJB7GmCAHQ7334h1lh29Fth6YE9GLhRwxAlbOerkj2/Y8790Rr01sYBFjmGKaasO0Rxka9S5z/JsC4jbPDtw8u3e12kbUOKZjh29Ge1yjrTImNaNtDWW07enYaAKN/Agi1xlirLwN1RExOhrT1JIbh0biUMmVObIcjS9zizRSpjk52UimQ2ffWBqJpc8t+2Eqe2PYMKn8GjGQbTAM4OsYZqO+GdaSArWMiAVoh2SdE6DOjZwjRyU1plVoS1yDtfC8je56bl4VsgxzmlAnK3J1jlnOdWRFjt4RgEZHPZEV4mHJqixphZx63FBhYnu0ezU57lxs42ZyNMXcBdL7ctO8Oi7tcWBonu/a5lFDCwwvNvVQD5e9nsFDLrPsJp5OOeij4GxANE30dgFCRvRrGbdkTuirMHgo/d5tnNsNpsfNFI9q+Grb+phQhSNZQqLvo90tYYewEoFuQGmwZxXM1C3avJNjORGbNo17IMjM6J2yaeRCG9VR37BpdR4YUSS2+7WB9WPBpuT0lqc/i6PCD5qdXrRzwsxOuz6bana6TBKPcYAMG5BxC9ff4xDRmtumfzFooH5OEu0WlzeP4wwblt/uoU/nlGhOiJn5nmYObaSEN1Fucpnwdq/jKnb1jgA09rO+cmjuwDk0XOPmFTHCIKHk4EedgJwBzJFiSdfMqoGB8KTOUAfKqjmXmfoWhNXzpu8UabaA8PBI/WFsJOMHjPN0bOYrumLsVPx6Kv48s/5UPFp7z57jURWQdgX5W0dfo2TrhjSkw5xGDJM6s/pZT2M1p2WyejVYI0VW4NRRU0+b4qVnChem0zqp9x6dNd0/as2mfdy7nv+PbNqYv8ZoSrP+wmH7G1Z4oGTamCcfI13hhMZ9rauvcNrmTTUT8n1dMDN+EmPsBd19x/ovam8sGzU5dpELNUc5UrT8WemZX5ccO8e9Gomc5W1P5Wp4BqfOJWf5EwTl4pgd9UVO0is5RfH471oK9eP/xEEP/wE=</diagram></mxfile>
|
|
@ -1,18 +1,12 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "connector";
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect("127.0.0.1", broker_port, 1sec);
|
||||
Broker::peer("127.0.0.1");
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string,
|
||||
peer_port: port,
|
||||
peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::outgoing_connection_established",
|
||||
peer_address, peer_port, peer_name;
|
||||
print "peer added", endpoint;
|
||||
terminate();
|
||||
}
|
||||
|
|
|
@ -1,20 +1,17 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "listener";
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::listen(broker_port, "127.0.0.1");
|
||||
Broker::listen("127.0.0.1");
|
||||
}
|
||||
|
||||
event Broker::incoming_connection_established(peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::incoming_connection_established", peer_name;
|
||||
print "peer added", endpoint;
|
||||
}
|
||||
|
||||
event Broker::incoming_connection_broken(peer_name: string)
|
||||
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::incoming_connection_broken", peer_name;
|
||||
print "peer lost", endpoint;
|
||||
terminate();
|
||||
}
|
||||
|
|
|
@ -1,31 +1,35 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "connector";
|
||||
global my_event: event(msg: string, c: count);
|
||||
global my_auto_event: event(msg: string, c: count);
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect("127.0.0.1", broker_port, 1sec);
|
||||
Broker::auto_event("bro/event/my_auto_event", my_auto_event);
|
||||
Broker::peer("127.0.0.1");
|
||||
Broker::auto_publish("bro/event/my_auto_event", my_auto_event);
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string,
|
||||
peer_port: port,
|
||||
peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::outgoing_connection_established",
|
||||
peer_address, peer_port, peer_name;
|
||||
Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "hi", 0));
|
||||
print "peer added", endpoint;
|
||||
Broker::publish("bro/event/my_event", my_event, "hi", 0);
|
||||
event my_auto_event("stuff", 88);
|
||||
Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "...", 1));
|
||||
Broker::publish("bro/event/my_event", my_event, "...", 1);
|
||||
event my_auto_event("more stuff", 51);
|
||||
Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "bye", 2));
|
||||
local e = Broker::make_event(my_event, "bye", 2);
|
||||
Broker::publish("bro/event/my_event", e);
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_broken(peer_address: string,
|
||||
peer_port: port)
|
||||
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
terminate();
|
||||
}
|
||||
|
||||
event my_event(msg: string, c: count)
|
||||
{
|
||||
print "got my_event", msg, c;
|
||||
}
|
||||
|
||||
event my_auto_event(msg: string, c: count)
|
||||
{
|
||||
print "got my_auto_event", msg, c;
|
||||
}
|
||||
|
|
|
@ -1,20 +1,17 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "listener";
|
||||
global msg_count = 0;
|
||||
global my_event: event(msg: string, c: count);
|
||||
global my_auto_event: event(msg: string, c: count);
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::subscribe_to_events("bro/event/");
|
||||
Broker::listen(broker_port, "127.0.0.1");
|
||||
Broker::subscribe("bro/event/");
|
||||
Broker::listen("127.0.0.1");
|
||||
}
|
||||
|
||||
event Broker::incoming_connection_established(peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::incoming_connection_established", peer_name;
|
||||
print "peer added", endpoint;
|
||||
}
|
||||
|
||||
event my_event(msg: string, c: count)
|
||||
|
|
|
@ -1,17 +1,11 @@
|
|||
@load ./testlog
|
||||
|
||||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "connector";
|
||||
redef Log::enable_local_logging = F;
|
||||
redef Log::enable_remote_logging = F;
|
||||
global n = 0;
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::enable_remote_logs(Test::LOG);
|
||||
Broker::connect("127.0.0.1", broker_port, 1sec);
|
||||
Broker::peer("127.0.0.1");
|
||||
}
|
||||
|
||||
event do_write()
|
||||
|
@ -24,17 +18,19 @@ event do_write()
|
|||
event do_write();
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string,
|
||||
peer_port: port,
|
||||
peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::outgoing_connection_established",
|
||||
peer_address, peer_port, peer_name;
|
||||
print "peer added", endpoint;
|
||||
event do_write();
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_broken(peer_address: string,
|
||||
peer_port: port)
|
||||
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
terminate();
|
||||
}
|
||||
|
||||
event Test::log_test(rec: Test::Info)
|
||||
{
|
||||
print "wrote log", rec;
|
||||
Broker::publish("bro/logs/forward/test", Test::log_test, rec);
|
||||
}
|
||||
|
|
|
@ -1,24 +1,21 @@
|
|||
@load ./testlog
|
||||
|
||||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "listener";
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::subscribe_to_logs("bro/log/Test::LOG");
|
||||
Broker::listen(broker_port, "127.0.0.1");
|
||||
Broker::subscribe("bro/logs");
|
||||
Broker::listen("127.0.0.1");
|
||||
}
|
||||
|
||||
event Broker::incoming_connection_established(peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::incoming_connection_established", peer_name;
|
||||
print "peer added", endpoint;
|
||||
}
|
||||
|
||||
event Test::log_test(rec: Test::Info)
|
||||
{
|
||||
print "wrote log", rec;
|
||||
print "got log event", rec;
|
||||
|
||||
if ( rec$num == 5 )
|
||||
terminate();
|
||||
|
|
|
@ -1,26 +0,0 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "connector";
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect("127.0.0.1", broker_port, 1sec);
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string,
|
||||
peer_port: port,
|
||||
peer_name: string)
|
||||
{
|
||||
print "Broker::outgoing_connection_established",
|
||||
peer_address, peer_port, peer_name;
|
||||
Broker::send_print("bro/print/hi", "hello");
|
||||
Broker::send_print("bro/print/stuff", "...");
|
||||
Broker::send_print("bro/print/bye", "goodbye");
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_broken(peer_address: string,
|
||||
peer_port: port)
|
||||
{
|
||||
terminate();
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "listener";
|
||||
global msg_count = 0;
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::subscribe_to_prints("bro/print/");
|
||||
Broker::listen(broker_port, "127.0.0.1");
|
||||
}
|
||||
|
||||
event Broker::incoming_connection_established(peer_name: string)
|
||||
{
|
||||
print "Broker::incoming_connection_established", peer_name;
|
||||
}
|
||||
|
||||
event Broker::print_handler(msg: string)
|
||||
{
|
||||
++msg_count;
|
||||
print "got print message", msg;
|
||||
|
||||
if ( msg_count == 3 )
|
||||
terminate();
|
||||
}
|
|
@ -1,53 +1,29 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
|
||||
global h: opaque of Broker::Handle;
|
||||
|
||||
function dv(d: Broker::Data): Broker::DataVector
|
||||
{
|
||||
local rval: Broker::DataVector;
|
||||
rval[0] = d;
|
||||
return rval;
|
||||
}
|
||||
global h: opaque of Broker::Store;
|
||||
|
||||
global ready: event();
|
||||
|
||||
event Broker::outgoing_connection_broken(peer_address: string,
|
||||
peer_port: port)
|
||||
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
terminate();
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string,
|
||||
peer_port: port,
|
||||
peer_name: string)
|
||||
{
|
||||
local myset: set[string] = {"a", "b", "c"};
|
||||
local myvec: vector of string = {"alpha", "beta", "gamma"};
|
||||
h = Broker::create_master("mystore");
|
||||
Broker::insert(h, Broker::data("one"), Broker::data(110));
|
||||
Broker::insert(h, Broker::data("two"), Broker::data(223));
|
||||
Broker::insert(h, Broker::data("myset"), Broker::data(myset));
|
||||
Broker::insert(h, Broker::data("myvec"), Broker::data(myvec));
|
||||
Broker::increment(h, Broker::data("one"));
|
||||
Broker::decrement(h, Broker::data("two"));
|
||||
Broker::add_to_set(h, Broker::data("myset"), Broker::data("d"));
|
||||
Broker::remove_from_set(h, Broker::data("myset"), Broker::data("b"));
|
||||
Broker::push_left(h, Broker::data("myvec"), dv(Broker::data("delta")));
|
||||
Broker::push_right(h, Broker::data("myvec"), dv(Broker::data("omega")));
|
||||
|
||||
when ( local res = Broker::size(h) )
|
||||
{
|
||||
print "master size", res;
|
||||
event ready();
|
||||
}
|
||||
timeout 10sec
|
||||
{ print "timeout"; }
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect("127.0.0.1", broker_port, 1secs);
|
||||
Broker::auto_event("bro/event/ready", ready);
|
||||
h = Broker::create_master("mystore");
|
||||
|
||||
local myset: set[string] = {"a", "b", "c"};
|
||||
local myvec: vector of string = {"alpha", "beta", "gamma"};
|
||||
Broker::put(h, "one", 110);
|
||||
Broker::put(h, "two", 223);
|
||||
Broker::put(h, "myset", myset);
|
||||
Broker::put(h, "myvec", myvec);
|
||||
Broker::increment(h, "one");
|
||||
Broker::decrement(h, "two");
|
||||
Broker::insert_into_set(h, "myset", "d");
|
||||
Broker::remove_from(h, "myset", "b");
|
||||
Broker::push(h, "myvec", "delta");
|
||||
|
||||
Broker::peer("127.0.0.1");
|
||||
}
|
||||
|
|
|
@ -1,43 +1,79 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
|
||||
global h: opaque of Broker::Handle;
|
||||
global h: opaque of Broker::Store;
|
||||
global expected_key_count = 4;
|
||||
global key_count = 0;
|
||||
|
||||
# Lookup a value in the store based on an arbitrary key string.
|
||||
function do_lookup(key: string)
|
||||
{
|
||||
when ( local res = Broker::lookup(h, Broker::data(key)) )
|
||||
when ( local res = Broker::get(h, key) )
|
||||
{
|
||||
++key_count;
|
||||
print "lookup", key, res;
|
||||
|
||||
if ( key_count == expected_key_count )
|
||||
# End after we iterated over looking up each key in the store twice.
|
||||
if ( key_count == expected_key_count * 2 )
|
||||
terminate();
|
||||
}
|
||||
timeout 10sec
|
||||
# All data store queries must specify a timeout
|
||||
timeout 3sec
|
||||
{ print "timeout", key; }
|
||||
}
|
||||
|
||||
event ready()
|
||||
event check_keys()
|
||||
{
|
||||
h = Broker::create_clone("mystore");
|
||||
|
||||
# Here we just query for the list of keys in the store, and show how to
|
||||
# look up each one's value.
|
||||
when ( local res = Broker::keys(h) )
|
||||
{
|
||||
print "clone keys", res;
|
||||
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 0)));
|
||||
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 1)));
|
||||
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 2)));
|
||||
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 3)));
|
||||
|
||||
if ( res?$result )
|
||||
{
|
||||
# Since we know that the keys we are storing are all strings,
|
||||
# we can conveniently cast the result of Broker::keys to
|
||||
# a native Bro type, namely 'set[string]'.
|
||||
for ( k in res$result as string_set )
|
||||
do_lookup(k);
|
||||
|
||||
# Alternatively, we can use a generic iterator to iterate
|
||||
# over the results (which we know is of the 'set' type because
|
||||
# that's what Broker::keys() always returns). If the keys
|
||||
# we stored were not all of the same type, then you would
|
||||
# likely want to use this method of inspecting the store's keys.
|
||||
local i = Broker::set_iterator(res$result);
|
||||
|
||||
while ( ! Broker::set_iterator_last(i) )
|
||||
{
|
||||
do_lookup(Broker::set_iterator_value(i) as string);
|
||||
Broker::set_iterator_next(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
timeout 10sec
|
||||
{ print "timeout"; }
|
||||
# All data store queries must specify a timeout.
|
||||
# You also might see timeouts on connecting/initializing a clone since
|
||||
# it hasn't had time to get fully set up yet.
|
||||
timeout 1sec
|
||||
{
|
||||
print "timeout";
|
||||
schedule 1sec { check_keys() };
|
||||
}
|
||||
}
|
||||
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "peer added";
|
||||
# We could create a clone early, like in bro_init and it will periodically
|
||||
# try to synchronize with its master once it connects, however, we just
|
||||
# create it now since we know the peer w/ the master store has just
|
||||
# connected.
|
||||
h = Broker::create_clone("mystore");
|
||||
|
||||
event check_keys();
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::subscribe_to_events("bro/event/ready");
|
||||
Broker::listen(broker_port, "127.0.0.1");
|
||||
Broker::listen("127.0.0.1");
|
||||
}
|
||||
|
|
|
@ -13,6 +13,5 @@ export {
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Broker::enable();
|
||||
Log::create_stream(Test::LOG, [$columns=Test::Info, $ev=log_test, $path="test"]);
|
||||
}
|
||||
|
|
150
doc/frameworks/configuration.rst
Normal file
|
@ -0,0 +1,150 @@
|
|||
|
||||
.. _framework-configuration:
|
||||
|
||||
=======================
|
||||
Configuration Framework
|
||||
=======================
|
||||
|
||||
.. rst-class:: opening
|
||||
|
||||
Bro includes a "configuration framework" that allows
|
||||
updating script options dynamically at runtime. This functionality
|
||||
consists of several components: an "option" declaration, the
|
||||
ability to specify input files to enable changing the value of options at
|
||||
runtime, a couple of built-in functions, and a log file "config.log"
|
||||
which contains information about every change to option values.
|
||||
|
||||
|
||||
.. contents::
|
||||
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
The configuration framework provides an alternative to using Bro
|
||||
script constants to store various Bro settings.
|
||||
|
||||
In general, traditional constants can be used when a value is not
|
||||
expected to change at runtime, but they cannot be used for values that
|
||||
need to be modified occasionally. While a "redef" allows a
|
||||
re-definition of an already defined constant in Bro, these
|
||||
redefinitions can only be performed when Bro first starts. Afterwards,
|
||||
constants can no longer be modified.
|
||||
|
||||
However, it is clearly desirable to be able to change at runtime many
|
||||
of the configuration options that Bro offers. Having to restart Bro
|
||||
can be time-consuming and causes Bro to lose all connection state and
|
||||
knowledge that it accumulated. Bro's configuration framework solves
|
||||
this problem by allowing changing configuration options at runtime.
|
||||
|
||||
Declaring options
|
||||
-----------------
|
||||
|
||||
The "option" keyword allows variables to be declared as configuration options.
|
||||
|
||||
.. code:: bro
|
||||
|
||||
module TestModule;
|
||||
|
||||
export {
|
||||
option my_networks: set[subnet] = {};
|
||||
option enable_feature = F;
|
||||
option hostname = "testsystem";
|
||||
}
|
||||
|
||||
The rules regarding options can be thought of as being in between global
|
||||
variables and constants. Like global variables, options cannot be declared
|
||||
inside a function, hook, or event handler. Like constants, options must be
|
||||
initialized when declared. The value of an option can change at runtime,
|
||||
but options cannot be assigned a new value using normal assignments.
|
||||
|
||||
|
||||
Changing options
|
||||
----------------
|
||||
|
||||
The configuration framework facilitates reading in new option values
|
||||
from external files at runtime.
|
||||
|
||||
Configuration files contain a mapping between option names and their values.
|
||||
The format for these files looks like this:
|
||||
|
||||
[option name][tab/spaces][new value]
|
||||
|
||||
Configuration files can be specified by adding them to Config::config_files.
|
||||
For example, simply add something like this to local.bro:
|
||||
|
||||
.. code:: bro
|
||||
|
||||
redef Config::config_files += { "/path/to/config.dat" };
|
||||
|
||||
The specified configuration file will then be monitored continuously for changes,
|
||||
so that writing ``TestModule::enable_feature T`` into that file will
|
||||
automatically update the option's value accordingly. Here is an example
|
||||
configuration file::
|
||||
|
||||
TestModule::my_networks 10.0.12.0/24,192.168.17.0/24
|
||||
TestModule::enable_feature T
|
||||
TestModule::hostname host-1
|
||||
|
||||
Internally, the configuration framework uses the Bro input framework
|
||||
with a type of input reader specifically for reading config files. Users
|
||||
familiar with the Bro input framework might be aware that the input framework
|
||||
is usually very strict about the syntax of input files. This is not true
|
||||
for configuration files: the files need no header lines and either
|
||||
tabs or spaces are accepted as separators.
|
||||
|
||||
If you inspect the configuration framework scripts, you will notice that the
|
||||
scripts simply catch events from the input framework and then a built-in
|
||||
function :bro:see:`Option::set` is called to set an option to the new value.
|
||||
If you want to change an option yourself during runtime, you can
|
||||
call Option::set directly from a script.
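For example, a minimal sketch of doing so (the option name is the one
declared earlier; the return value indicates whether the change was
accepted):

.. code:: bro

    event bro_init()
        {
        # Change an option programmatically, just as the config reader would.
        if ( ! Option::set("TestModule::enable_feature", T) )
            print "could not set TestModule::enable_feature";
        }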
|
||||
|
||||
The log file "config.log" contains information about each configuration
|
||||
change that occurs during runtime.
|
||||
|
||||
|
||||
Change handlers
|
||||
---------------
|
||||
|
||||
A change handler is a user-defined function that is called automatically
|
||||
each time an option value changes. This example shows how to register a
|
||||
change handler for an option that has a data type of "addr" (for other
|
||||
data types, the return type and 2nd parameter data type must be adjusted
|
||||
accordingly):
|
||||
|
||||
.. code:: bro
|
||||
|
||||
option testaddr = 127.0.0.1;
|
||||
|
||||
# Note: the data type of 2nd parameter and return type must match
|
||||
function change_addr(ID: string, new_value: addr): addr
|
||||
{
|
||||
print fmt("Value of %s changed from %s to %s", ID, testaddr, new_value);
|
||||
return new_value;
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Option::set_change_handler("testaddr", change_addr);
|
||||
}
|
||||
|
||||
Each time the specified option value is changed, the change handler
|
||||
function will be called before the change is performed. The value returned
|
||||
by the change handler is the value finally assigned to the option. This
|
||||
allows, for example, checking of values to reject invalid input (the original
|
||||
value can be returned to reject the change).
|
||||
|
||||
A change handler can optionally have a third argument, which is the location
|
||||
string (this is normally the pathname of the configuration file that triggered
|
||||
the change).
|
||||
|
||||
It is also possible to chain together multiple change handlers. In this
|
||||
case, the value returned by the first change handler is the "new value" seen
|
||||
by the next change handler, and so on. The built-in function
|
||||
:bro:see:`Option::set_change_handler` takes an optional third argument
|
||||
that can specify a priority for the handlers.
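A short sketch of chaining (assuming, as with event handlers, that higher
priorities run first; the option name and values are illustrative):

.. code:: bro

    option max_sessions = 100;

    function clamp(ID: string, new_value: count): count
        {
        # Runs first (priority 10): cap overly large values.
        return new_value > 1000 ? 1000 : new_value;
        }

    function announce(ID: string, new_value: count): count
        {
        # Runs second (priority 5): sees the value returned by clamp().
        print fmt("%s is changing to %s", ID, new_value);
        return new_value;
        }

    event bro_init()
        {
        Option::set_change_handler("max_sessions", clamp, 10);
        Option::set_change_handler("max_sessions", announce, 5);
        }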
|
||||
|
||||
Note that change handlers are also used internally by the
|
||||
configuration framework. If you look at the script level source code of
|
||||
the config framework, you can see that change handlers are used for
|
||||
logging the option changes to config.log.
|
|
@ -8,11 +8,13 @@ GeoLocation
|
|||
.. rst-class:: opening
|
||||
|
||||
During the process of creating policy scripts the need may arise
|
||||
to find the geographic location for an IP address. Bro has support
|
||||
to find the geographic location for an IP address. Bro had support
|
||||
for the `GeoIP library <http://www.maxmind.com/app/c>`__ at the
|
||||
policy script level beginning with release 1.3 to account for this
|
||||
need. To use this functionality, you need to first install the libGeoIP
|
||||
software, and then install the GeoLite city database before building
|
||||
policy script level from release 1.3 to 2.5.X to account for this
|
||||
need. Starting with release 2.6 GeoIP support requires `libmaxminddb
|
||||
<https://github.com/maxmind/libmaxminddb/releases>`__.
|
||||
To use this functionality, you need to first install the libmaxminddb
|
||||
software, and then install the GeoLite2 city database before building
|
||||
Bro.
|
||||
|
||||
.. contents::
|
||||
|
@ -20,85 +22,91 @@ GeoLocation
|
|||
Install libGeoIP
|
||||
----------------
|
||||
|
||||
Before building Bro, you need to install libGeoIP.
|
||||
Before building Bro, you need to install libmaxminddb.
|
||||
|
||||
* FreeBSD:
|
||||
|
||||
.. console::
|
||||
|
||||
sudo pkg install GeoIP
|
||||
sudo pkg install libmaxminddb
|
||||
|
||||
* RPM/RedHat-based Linux:
|
||||
|
||||
.. console::
|
||||
|
||||
sudo yum install GeoIP-devel
|
||||
sudo yum install libmaxminddb-devel
|
||||
|
||||
* DEB/Debian-based Linux:
|
||||
|
||||
.. console::
|
||||
|
||||
sudo apt-get install libgeoip-dev
|
||||
sudo apt-get install libmaxminddb-dev
|
||||
|
||||
* Mac OS X:
|
||||
|
||||
You need to install from your preferred package management system
|
||||
(e.g. MacPorts, Fink, or Homebrew). The name of the package that you need
|
||||
may be libgeoip, geoip, or geoip-dev, depending on which package management
|
||||
system you are using.
|
||||
may be libmaxminddb, maxminddb, or libmaxminddb-dev, depending on which
|
||||
package management system you are using.
|
||||
|
||||
|
||||
GeoIPLite Database Installation
|
||||
-------------------------------
|
||||
GeoLite2-City Database Installation
|
||||
-----------------------------------
|
||||
|
||||
A country database for GeoIPLite is included when you do the C API
|
||||
install, but for Bro, we are using the city database which includes
|
||||
cities and regions in addition to countries.
|
||||
Bro can use the city or country database. The city database includes cities
|
||||
and regions in addition to countries.
|
||||
|
||||
`Download <http://www.maxmind.com/app/geolitecity>`__ the GeoLite city
|
||||
binary database:
|
||||
`Download <http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz>`__
|
||||
the GeoLite2 city binary database:
|
||||
|
||||
.. console::
|
||||
|
||||
wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
|
||||
gunzip GeoLiteCity.dat.gz
|
||||
wget http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz
|
||||
tar zxf GeoLite2-City.tar.gz
|
||||
|
||||
Next, the file needs to be renamed and put in the GeoIP database directory.
|
||||
This directory should already exist and will vary depending on which platform
|
||||
and package you are using. For FreeBSD, use ``/usr/local/share/GeoIP``. For
|
||||
Linux, use ``/usr/share/GeoIP`` or ``/var/lib/GeoIP`` (choose whichever one
|
||||
already exists).
|
||||
Next, the file "GeoLite2-City_YYYYMMDD/GeoLite2-City.mmdb" needs to be renamed
|
||||
and put in the GeoIP database directory. This directory should already exist
|
||||
and will vary depending on which platform and package you are using. For
|
||||
FreeBSD, use ``/usr/local/share/GeoIP``. For Linux, use ``/usr/share/GeoIP``
|
||||
or ``/var/lib/GeoIP`` (choose whichever one already exists).
|
||||
|
||||
.. console::
|
||||
|
||||
mv GeoLiteCity.dat <path_to_database_dir>/GeoIPCity.dat
|
||||
|
||||
Note that there is a separate database for IPv6 addresses, which can also
|
||||
be installed if you want GeoIP functionality for IPv6.
|
||||
mv <extracted subdir>/GeoLite2-City.mmdb <path_to_database_dir>/GeoLite2-City.mmdb
|
||||
|
||||
Testing
|
||||
-------
|
||||
|
||||
Before using the GeoIP functionality, it is a good idea to verify that
|
||||
everything is setup correctly. After installing libGeoIP and the GeoIP city
|
||||
database, and building Bro, you can quickly check if the GeoIP functionality
|
||||
works by running a command like this:
|
||||
everything is setup correctly. After installing libmaxminddb and the GeoIP
|
||||
city database, and building Bro, you can quickly check if the GeoIP
|
||||
functionality works by running a command like this:
|
||||
|
||||
.. console::
|
||||
|
||||
bro -e "print lookup_location(8.8.8.8);"
|
||||
|
||||
If you see an error message similar to "Failed to open GeoIP City database",
|
||||
then you may need to either rename or move your GeoIP city database file (the
|
||||
error message should give you the full pathname of the database file that
|
||||
Bro is looking for).
|
||||
If you see an error message similar to "Failed to open GeoIP location
|
||||
database", then you may need to either rename or move your GeoIP
|
||||
location database file. Bro looks for location database files in the
|
||||
following order by default:
|
||||
|
||||
/usr/share/GeoIP/GeoLite2-City.mmdb
|
||||
/var/lib/GeoIP/GeoLite2-City.mmdb
|
||||
/usr/local/share/GeoIP/GeoLite2-City.mmdb
|
||||
/usr/local/var/GeoIP/GeoLite2-City.mmdb
|
||||
/usr/share/GeoIP/GeoLite2-Country.mmdb
|
||||
/var/lib/GeoIP/GeoLite2-Country.mmdb
|
||||
/usr/local/share/GeoIP/GeoLite2-Country.mmdb
|
||||
/usr/local/var/GeoIP/GeoLite2-Country.mmdb
|
||||
|
||||
If you see an error message similar to "Bro was not configured for GeoIP
|
||||
support", then you need to rebuild Bro and make sure it is linked against
|
||||
libGeoIP. Normally, if libGeoIP is installed correctly then it should
|
||||
automatically be found when building Bro. If this doesn't happen, then
|
||||
you may need to specify the path to the libGeoIP installation
|
||||
(e.g. ``./configure --with-geoip=<path>``).
|
||||
support", then you either need to rebuild Bro and make sure it is linked
|
||||
against libmaxminddb or else set the :bro:see:`mmdb_dir` value
|
||||
correctly. Normally, if libmaxminddb is installed correctly then it
|
||||
should automatically be found when building Bro. If this doesn't
|
||||
happen, then you may need to specify the path to the libmaxminddb
|
||||
installation (e.g. ``./configure --with-geoip=<path>``).
|
||||
|
||||
Usage
|
||||
-----
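As a starting point, here is a minimal sketch of using the lookup from a
script; the field names come from the ``geo_location`` record that
:bro:see:`lookup_location` returns:

.. code:: bro

   event bro_init()
       {
       local loc = lookup_location(8.8.8.8);

       if ( loc?$country_code )
           print fmt("country: %s", loc$country_code);

       if ( loc?$city )
           print fmt("city: %s", loc$city);
       }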
|
||||
|
|
|
@ -6,6 +6,7 @@ Frameworks
|
|||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
configuration
|
||||
file-analysis
|
||||
geoip
|
||||
input
|
||||
|
|
|
@ -532,10 +532,5 @@ Bro supports the following additional built-in output formats:
|
|||
|
||||
logging-input-sqlite
|
||||
|
||||
Additional writers are available as external plugins:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
../components/bro-plugins/README
|
||||
|
||||
Additional writers are available as external plugins through the `Bro
|
||||
Package Manager <https://github.com/bro/bro-plugins>`_.
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
../../aux/broccoli/bindings/broccoli-python/CHANGES
|
|
@ -1 +0,0 @@
|
|||
../../aux/broccoli/bindings/broccoli-ruby/CHANGES
|
|
@ -1 +0,0 @@
|
|||
../../aux/broccoli/CHANGES
|
1
doc/install/CHANGES-broker.txt
Symbolic link
|
@ -0,0 +1 @@
|
|||
../../aux/broker/CHANGES
|
|
@ -17,23 +17,11 @@ BroControl
|
|||
|
||||
.. literalinclude:: CHANGES-broctl.txt
|
||||
|
||||
--------
|
||||
Broccoli
|
||||
--------
|
||||
------
|
||||
Broker
|
||||
------
|
||||
|
||||
.. literalinclude:: CHANGES-broccoli.txt
|
||||
|
||||
---------------
|
||||
Broccoli Python
|
||||
---------------
|
||||
|
||||
.. literalinclude:: CHANGES-broccoli-python.txt
|
||||
|
||||
-------------
|
||||
Broccoli Ruby
|
||||
-------------
|
||||
|
||||
.. literalinclude:: CHANGES-broccoli-ruby.txt
|
||||
.. literalinclude:: CHANGES-broker.txt
|
||||
|
||||
--------
|
||||
Capstats
|
||||
|
|
85
doc/install/cross-compiling.rst
Normal file
|
@ -0,0 +1,85 @@
|
|||
.. _crosstool-NG: https://crosstool-ng.github.io/
|
||||
.. _CMake toolchain: https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html
|
||||
|
||||
===================
|
||||
Cross Compiling Bro
|
||||
===================
|
||||
|
||||
.. contents::
|
||||
|
||||
Prerequisites
|
||||
=============
|
||||
|
||||
You need three things on the host system:
|
||||
|
||||
1. The Bro source tree.
|
||||
2. A cross-compilation toolchain, such as one built via crosstool-NG_.
|
||||
3. Pre-built Bro dependencies from the target system. This usually
|
||||
includes libpcap, zlib, OpenSSL, and Python development headers
|
||||
and libraries.
|
||||
|
||||
Configuration and Compiling
|
||||
===========================
|
||||
|
||||
You first need to compile a few build tools native to the host system
|
||||
for use during the later cross-compile build. In the root of your
|
||||
Bro source tree:
|
||||
|
||||
.. console::
|
||||
|
||||
./configure --builddir=../bro-buildtools
|
||||
( cd ../bro-buildtools && make binpac bifcl )
|
||||
|
||||
Next configure Bro to use your cross-compilation toolchain:
|
||||
|
||||
.. console::
|
||||
|
||||
./configure --toolchain=/home/jon/x-tools/RaspberryPi-toolchain.cmake --with-binpac=$(pwd)/../bro-buildtools/aux/binpac/src/binpac --with-bifcl=$(pwd)/../bro-buildtools/src/bifcl
|
||||
|
||||
Here, the toolchain file is a `CMake toolchain`_ file. It might look
|
||||
something like the following (using a Raspberry Pi as target system)::
|
||||
|
||||
# Operating system that CMake is targeting.
|
||||
set(CMAKE_SYSTEM_NAME Linux)
|
||||
|
||||
# The CMAKE_STAGING_PREFIX option may not work.
|
||||
# Given that Bro is configured:
|
||||
#
|
||||
# `./configure --prefix=<dir>`
|
||||
#
|
||||
# The options are:
|
||||
#
|
||||
# (1) `make install` and then copy over the --prefix dir from host to
|
||||
# target system.
|
||||
#
|
||||
# (2) `DESTDIR=<staging_dir> make install` and then copy over the
|
||||
# contents of that staging directory.
|
||||
|
||||
set(toolchain /home/jon/x-tools/arm-rpi-linux-gnueabihf)
|
||||
set(CMAKE_C_COMPILER ${toolchain}/bin/arm-rpi-linux-gnueabihf-gcc)
|
||||
set(CMAKE_CXX_COMPILER ${toolchain}/bin/arm-rpi-linux-gnueabihf-g++)
|
||||
|
||||
# The cross-compiler/linker will use these paths to locate dependencies.
|
||||
set(CMAKE_FIND_ROOT_PATH
|
||||
/home/jon/x-tools/bro-rpi-deps
|
||||
${toolchain}/arm-rpi-linux-gnueabihf/sysroot
|
||||
)
|
||||
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
|
||||
|
||||
If that configuration succeeds you are ready to build:
|
||||
|
||||
.. console::
|
||||
|
||||
make
|
||||
|
||||
And if that works, install on your host system:
|
||||
|
||||
.. console::
|
||||
|
||||
make install
|
||||
|
||||
From there, you can copy/move the files from the installation prefix
|
||||
on the host system to the target system and start running Bro as usual.
|
|
@ -10,3 +10,4 @@ Installation
|
|||
|
||||
install
|
||||
upgrade
|
||||
cross-compiling
|
||||
|
|
|
@ -35,16 +35,16 @@ before you begin:
|
|||
|
||||
To build Bro from source, the following additional dependencies are required:
|
||||
|
||||
* CMake 2.8 or greater (http://www.cmake.org)
|
||||
* CMake 2.8.12 or greater (http://www.cmake.org)
|
||||
* Make
|
||||
* C/C++ compiler with C++11 support (GCC 4.8+ or Clang 3.3+)
|
||||
* SWIG (http://www.swig.org)
|
||||
* Bison (GNU Parser Generator)
|
||||
* Flex (Fast Lexical Analyzer)
|
||||
* Bison 2.5 or greater (https://www.gnu.org/software/bison/)
|
||||
* Flex (lexical analyzer generator) (https://github.com/westes/flex)
|
||||
* Libpcap headers (http://www.tcpdump.org)
|
||||
* OpenSSL headers (http://www.openssl.org)
|
||||
* zlib headers
|
||||
* Python
|
||||
* zlib headers (https://zlib.net/)
|
||||
* Python (https://www.python.org/)
|
||||
|
||||
To install the required dependencies, you can use:
|
||||
|
||||
|
@ -67,7 +67,7 @@ To install the required dependencies, you can use:
|
|||
|
||||
.. console::
|
||||
|
||||
sudo pkg install bash cmake swig bison python py27-sqlite3
|
||||
sudo pkg install bash cmake swig30 bison python py27-sqlite3 py27-ipaddress
|
||||
|
||||
For older versions of FreeBSD (especially FreeBSD 9.x), the system compiler
|
||||
is not new enough to compile Bro. For these systems, you will have to install
|
||||
|
@ -95,14 +95,17 @@ To install the required dependencies, you can use:
|
|||
clicking "Install").
|
||||
|
||||
OS X comes with all required dependencies except for CMake_, SWIG_,
|
||||
and OpenSSL (OpenSSL headers were removed in OS X 10.11, therefore OpenSSL
|
||||
must be installed manually for OS X versions 10.11 or newer).
|
||||
Distributions of these dependencies can
|
||||
likely be obtained from your preferred Mac OS X package management
|
||||
system (e.g. Homebrew_, MacPorts_, or Fink_). Specifically for
|
||||
Homebrew, the ``cmake``, ``swig``, and ``openssl`` packages
|
||||
provide the required dependencies. For MacPorts, the ``cmake``, ``swig``,
|
||||
``swig-python``, and ``openssl`` packages provide the required dependencies.
|
||||
Bison, and OpenSSL (OpenSSL headers were removed in OS X 10.11,
|
||||
therefore OpenSSL must be installed manually for OS X versions 10.11
|
||||
or newer).
|
||||
|
||||
Distributions of these dependencies can likely be obtained from your
|
||||
preferred Mac OS X package management system (e.g. Homebrew_,
|
||||
MacPorts_, or Fink_). Specifically for Homebrew, the ``cmake``,
|
||||
``swig``, ``openssl``, and ``bison`` packages
|
||||
provide the required dependencies. For MacPorts, the ``cmake``,
|
||||
``swig``, ``swig-python``, ``openssl``, and ``bison`` packages provide
|
||||
the required dependencies.
|
||||
|
||||
|
||||
Optional Dependencies
|
||||
|
@ -111,7 +114,6 @@ Optional Dependencies
|
|||
Bro can make use of some optional libraries and tools if they are found at
|
||||
build time:
|
||||
|
||||
* C++ Actor Framework (CAF) version 0.14 (http://actor-framework.org)
|
||||
* LibGeoIP (for geolocating IP addresses)
|
||||
* sendmail (enables Bro and BroControl to send mail)
|
||||
* curl (used by a Bro script that implements active HTTP)
|
||||
|
@ -208,6 +210,13 @@ all of the documentation for the latest Bro release is available on the
|
|||
Bro web site), there are instructions in ``doc/README`` in the source
|
||||
distribution.
|
||||
|
||||
Cross Compiling
|
||||
---------------
|
||||
|
||||
See :doc:`cross-compiling` for an example of how
|
||||
to cross compile Bro for a different target platform than the one on
|
||||
which you build.
|
||||
|
||||
Configure the Run-Time Environment
|
||||
==================================
|
||||
|
||||
|
|
|
@ -169,7 +169,7 @@ History
|
|||
|
||||
Bro's history goes back much further than many people realize. `Vern
|
||||
Paxson <http://www.icir.org/vern>`_ designed and implemented the
|
||||
initial version almost two decades ago.
|
||||
initial version more than two decades ago.
|
||||
Vern began work on the code in 1995 as a researcher at the `Lawrence
|
||||
Berkeley National Laboratory (LBNL) <http://www.lbl.gov>`_. Berkeley
|
||||
Lab began operational deployment in 1996, and the USENIX Security
|
||||
|
|
|
@ -316,9 +316,8 @@ Analyzing live traffic from an interface is simple:
|
|||
|
||||
bro -i en0 <list of scripts to load>
|
||||
|
||||
``en0`` can be replaced by the interface of your choice and for the list of
|
||||
scripts, you can just use "all" for now to perform all the default analysis
|
||||
that's available.
|
||||
``en0`` can be replaced by the interface of your choice. A selection
|
||||
of common base scripts will be loaded by default.
|
||||
|
||||
Bro will output log files into the working directory.
|
||||
|
||||
|
@ -326,22 +325,6 @@ Bro will output log files into the working directory.
|
|||
capturing as an unprivileged user and checksum offloading are
|
||||
particularly relevant at this point.
|
||||
|
||||
To use the site-specific ``local.bro`` script, just add it to the
|
||||
command-line:
|
||||
|
||||
.. console::
|
||||
|
||||
bro -i en0 local
|
||||
|
||||
This will cause Bro to print a warning about lacking the
|
||||
``Site::local_nets`` variable being configured. You can supply this
|
||||
information at the command line like this (supply your "local" subnets
|
||||
in place of the example subnets):
|
||||
|
||||
.. console::
|
||||
|
||||
bro -r mypackets.trace local "Site::local_nets += { 1.2.3.0/24, 5.6.7.0/24 }"
|
||||
|
||||
|
||||
Reading Packet Capture (pcap) Files
|
||||
-----------------------------------
|
||||
|
@ -373,7 +356,6 @@ script that we include as a suggested configuration:
|
|||
|
||||
bro -r mypackets.trace local
|
||||
|
||||
|
||||
Telling Bro Which Scripts to Load
|
||||
---------------------------------
|
||||
|
||||
|
@ -381,33 +363,65 @@ A command-line invocation of Bro typically looks like:
|
|||
|
||||
.. console::
|
||||
|
||||
bro <options> <policies...>
|
||||
bro <options> <scripts...>
|
||||
|
||||
Where the last arguments are the specific policy scripts that this Bro
|
||||
instance will load. These arguments don't have to include the ``.bro``
|
||||
file extension, and if the corresponding script resides under the default
|
||||
installation path, ``$PREFIX/share/bro``, then it requires no path
|
||||
qualification. Further, a directory of scripts can be specified as
|
||||
an argument to be loaded as a "package" if it contains a ``__load__.bro``
|
||||
script that defines the scripts that are part of the package.
|
||||
file extension, and if the corresponding script resides in the default
|
||||
search path, then it requires no path qualification. The following
|
||||
directories are included in the default search path for Bro scripts::
|
||||
|
||||
./
|
||||
<prefix>/share/bro/
|
||||
<prefix>/share/bro/policy/
|
||||
<prefix>/share/bro/site/
|
||||
|
||||
This example does all of the base analysis (primarily protocol
|
||||
logging) and adds SSL certificate validation.
|
||||
These prefix paths can be used to load scripts like this:
|
||||
|
||||
.. console::
|
||||
|
||||
bro -r mypackets.trace protocols/ssl/validate-certs
|
||||
bro -r mypackets.trace frameworks/files/extract-all
|
||||
|
||||
This will load the
|
||||
``<prefix>/share/bro/policy/frameworks/files/extract-all.bro`` script which will
|
||||
cause Bro to extract all of the files it discovers in the PCAP.
|
||||
|
||||
.. note:: If one wants Bro to be able to load scripts that live outside the
|
||||
default directories in Bro's installation root, the full path to the file(s)
|
||||
must be provided. See the default search path by running ``bro --help``.
|
||||
|
||||
You might notice that a script you load from the command line uses the
|
||||
``@load`` directive in the Bro language to declare dependence on other scripts.
|
||||
This directive is similar to the ``#include`` of C/C++, except the semantics
|
||||
are, "load this script if it hasn't already been loaded."
|
||||
|
||||
.. note:: If one wants Bro to be able to load scripts that live outside the
|
||||
default directories in Bro's installation root, the ``BROPATH`` environment
|
||||
variable will need to be extended to include all the directories that need
|
||||
to be searched for scripts. See the default search path by doing
|
||||
``bro --help``.
|
||||
Further, a directory of scripts can be specified as
|
||||
an argument to be loaded as a "package" if it contains a ``__load__.bro``
|
||||
script that defines the scripts that are part of the package.
|
||||
|
||||
Local site customization
|
||||
------------------------
|
||||
|
||||
There is one script that is installed which is considered "local site
|
||||
customization" and is not overwritten when upgrades take place. To use
|
||||
the site-specific ``local.bro`` script, just add it to the command line (it can
|
||||
also be loaded from other scripts with ``@load``):
|
||||
|
||||
.. console::
|
||||
|
||||
bro -i en0 local
|
||||
|
||||
This causes Bro to load a script that prints a warning if the
|
||||
``Site::local_nets`` variable has not been configured. You can supply this
|
||||
information at the command line like this (supply your "local" subnets
|
||||
in place of the example subnets):
|
||||
|
||||
.. console::
|
||||
|
||||
bro -r mypackets.trace local "Site::local_nets += { 1.2.3.0/24, 5.6.7.0/24 }"
|
||||
|
||||
When running with Broctl, this value is set by configuring the ``networks.cfg``
|
||||
file.
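Alternatively, the same subnets can be defined directly in a script, for
example inside your own ``local.bro`` customizations (a minimal sketch
reusing the example subnets from above):

.. code:: bro

   # Define which networks count as "local" for this installation.
   redef Site::local_nets += { 1.2.3.0/24, 5.6.7.0/24 };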
|
||||
|
||||
Running Bro Without Installing
|
||||
------------------------------
|
||||
|
|
|
@ -14,6 +14,8 @@ Network Protocols
|
|||
+============================+=======================================+=================================+
|
||||
| conn.log | TCP/UDP/ICMP connections | :bro:type:`Conn::Info` |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| config.log | Configuration option changes | :bro:type:`Config::Info` |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| dce_rpc.log | Distributed Computing Environment/RPC | :bro:type:`DCE_RPC::Info` |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| dhcp.log | DHCP leases | :bro:type:`DHCP::Info` |
|
||||
|
@ -76,6 +78,10 @@ Files
|
|||
+============================+=======================================+=================================+
|
||||
| files.log | File analysis results | :bro:type:`Files::Info` |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| ocsp.log | Online Certificate Status Protocol | :bro:type:`OCSP::Info` |
|
||||
| | (OCSP). Only created if policy script | |
|
||||
| | is loaded. | |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| pe.log | Portable Executable (PE) | :bro:type:`PE::Info` |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| x509.log | X.509 certificate info | :bro:type:`X509::Info` |
|
||||
|
@ -124,9 +130,6 @@ Network Observations
|
|||
+============================+=======================================+=================================+
|
||||
| known_certs.log | SSL certificates | :bro:type:`Known::CertsInfo` |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| known_devices.log | MAC addresses of devices on the | :bro:type:`Known::DevicesInfo` |
|
||||
| | network | |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| known_hosts.log | Hosts that have completed TCP | :bro:type:`Known::HostsInfo` |
|
||||
| | handshakes | |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
|
@ -162,8 +165,8 @@ Bro Diagnostics
|
|||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| cluster.log | Bro cluster messages | :bro:type:`Cluster::Info` |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| communication.log | Communication events between Bro or | :bro:type:`Communication::Info` |
|
||||
| | Broccoli instances | |
|
||||
| broker.log | Peering status events between Bro or | :bro:type:`Broker::Info` |
|
||||
| | Broker-enabled processes | |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| loaded_scripts.log | Shows all scripts loaded by Bro | :bro:type:`LoadedScripts::Info` |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
|
|
|
@ -85,6 +85,25 @@ Arithmetic operators
|
|||
| | | of elements. |
|
||||
+------------------------------+-------------+-------------------------------+
|
||||
|
||||
Bitwise operators
|
||||
-----------------
|
||||
|
||||
The bitwise operators work with operands of type :bro:type:`count` or
|
||||
``vector of count``, but the bitwise complement operator works with ``count``
|
||||
only.
|
||||
|
||||
+------------------------------+-------------+
|
||||
| Name | Syntax |
|
||||
+==============================+=============+
|
||||
| Bitwise AND | *a* & *b* |
|
||||
+------------------------------+-------------+
|
||||
| Bitwise OR | *a* | *b* |
|
||||
+------------------------------+-------------+
|
||||
| Bitwise XOR | *a* ^ *b* |
|
||||
+------------------------------+-------------+
|
||||
| Bitwise complement | ~ *a* |
|
||||
+------------------------------+-------------+
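For example (a minimal sketch; the commented results follow directly from
the operands' bit patterns):

.. code:: bro

   event bro_init()
       {
       local a: count = 12;   # binary 1100
       local b: count = 10;   # binary 1010

       print a & b;   # 8  (binary 1000)
       print a | b;   # 14 (binary 1110)
       print a ^ b;   # 6  (binary 0110)
       print ~a;      # bitwise complement of a
       }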
|
||||
|
||||
|
||||
Assignment operators
|
||||
--------------------
|
||||
|
@ -122,6 +141,73 @@ field name must be in the declaration of the record type.
|
|||
+------------------------------+-------------+-------------------------------+
|
||||
|
||||
|
||||
Pattern operators
|
||||
-----------------
|
||||
|
||||
In the table below, *p* is a pattern, and *s* is a string.
|
||||
|
||||
+------------------------------+-------------+-------------------------------+
|
||||
| Name | Syntax | Notes |
|
||||
+==============================+=============+===============================+
|
||||
| Exact matching | *p* == *s* | Evaluates to a boolean, |
|
||||
| | | indicating if the entire |
|
||||
| | | string exactly matches the |
|
||||
| | | pattern. |
|
||||
+------------------------------+-------------+-------------------------------+
|
||||
| Embedded matching | *p* in *s* | Evaluates to a boolean, |
|
||||
| | | indicating if pattern is |
|
||||
| | | found somewhere in the string.|
|
||||
+------------------------------+-------------+-------------------------------+
|
||||
| Conjunction | *p1* & *p2* | Evaluates to a pattern that |
|
||||
| | | represents matching p1 |
|
||||
| | | followed by p2. |
|
||||
+------------------------------+-------------+-------------------------------+
|
||||
| Disjunction | *p1* | *p2* | Evaluates to a pattern that |
|
||||
| | | represents matching p1 or p2. |
|
||||
+------------------------------+-------------+-------------------------------+
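A brief sketch of these operators in action (the commented results follow
from the definitions above):

.. code:: bro

   event bro_init()
       {
       print /foo|bar/ == "foo";           # T: exact match
       print /foo/ in "seafood";           # T: embedded match
       print (/foo/ & /bar/) == "foobar";  # T: "foo" followed by "bar"
       print (/foo/ | /bar/) == "bar";     # T: either alternative matches
       }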
|
||||
|
||||
|
||||
Type casting
|
||||
------------
|
||||
|
||||
The "as" operator performs type casting and the "is" operator checks if a
|
||||
type cast is supported or not. For both operators, the first operand is a
|
||||
value and the second operand is the name of a Bro script type (either built-in
|
||||
or user-defined).
|
||||
|
||||
+------------------------------+-------------+-------------------------------+
|
||||
| Name | Syntax | Notes |
|
||||
+==============================+=============+===============================+
|
||||
| Type cast | *v* as *t* | Cast value "v" into type "t". |
|
||||
| | | Evaluates to the value casted |
|
||||
| | | to the specified type. |
|
||||
| | | If this is not a supported |
|
||||
| | | cast, then a runtime error is |
|
||||
| | | triggered. |
|
||||
+------------------------------+-------------+-------------------------------+
|
||||
| Check if a cast is supported | *v* is *t* | Evaluates to boolean. If true,|
|
||||
| | | then "v as t" would succeed. |
|
||||
+------------------------------+-------------+-------------------------------+
|
||||
|
||||
Only the following kinds of type casts are supported currently:
|
||||
|
||||
- Broker values (i.e., :bro:see:`Broker::Data` values returned from
|
||||
functions such as :bro:id:`Broker::data`) can be cast to their
|
||||
corresponding Bro script types.
|
||||
- A value of declared type "any" can be cast to its actual underlying type.
|
||||
- All values can be cast to their declared types (i.e., this is a no-op).
|
||||
|
||||
The function in this example tries to cast a value to a string::
|
||||
|
||||
function example(a: any)
|
||||
{
|
||||
local s: string;
|
||||
|
||||
if ( a is string )
|
||||
s = (a as string);
|
||||
}
|
||||
|
||||
|
||||
Other operators
|
||||
---------------
|
||||
|
||||
|
|
|
@ -20,6 +20,9 @@ Declarations
|
|||
+----------------------------+-----------------------------+
|
||||
| :bro:keyword:`const` | Declare a constant |
|
||||
+----------------------------+-----------------------------+
|
||||
| :bro:keyword:`option` | Declare a configuration |
|
||||
| | option |
|
||||
+----------------------------+-----------------------------+
|
||||
| :bro:keyword:`type` | Declare a user-defined type |
|
||||
+----------------------------+-----------------------------+
|
||||
| :bro:keyword:`redef` | Redefine a global value or |
|
||||
|
@ -176,6 +179,25 @@ all loaded Bro scripts.
|
|||
or "global" keywords (i.e., "const" replaces "local" and "global").
|
||||
|
||||
|
||||
.. bro:keyword:: option
|
||||
|
||||
A variable declared with the "option" keyword is a configuration option.
|
||||
|
||||
Options are required to be initialized at the
|
||||
time of declaration. Normally, the type is inferred from the initializer,
|
||||
but the type can be explicitly specified. Example::
|
||||
|
||||
option hostname = "host-1";
|
||||
option peers: set[addr] = {};
|
||||
|
||||
The value of an option cannot be changed by an assignment statement.
|
||||
|
||||
The scope of an option is global.
|
||||
|
||||
Note that an "option" declaration cannot also use the "local", "global",
|
||||
or "const" keywords.
|
||||
|
||||
|
||||
.. bro:keyword:: type
|
||||
|
||||
The "type" keyword is used to declare a user-defined type. The name
|
||||
|
@ -549,6 +571,42 @@ Here are the statements that the Bro scripting language supports.
|
|||
do not indicate the presence of a `compound statement`_), and that no
|
||||
semicolon is needed at the end of a "switch" statement.
|
||||
|
||||
There is an alternative form of the switch statement that supports
|
||||
switching by type rather than value. This form of the switch statement
|
||||
uses type-based versions of "case":
|
||||
|
||||
- "case type t: ...": Take branch if the value of the switch expression
|
||||
could be cast to type t (where "t" is the name of a Bro script type,
|
||||
either built-in or user-defined).
|
||||
|
||||
- "case type t as x: ...": Same as above, but the casted value is
|
||||
available through ID "x".
|
||||
|
||||
Multiple types can be listed per branch, separated by commas (the "type"
|
||||
keyword must be repeated for each type in the list).
|
||||
|
||||
Example::
|
||||
|
||||
function example(v: any)
|
||||
{
|
||||
switch (v) {
|
||||
case type count as c:
|
||||
print "It's a count", c;
|
||||
break;
|
||||
|
||||
case type bool, type addr:
|
||||
print "It's a bool or address";
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Note that a single switch statement switches either by type or by value,
|
||||
but not both.
|
||||
|
||||
Also note that the type-based switch statement will trigger a runtime
|
||||
error if any cast in any "case" is an unsupported cast (see the
|
||||
documentation of the type casting operator "as").
|
||||
|
||||
|
||||
.. bro:keyword:: when
|
||||
|
||||
|
|
|
@ -91,6 +91,10 @@ Here is a more detailed description of each type:
|
|||
type, but a unary plus or minus applied to a "count" results in an
|
||||
"int".
|
||||
|
||||
In addition, "count" types support bitwise operations. You can use
|
||||
``&``, ``|``, and ``^`` for bitwise ``and``, ``or``, and ``xor``. You
|
||||
can also use ``~`` for bitwise (one's) complement.
|
||||
|
||||
.. bro:type:: double
|
||||
|
||||
A numeric type representing a double-precision floating-point
|
||||
|
@ -194,11 +198,11 @@ Here is a more detailed description of each type:
|
|||
|
||||
.. bro:type:: pattern
|
||||
|
||||
A type representing regular-expression patterns which can be used
|
||||
A type representing regular-expression patterns that can be used
|
||||
for fast text-searching operations. Pattern constants are created
|
||||
by enclosing text within forward slashes (/) and is the same syntax
|
||||
by enclosing text within forward slashes (``/``) and use the same syntax
|
||||
as the patterns supported by the `flex lexical analyzer
|
||||
<http://flex.sourceforge.net/manual/Patterns.html>`_. The speed of
|
||||
<http://westes.github.io/flex/manual/Patterns.html>`_. The speed of
|
||||
regular expression matching does not depend on the complexity or
|
||||
size of the patterns. Patterns support two types of matching, exact
|
||||
and embedded.
|
||||
|
@ -233,6 +237,32 @@ Here is a more detailed description of each type:
|
|||
is false since "oob" does not appear at the start of "foobar". The
|
||||
``!in`` operator would yield the negation of ``in``.
|
||||
|
||||
You can create a disjunction (either-or) of two patterns
|
||||
using the ``|`` operator. For example::
|
||||
|
||||
/foo/ | /bar/ in "foobar"
|
||||
|
||||
yields true, like in the similar example above. You can also
|
||||
create the conjunction (concatenation) of patterns using the ``&``
|
||||
operator. For example::
|
||||
|
||||
/foo/ & /bar/ in "foobar"
|
||||
|
||||
will yield true because the pattern /(foo)(bar)/ appears in
|
||||
the string "foobar".
|
||||
|
||||
When specifying a pattern, you can add a final ``i`` specifier to
|
||||
mark it as case-insensitive. For example, ``/foo|bar/i`` will match
|
||||
a "foo", "Foo", "BaR", etc.
|
||||
|
||||
You can also introduce a case-insensitive sub-pattern by enclosing it
|
||||
in ``(?i:``<pattern>``)``. So, for example, ``/foo|(?i:bar)/`` will
|
||||
match "foo" and "BaR", but *not* "Foo".
|
||||
|
||||
For both ways of specifying case-insensitivity, characters enclosed
|
||||
in double quotes maintain their case-sensitivity. So for example
|
||||
/"foo"/i will not match "Foo", but it will match "foo".
|
||||
|
||||
.. bro:type:: port
|
||||
|
||||
A type representing transport-level port numbers (besides TCP and
|
||||
|
@ -514,6 +544,15 @@ Here is a more detailed description of each type:
|
|||
|
||||
|s|
|
||||
|
||||
You can compute the union, intersection, or difference of two sets
|
||||
using the ``|``, ``&``, and ``-`` operators. You can compare
|
||||
sets for equality (they have exactly the same elements) using ``==``.
|
||||
The ``<`` operator returns ``T`` if the lefthand operand is a proper
|
||||
subset of the righthand operand. Similarly, ``<=`` returns ``T``
|
||||
if the lefthand operator is a subset (not necessarily proper, i.e.,
|
||||
it may be equal to the righthand operand). The operators ``!=``, ``>``
|
||||
and ``>=`` provide the expected complementary operations.
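For example (a minimal sketch; the comments show the resulting values):

.. code:: bro

   event bro_init()
       {
       local s1 = set(1, 2, 3);
       local s2 = set(2, 3, 4);

       local u = s1 | s2;    # union: 1, 2, 3, 4
       local i = s1 & s2;    # intersection: 2, 3
       local d = s1 - s2;    # difference: 1

       print |u|, |i|, |d|;  # 4, 2, 1
       print i < s1;         # T: i is a proper subset of s1
       }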
|
||||
|
||||
See the :bro:keyword:`for` statement for info on how to iterate over
|
||||
the elements in a set.
|
||||
|
||||
|
@ -569,6 +608,20 @@ Here is a more detailed description of each type:
|
|||
|
||||
|v|
|
||||
|
||||
A particularly common operation on a vector is to append an element
|
||||
to its end. You can do so using:
|
||||
|
||||
.. code:: bro
|
||||
|
||||
v += e;
|
||||
|
||||
where if e's type is ``X``, v's type is ``vector of X``. Note that
|
||||
this expression is equivalent to:
|
||||
|
||||
.. code:: bro
|
||||
|
||||
v[|v|] = e;
|
||||
|
||||
Vectors of integral types (``int`` or ``count``) support the pre-increment
|
||||
(``++``) and pre-decrement operators (``--``), which will increment or
|
||||
decrement each element in the vector.
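For example (a brief sketch):

.. code:: bro

   event bro_init()
       {
       local v = vector(1, 2, 3);
       ++v;
       print v;   # each element incremented: 2, 3, 4
       }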
|
||||
|
@ -585,6 +638,9 @@ Here is a more detailed description of each type:
|
|||
The resulting vector of bool is the logical "and" (or logical "or") of
|
||||
each element of the operand vectors.
|
||||
|
||||
Vectors of type ``count`` can also be operands for the bitwise and/or/xor
|
||||
operators, ``&``, ``|`` and ``^``.
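For example (a small sketch, assuming the operators apply element-wise,
analogous to the ``&&``/``||`` case described above):

.. code:: bro

   event bro_init()
       {
       local a = vector(1, 2, 3);
       local b = vector(4, 5, 6);

       print a & b;   # element-wise AND: 0, 0, 2
       print a | b;   # element-wise OR:  5, 7, 7
       print a ^ b;   # element-wise XOR: 5, 7, 5
       }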
|
||||
|
||||
See the :bro:keyword:`for` statement for info on how to iterate over
|
||||
the elements in a vector.
|
||||
|
||||
|
|
|
@ -3,10 +3,10 @@ event bro_init()
|
|||
local v1: vector of count;
|
||||
local v2 = vector(1, 2, 3, 4);
|
||||
|
||||
v1[|v1|] = 1;
|
||||
v1[|v1|] = 2;
|
||||
v1[|v1|] = 3;
|
||||
v1[|v1|] = 4;
|
||||
v1 += 1;
|
||||
v1 += 2;
|
||||
v1 += 3;
|
||||
v1 += 4;
|
||||
|
||||
print fmt("contents of v1: %s", v1);
|
||||
print fmt("length of v1: %d", |v1|);
|
||||
|
|
|
@ -171,7 +171,7 @@ write scripts for Bro but for understanding Bro itself.
|
|||
Gaining familiarity with the specific events generated by Bro is a big
|
||||
step towards building a mind set for working with Bro scripts. The
|
||||
majority of events generated by Bro are defined in the
|
||||
built-in-function files or ``.bif`` files which also act as the basis for
|
||||
built-in-function (``*.bif``) files which also act as the basis for
|
||||
online event documentation. These in-line comments are compiled into
|
||||
an online documentation system using Broxygen. Whether starting a
|
||||
script from scratch or reading and maintaining someone else's script,
|
||||
|
@ -212,11 +212,11 @@ later.
|
|||
While Bro is capable of packet-level processing, its strengths lie in
|
||||
the context of a connection between an originator and a responder. As
|
||||
such, there are events defined for the primary parts of the connection
|
||||
life-cycle as you'll see from the small selection of
|
||||
connection-related events below.
|
||||
life-cycle such as the following:
|
||||
|
||||
.. btest-include:: ${BRO_SRC_ROOT}/build/scripts/base/bif/event.bif.bro
|
||||
:lines: 69-72,88,106-109,129,132-137,148
|
||||
* :bro:see:`new_connection`
|
||||
* :bro:see:`connection_timeout`
|
||||
* :bro:see:`connection_state_remove`
|
||||
|
||||
Of the events listed, the event that will give us the best insight
|
||||
into the connection record data type will be
|
||||
|
@ -325,29 +325,14 @@ variable declared while scripts using a different namespace or no
|
|||
namespace altogether will not have access to the variable.
|
||||
Alternatively, if a global variable is declared within an ``export { ... }``
|
||||
block that variable is available to any other script through the
|
||||
naming convention of ``MODULE::variable_name``.
|
||||
|
||||
The declaration below is taken from the
|
||||
:doc:`/scripts/policy/protocols/conn/known-hosts.bro` script and
|
||||
declares a variable called ``known_hosts`` as a global set of unique
|
||||
IP addresses within the ``Known`` namespace and exports it for use
|
||||
outside of the ``Known`` namespace. Were we to want to use the
|
||||
``known_hosts`` variable we'd be able to access it through
|
||||
``Known::known_hosts``.
|
||||
|
||||
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/conn/known-hosts.bro
|
||||
:lines: 8-10, 32, 37
|
||||
|
||||
The sample above also makes use of an ``export { ... }`` block. When the module
|
||||
keyword is used in a script, the variables declared are said to be in
|
||||
that module's "namespace". Where as a global variable can be accessed
|
||||
by its name alone when it is not declared within a module, a global
|
||||
variable declared within a module must be exported and then accessed
|
||||
via ``MODULE_NAME::VARIABLE_NAME``. As in the example above, we would be
|
||||
able to access the ``known_hosts`` in a separate script variable via
|
||||
``Known::known_hosts`` due to the fact that ``known_hosts`` was declared as
|
||||
a global variable within an export block under the ``Known`` namespace.
|
||||
naming convention of ``<module name>::<variable name>``, i.e. the variable
|
||||
needs to be "scoped" by the name of the module in which it was declared.
|
||||
|
||||
When the ``module`` keyword is used in a script, the variables declared
|
||||
are said to be in that module's "namespace". Whereas a global variable
|
||||
can be accessed by its name alone when it is not declared within a
|
||||
module, a global variable declared within a module must be exported and
|
||||
then accessed via ``<module name>::<variable name>``.
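A minimal sketch (the module and variable names are made up for
illustration):

.. code:: bro

   module MyModule;

   export {
       ## A set of hosts that other scripts may want to inspect.
       global interesting_hosts: set[addr];
   }

   # From a script outside the module, the exported variable is only
   # reachable through its scoped name:
   #
   #     print MyModule::interesting_hosts;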
|
||||
|
||||
Constants
|
||||
~~~~~~~~~
|
||||
|
@ -1009,8 +994,6 @@ which is a factor of 5 to an alternate file, while writing the
|
|||
remaining logs to factor.log.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/scripting/framework_logging_factorial_03.bro
|
||||
:lines: 38-62
|
||||
:linenos:
|
||||
|
||||
To dynamically alter the file in which a stream writes its logs, a
|
||||
filter can specify a function that returns a string to be used as the
|
||||
|
|
|
@ -65,7 +65,7 @@ export {
|
|||
[9] = "WINDOWS_CE_GUI",
|
||||
[10] = "EFI_APPLICATION",
|
||||
[11] = "EFI_BOOT_SERVICE_DRIVER",
|
||||
[12] = "EFI_RUNTIME_
DRIVER",
|
||||
[12] = "EFI_RUNTIME_DRIVER",
|
||||
[13] = "EFI_ROM",
|
||||
[14] = "XBOX"
|
||||
} &default=function(i: count):string { return fmt("unknown-%d", i); };
|
||||
|
|
|
@ -126,7 +126,7 @@ event pe_section_header(f: fa_file, h: PE::SectionHeader) &priority=5
|
|||
|
||||
if ( ! f$pe?$section_names )
|
||||
f$pe$section_names = vector();
|
||||
f$pe$section_names[|f$pe$section_names|] = h$name;
|
||||
f$pe$section_names += h$name;
|
||||
}
|
||||
|
||||
event file_state_remove(f: fa_file) &priority=-5
|
||||
|
|
|
@ -1 +1,2 @@
|
|||
Support for X509 certificates with the file analysis framework.
|
||||
Also supports parsing OCSP requests and responses.
|
||||
|
|
|
@ -10,23 +10,17 @@ export {
|
|||
type Info: record {
|
||||
## Current timestamp.
|
||||
ts: time &log;
|
||||
|
||||
## File id of this certificate.
|
||||
id: string &log;
|
||||
|
||||
## Basic information about the certificate.
|
||||
certificate: X509::Certificate &log;
|
||||
|
||||
## The opaque wrapping the certificate. Mainly used
|
||||
## for the verify operations.
|
||||
handle: opaque of x509;
|
||||
|
||||
## All extensions that were encountered in the certificate.
|
||||
extensions: vector of X509::Extension &default=vector();
|
||||
|
||||
## Subject alternative name extension of the certificate.
|
||||
san: X509::SubjectAlternativeName &optional &log;
|
||||
|
||||
## Basic constraints extension of the certificate.
|
||||
basic_constraints: X509::BasicConstraints &optional &log;
|
||||
};
|
||||
|
@ -38,6 +32,24 @@ export {
|
|||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(X509::LOG, [$columns=Info, $ev=log_x509, $path="x509"]);
|
||||
|
||||
# We use MIME types internally to distinguish between user and CA certificates.
|
||||
# The first certificate in a connection always gets tagged as user-cert, all
|
||||
# following certificates get tagged as CA certificates. Certificates gotten via
|
||||
# other means (e.g. identified from HTTP traffic when they are transferred in plain
|
||||
# text) get tagged as application/pkix-cert.
|
||||
Files::register_for_mime_type(Files::ANALYZER_X509, "application/x-x509-user-cert");
|
||||
Files::register_for_mime_type(Files::ANALYZER_X509, "application/x-x509-ca-cert");
|
||||
Files::register_for_mime_type(Files::ANALYZER_X509, "application/pkix-cert");
|
||||
|
||||
# Always calculate hashes. They are not necessary for base scripts
|
||||
# but very useful for identification, and required for policy scripts
|
||||
Files::register_for_mime_type(Files::ANALYZER_MD5, "application/x-x509-user-cert");
|
||||
Files::register_for_mime_type(Files::ANALYZER_MD5, "application/x-x509-ca-cert");
|
||||
Files::register_for_mime_type(Files::ANALYZER_MD5, "application/pkix-cert");
|
||||
Files::register_for_mime_type(Files::ANALYZER_SHA1, "application/x-x509-user-cert");
|
||||
Files::register_for_mime_type(Files::ANALYZER_SHA1, "application/x-x509-ca-cert");
|
||||
Files::register_for_mime_type(Files::ANALYZER_SHA1, "application/pkix-cert");
|
||||
}
|
||||
|
||||
redef record Files::Info += {
|
||||
|
@ -48,16 +60,13 @@ redef record Files::Info += {
|
|||
|
||||
event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) &priority=5
|
||||
{
|
||||
if ( ! f$info?$mime_type )
|
||||
f$info$mime_type = "application/pkix-cert";
|
||||
|
||||
f$info$x509 = [$ts=f$info$ts, $id=f$id, $certificate=cert, $handle=cert_ref];
|
||||
}
|
||||
|
||||
event x509_extension(f: fa_file, ext: X509::Extension) &priority=5
|
||||
{
|
||||
if ( f$info?$x509 )
|
||||
f$info$x509$extensions[|f$info$x509$extensions|] = ext;
|
||||
f$info$x509$extensions += ext;
|
||||
}
|
||||
|
||||
event x509_ext_basic_constraints(f: fa_file, ext: X509::BasicConstraints) &priority=5
|
||||
|
|
|
@ -1,2 +1,3 @@
|
|||
@load ./main
|
||||
@load ./store
|
||||
@load ./log
|
||||
|
|
80
scripts/base/frameworks/broker/log.bro
Normal file
|
@ -0,0 +1,80 @@
|
|||
@load ./main
|
||||
|
||||
module Broker;
|
||||
|
||||
export {
|
||||
## The Broker logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
## The type of a Broker activity being logged.
|
||||
type Type: enum {
|
||||
## An informational status update.
|
||||
STATUS,
|
||||
## An error situation.
|
||||
ERROR
|
||||
};
|
||||
|
||||
## A record type containing the column fields of the Broker log.
|
||||
type Info: record {
|
||||
## The network time at which a Broker event occurred.
|
||||
ts: time &log;
|
||||
## The type of the Broker event.
|
||||
ty: Type &log;
|
||||
## The event being logged.
|
||||
ev: string &log;
|
||||
## The peer (if any) with which a Broker event is
|
||||
## concerned.
|
||||
peer: NetworkInfo &log &optional;
|
||||
## An optional message describing the Broker event in more detail
|
||||
message: string &log &optional;
|
||||
};
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(Broker::LOG, [$columns=Info, $path="broker"]);
|
||||
}
|
||||
|
||||
function log_status(ev: string, endpoint: EndpointInfo, msg: string)
|
||||
{
|
||||
local r: Info;
|
||||
|
||||
r = [$ts = network_time(),
|
||||
$ev = ev,
|
||||
$ty = STATUS,
|
||||
$message = msg];
|
||||
|
||||
if ( endpoint?$network )
|
||||
r$peer = endpoint$network;
|
||||
|
||||
Log::write(Broker::LOG, r);
|
||||
}
|
||||
|
||||
event Broker::peer_added(endpoint: EndpointInfo, msg: string)
|
||||
{
|
||||
log_status("peer-added", endpoint, msg);
|
||||
}
|
||||
|
||||
event Broker::peer_removed(endpoint: EndpointInfo, msg: string)
|
||||
{
|
||||
log_status("peer-removed", endpoint, msg);
|
||||
}
|
||||
|
||||
event Broker::peer_lost(endpoint: EndpointInfo, msg: string)
|
||||
{
|
||||
log_status("connection-terminated", endpoint, msg);
|
||||
}
|
||||
|
||||
event Broker::error(code: ErrorCode, msg: string)
|
||||
{
|
||||
local ev = cat(code);
|
||||
ev = subst_string(ev, "Broker::", "");
|
||||
ev = subst_string(ev, "_", "-");
|
||||
ev = to_lower(ev);
|
||||
|
||||
Log::write(Broker::LOG, [$ts = network_time(),
|
||||
$ev = ev,
|
||||
$ty = ERROR,
|
||||
$message = msg]);
|
||||
}
|
||||
|
|
@ -1,55 +1,182 @@
|
|||
##! Various data structure definitions for use with Bro's communication system.
|
||||
|
||||
module Log;
|
||||
|
||||
export {
|
||||
type Log::ID: enum {
|
||||
## Dummy place-holder.
|
||||
UNKNOWN
|
||||
};
|
||||
}
|
||||
##! The Broker-based communication API and its various options.
|
||||
|
||||
module Broker;
|
||||
|
||||
export {
|
||||
## Default port for Broker communication. Where not specified
|
||||
## otherwise, this is the port to connect to and listen on.
|
||||
const default_port = 9999/tcp &redef;
|
||||
|
||||
## A name used to identify this endpoint to peers.
|
||||
## Default interval to retry listening on a port if it's currently in
|
||||
## use already. Use of the BRO_DEFAULT_LISTEN_RETRY environment variable
|
||||
## (set as a number of seconds) will override this option and also
|
||||
## any values given to :bro:see:`Broker::listen`.
|
||||
const default_listen_retry = 30sec &redef;
|
||||
|
||||
## Default address on which to listen.
|
||||
##
|
||||
## .. bro:see:: Broker::connect Broker::listen
|
||||
const endpoint_name = "" &redef;
|
||||
## .. bro:see:: Broker::listen
|
||||
const default_listen_address = getenv("BRO_DEFAULT_LISTEN_ADDRESS") &redef;
|
||||
|
||||
## Change communication behavior.
|
||||
type EndpointFlags: record {
|
||||
## Whether to restrict message topics that can be published to peers.
|
||||
auto_publish: bool &default = T;
|
||||
## Whether to restrict what message topics or data store identifiers
|
||||
## the local endpoint advertises to peers (e.g. subscribing to
|
||||
## events or making a master data store available).
|
||||
auto_advertise: bool &default = T;
|
||||
## Default interval to retry connecting to a peer if it cannot be made to
|
||||
## work initially, or if it ever becomes disconnected. Use of the
|
||||
## BRO_DEFAULT_CONNECT_RETRY environment variable (set as number of
|
||||
## seconds) will override this option and also any values given to
|
||||
## :bro:see:`Broker::peer`.
|
||||
const default_connect_retry = 30sec &redef;
|
||||
|
||||
## If true, do not use SSL for network connections. By default, SSL will
|
||||
## even be used if no certificates / CAs have been configured. In that case
|
||||
## (which is the default) the communication will be encrypted, but not
|
||||
## authenticated.
|
||||
const disable_ssl = F &redef;
|
||||
|
||||
## Path to a file containing concatenated trusted certificates
|
||||
## in PEM format. If set, Bro will require valid certificates for
|
||||
## all peers.
|
||||
const ssl_cafile = "" &redef;
|
||||
|
||||
## Path to an OpenSSL-style directory of trusted certificates.
|
||||
## If set, Bro will require valid certificates for
|
||||
## all peers.
|
||||
const ssl_capath = "" &redef;
|
||||
|
||||
## Path to a file containing a X.509 certificate for this
|
||||
## node in PEM format. If set, Bro will require valid certificates for
|
||||
## all peers.
|
||||
const ssl_certificate = "" &redef;
|
||||
|
||||
## Passphrase to decrypt the private key specified by
|
||||
## :bro:see:`Broker::ssl_keyfile`. If set, Bro will require valid
|
||||
## certificates for all peers.
|
||||
const ssl_passphrase = "" &redef;
|
||||
|
||||
## Path to the file containing the private key for this node's
|
||||
## certificate. If set, Bro will require valid certificates for
|
||||
## all peers.
|
||||
const ssl_keyfile = "" &redef;
|
||||
|
||||
## The number of buffered messages at the Broker/CAF layer after which
|
||||
## a subscriber considers themselves congested (i.e. tune the congestion
|
||||
## control mechanisms).
|
||||
const congestion_queue_size = 200 &redef;
|
||||
|
||||
## Max number of threads to use for Broker/CAF functionality. Setting to
|
||||
## zero implies using the value of BRO_BROKER_MAX_THREADS environment
|
||||
## variable, if set, or else typically defaults to 4 (actually 2 threads
|
||||
## when simply reading offline pcaps as there's not expected to be any
|
||||
## communication and more threads just adds more overhead).
|
||||
const max_threads = 0 &redef;
|
||||
|
||||
## Max number of microseconds for under-utilized Broker/CAF
|
||||
## threads to sleep. Using zero will cause this to be automatically
|
||||
## determined or just use CAF's default setting.
|
||||
const max_sleep = 0 &redef;
|
||||
|
||||
## Forward all received messages to subscribing peers.
|
||||
const forward_messages = F &redef;
|
||||
|
||||
## The default topic prefix where logs will be published. The log's stream
|
||||
## id is appended when writing to a particular stream.
|
||||
const default_log_topic_prefix = "bro/logs/" &redef;
|
||||
|
||||
## The default implementation for :bro:see:`Broker::log_topic`.
|
||||
function default_log_topic(id: Log::ID, path: string): string
|
||||
{
|
||||
return default_log_topic_prefix + cat(id);
|
||||
}
|
||||
|
||||
## A function that will be called for each log entry to determine what
|
||||
## broker topic string will be used for sending it to peers. The
|
||||
## default implementation will return a value based on
|
||||
## :bro:see:`Broker::default_log_topic_prefix`.
|
||||
##
|
||||
## id: the ID associated with the log stream entry that will be sent.
|
||||
##
|
||||
## path: the path to which the log stream entry will be output.
|
||||
##
|
||||
## Returns: a string representing the broker topic to which the log
|
||||
## will be sent.
|
||||
const log_topic: function(id: Log::ID, path: string): string = default_log_topic &redef;
|
||||
|
||||
type ErrorCode: enum {
|
||||
## The unspecified default error code.
|
||||
UNSPECIFIED = 1,
|
||||
## Version incompatibility.
|
||||
PEER_INCOMPATIBLE = 2,
|
||||
## Referenced peer does not exist.
|
||||
PEER_INVALID = 3,
|
||||
## Remote peer not listening.
|
||||
PEER_UNAVAILABLE = 4,
|
||||
## A peering request timed out.
|
||||
PEER_TIMEOUT = 5,
|
||||
## Master with given name already exists.
|
||||
MASTER_EXISTS = 6,
|
||||
## Master with given name does not exist.
|
||||
NO_SUCH_MASTER = 7,
|
||||
## The given data store key does not exist.
|
||||
NO_SUCH_KEY = 8,
|
||||
## The store operation timed out.
|
||||
REQUEST_TIMEOUT = 9,
|
||||
## The operation expected a different type than provided.
|
||||
TYPE_CLASH = 10,
|
||||
## The data value cannot be used to carry out the desired operation.
|
||||
INVALID_DATA = 11,
|
||||
## The storage backend failed to execute the operation.
|
||||
BACKEND_FAILURE = 12,
|
||||
## The storage backend failed to execute the operation.
|
||||
STALE_DATA = 13,
|
||||
## Catch-all for a CAF-level problem.
|
||||
CAF_ERROR = 100
|
||||
};
|
||||
|
||||
## Fine-grained tuning of communication behavior for a particular message.
|
||||
type SendFlags: record {
|
||||
## Send the message to the local endpoint.
|
||||
self: bool &default = F;
|
||||
## Send the message to peer endpoints that advertise interest in
|
||||
## the topic associated with the message.
|
||||
peers: bool &default = T;
|
||||
## Send the message to peer endpoints even if they don't advertise
|
||||
## interest in the topic associated with the message.
|
||||
unsolicited: bool &default = F;
|
||||
## The possible states of a peer endpoint.
|
||||
type PeerStatus: enum {
|
||||
## The peering process is initiated.
|
||||
INITIALIZING,
|
||||
## Connection establishment in process.
|
||||
CONNECTING,
|
||||
## Connection established, peering pending.
|
||||
CONNECTED,
|
||||
## Successfully peered.
|
||||
PEERED,
|
||||
## Connection to remote peer lost.
|
||||
DISCONNECTED,
|
||||
## Reconnecting to peer after a lost connection.
|
||||
RECONNECTING,
|
||||
};
|
||||
|
||||
type NetworkInfo: record {
|
||||
## The IP address or hostname where the endpoint listens.
|
||||
address: string &log;
|
||||
## The port where the endpoint is bound to.
|
||||
bound_port: port &log;
|
||||
};
|
||||
|
||||
type EndpointInfo: record {
|
||||
## A unique identifier of the node.
|
||||
id: string;
|
||||
## Network-level information.
|
||||
network: NetworkInfo &optional;
|
||||
};
|
||||
|
||||
type PeerInfo: record {
|
||||
peer: EndpointInfo;
|
||||
status: PeerStatus;
|
||||
};
|
||||
|
||||
type PeerInfos: vector of PeerInfo;
|
||||
|
||||
## Opaque communication data.
|
||||
type Data: record {
|
||||
d: opaque of Broker::Data &optional;
|
||||
data: opaque of Broker::Data &optional;
|
||||
};
|
||||
|
||||
## Opaque communication data.
|
||||
## Opaque communication data sequence.
|
||||
type DataVector: vector of Broker::Data;
|
||||
|
||||
## Opaque event communication data.
|
||||
type EventArgs: record {
|
||||
type Event: record {
|
||||
## The name of the event. Not set if invalid event or arguments.
|
||||
name: string &optional;
|
||||
## The arguments to the event.
|
||||
|
@ -63,52 +190,25 @@ export {
|
|||
val: Broker::Data;
|
||||
};
|
||||
|
||||
## Enable use of communication.
|
||||
##
|
||||
## flags: used to tune the local Broker endpoint behavior.
|
||||
##
|
||||
## Returns: true if communication is successfully initialized.
|
||||
global enable: function(flags: EndpointFlags &default = EndpointFlags()): bool;
|
||||
|
||||
## Changes endpoint flags originally supplied to :bro:see:`Broker::enable`.
|
||||
##
|
||||
## flags: the new endpoint behavior flags to use.
|
||||
##
|
||||
## Returns: true if flags were changed.
|
||||
global set_endpoint_flags: function(flags: EndpointFlags &default = EndpointFlags()): bool;
|
||||
|
||||
## Allow sending messages to peers if associated with the given topic.
|
||||
## This has no effect if auto publication behavior is enabled via the flags
|
||||
## supplied to :bro:see:`Broker::enable` or :bro:see:`Broker::set_endpoint_flags`.
|
||||
##
|
||||
## topic: a topic to allow messages to be published under.
|
||||
##
|
||||
## Returns: true if successful.
|
||||
global publish_topic: function(topic: string): bool;
|
||||
|
||||
## Disallow sending messages to peers if associated with the given topic.
|
||||
## This has no effect if auto publication behavior is enabled via the flags
|
||||
## supplied to :bro:see:`Broker::enable` or :bro:see:`Broker::set_endpoint_flags`.
|
||||
##
|
||||
## topic: a topic to disallow messages to be published under.
|
||||
##
|
||||
## Returns: true if successful.
|
||||
global unpublish_topic: function(topic: string): bool;
|
||||
|
||||
## Listen for remote connections.
|
||||
##
|
||||
## p: the TCP port to listen on.
|
||||
##
|
||||
## a: an address string on which to accept connections, e.g.
|
||||
## "127.0.0.1". An empty string refers to @p INADDR_ANY.
|
||||
## "127.0.0.1". An empty string refers to INADDR_ANY.
|
||||
##
|
||||
## reuse: equivalent to behavior of SO_REUSEADDR.
|
||||
## p: the TCP port to listen on. The value 0 means that the OS should choose
|
||||
## the next available free port.
|
||||
##
|
||||
## Returns: true if the local endpoint is now listening for connections.
|
||||
## retry: If non-zero, retries listening in regular intervals if the port cannot be
|
||||
## acquired immediately. 0 disables retries. If the
|
||||
## BRO_DEFAULT_LISTEN_RETRY environment variable is set (as number
|
||||
## of seconds), it overrides any value given here.
|
||||
##
|
||||
## .. bro:see:: Broker::incoming_connection_established
|
||||
global listen: function(p: port, a: string &default = "", reuse: bool &default = T): bool;
|
||||
|
||||
## Returns: the bound port or 0/? on failure.
|
||||
##
|
||||
## .. bro:see:: Broker::status
|
||||
global listen: function(a: string &default = default_listen_address,
|
||||
p: port &default = default_port,
|
||||
retry: interval &default = default_listen_retry): port;
|
||||
## Initiate a remote connection.
|
||||
##
|
||||
## a: an address to connect to, e.g. "localhost" or "127.0.0.1".
|
||||
|
@ -117,69 +217,82 @@ export {
|
|||
##
|
||||
## retry: an interval at which to retry establishing the
|
||||
## connection with the remote peer if it cannot be made initially, or
|
||||
## if it ever becomes disconnected.
|
||||
## if it ever becomes disconnected. If the
|
||||
## BRO_DEFAULT_CONNECT_RETRY environment variable is set (as number
|
||||
## of seconds), it overrides any value given here.
|
||||
##
|
||||
## Returns: true if it's possible to try connecting with the peer and
|
||||
## it's a new peer. The actual connection may not be established
|
||||
## it's a new peer. The actual connection may not be established
|
||||
## until a later point in time.
|
||||
##
|
||||
## .. bro:see:: Broker::outgoing_connection_established
|
||||
global connect: function(a: string, p: port, retry: interval): bool;
|
||||
## .. bro:see:: Broker::status
|
||||
global peer: function(a: string, p: port &default=default_port,
|
||||
retry: interval &default=default_connect_retry): bool;
|
||||
|
||||
## Remove a remote connection.
|
||||
##
|
||||
## a: the address used in previous successful call to :bro:see:`Broker::connect`.
|
||||
## Note that this does not terminate the connection to the peer, it
|
||||
## just means that we won't exchange any further information with it
|
||||
## unless peering resumes later.
|
||||
##
|
||||
## p: the port used in previous successful call to :bro:see:`Broker::connect`.
|
||||
## a: the address used in previous successful call to :bro:see:`Broker::peer`.
|
||||
##
|
||||
## p: the port used in previous successful call to :bro:see:`Broker::peer`.
|
||||
##
|
||||
## Returns: true if the arguments match a previously successful call to
|
||||
## :bro:see:`Broker::connect`.
|
||||
global disconnect: function(a: string, p: port): bool;
|
||||
## :bro:see:`Broker::peer`.
|
||||
##
|
||||
## TODO: We do not have a function yet to terminate a connection.
|
||||
global unpeer: function(a: string, p: port): bool;
|
||||
|
||||
## Print a simple message to any interested peers. The receiver can use
|
||||
## :bro:see:`Broker::print_handler` to handle messages.
|
||||
## Get a list of all peer connections.
|
||||
##
|
||||
## topic: a topic associated with the printed message.
|
||||
## Returns: a list of all peer connections.
|
||||
global peers: function(): vector of PeerInfo;
|
||||
|
||||
## Get a unique identifier for the local broker endpoint.
|
||||
##
|
||||
## msg: the print message to send to peers.
|
||||
## Returns: a unique identifier for the local broker endpoint.
|
||||
global node_id: function(): string;
|
||||
|
||||
## Sends all pending log messages to remote peers. This normally
|
||||
## doesn't need to be used except for test cases that are time-sensitive.
|
||||
global flush_logs: function(): count;
|
||||
|
||||
## Publishes the value of an identifier to a given topic. The subscribers
|
||||
## will update their local value for that identifier on receipt.
|
||||
##
|
||||
## flags: tune the behavior of how the message is sent.
|
||||
## topic: a topic associated with the message.
|
||||
##
|
||||
## id: the identifier to publish.
|
||||
##
|
||||
## Returns: true if the message is sent.
|
||||
global send_print: function(topic: string, msg: string, flags: SendFlags &default = SendFlags()): bool;
|
||||
global publish_id: function(topic: string, id: string): bool;
|
||||
|
||||
## Register interest in all peer print messages that use a certain topic
|
||||
## prefix. Use :bro:see:`Broker::print_handler` to handle received
|
||||
## messages.
|
||||
## Register interest in all peer event messages that use a certain topic
|
||||
## prefix. Note that subscriptions may not be altered immediately after
|
||||
## calling (except during :bro:see:`bro_init`).
|
||||
##
|
||||
## topic_prefix: a prefix to match against remote message topics.
|
||||
## e.g. an empty prefix matches everything and "a" matches
|
||||
## "alice" and "amy" but not "bob".
|
||||
##
|
||||
## Returns: true if it's a new print subscription and it is now registered.
|
||||
global subscribe_to_prints: function(topic_prefix: string): bool;
|
||||
## Returns: true if it's a new event subscription and it is now registered.
|
||||
global subscribe: function(topic_prefix: string): bool;
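A minimal sketch (the "myapp/" prefix is hypothetical); subscriptions are typically registered in bro_init so they are in place before any peerings come up.

event bro_init()
    {
    # Receive every message whose topic starts with "myapp/".
    Broker::subscribe("myapp/");
    }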
|
||||
|
||||
## Unregister interest in all peer print messages that use a topic prefix.
|
||||
## Unregister interest in all peer event messages that use a topic prefix.
|
||||
## Note that subscriptions may not be altered immediately after calling
|
||||
## (except during :bro:see:`bro_init`).
|
||||
##
|
||||
## topic_prefix: a prefix previously supplied to a successful call to
|
||||
## :bro:see:`Broker::subscribe_to_prints`.
|
||||
## :bro:see:`Broker::subscribe`.
|
||||
##
|
||||
## Returns: true if interest in the topic prefix is no longer advertised.
|
||||
global unsubscribe_to_prints: function(topic_prefix: string): bool;
|
||||
|
||||
## Send an event to any interested peers.
|
||||
##
|
||||
## topic: a topic associated with the event message.
|
||||
##
|
||||
## args: event arguments as made by :bro:see:`Broker::event_args`.
|
||||
##
|
||||
## flags: tune the behavior of how the message is sent.
|
||||
##
|
||||
## Returns: true if the message is sent.
|
||||
global send_event: function(topic: string, args: EventArgs, flags: SendFlags &default = SendFlags()): bool;
|
||||
global unsubscribe: function(topic_prefix: string): bool;
|
||||
|
||||
## Automatically send an event to any interested peers whenever it is
|
||||
## locally dispatched (e.g. using "event my_event(...);" in a script).
|
||||
## locally dispatched. (For example, using "event my_event(...);" in a
|
||||
## script.)
|
||||
##
|
||||
## topic: a topic string associated with the event message.
|
||||
## Peers advertise interest by registering a subscription to some
|
||||
|
@ -187,83 +300,18 @@ export {
|
|||
##
|
||||
## ev: a Bro event value.
|
||||
##
|
||||
## flags: tune the behavior of how the message is sent.
|
||||
##
|
||||
## Returns: true if automatic event sending is now enabled.
|
||||
global auto_event: function(topic: string, ev: any, flags: SendFlags &default = SendFlags()): bool;
|
||||
global auto_publish: function(topic: string, ev: any): bool;
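A sketch assuming a user-defined event and topic (both names are hypothetical): once registered, every local dispatch of the event is also forwarded to interested peers.

global my_event: event(msg: string);

event bro_init()
    {
    # Each local "event my_event(...)" also reaches peers subscribed
    # to "myapp/events".
    Broker::auto_publish("myapp/events", my_event);
    }

event my_event(msg: string)
    {
    print fmt("my_event: %s", msg);
    }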
|
||||
|
||||
## Stop automatically sending an event to peers upon local dispatch.
|
||||
##
|
||||
## topic: a topic originally given to :bro:see:`Broker::auto_event`.
|
||||
## topic: a topic originally given to :bro:see:`Broker::auto_publish`.
|
||||
##
|
||||
## ev: an event originally given to :bro:see:`Broker::auto_event`.
|
||||
## ev: an event originally given to :bro:see:`Broker::auto_publish`.
|
||||
##
|
||||
## Returns: true if automatic events will not occur for the topic/event
|
||||
## pair.
|
||||
global auto_event_stop: function(topic: string, ev: any): bool;
|
||||
|
||||
## Register interest in all peer event messages that use a certain topic
|
||||
## prefix.
|
||||
##
|
||||
## topic_prefix: a prefix to match against remote message topics.
|
||||
## e.g. an empty prefix matches everything and "a" matches
|
||||
## "alice" and "amy" but not "bob".
|
||||
##
|
||||
## Returns: true if it's a new event subscription and it is now registered.
|
||||
global subscribe_to_events: function(topic_prefix: string): bool;
|
||||
|
||||
## Unregister interest in all peer event messages that use a topic prefix.
|
||||
##
|
||||
## topic_prefix: a prefix previously supplied to a successful call to
|
||||
## :bro:see:`Broker::subscribe_to_events`.
|
||||
##
|
||||
## Returns: true if interest in the topic prefix is no longer advertised.
|
||||
global unsubscribe_to_events: function(topic_prefix: string): bool;
|
||||
|
||||
## Enable remote logs for a given log stream.
|
||||
##
|
||||
## id: the log stream to enable remote logs for.
|
||||
##
|
||||
## flags: tune the behavior of how log entry messages are sent.
|
||||
##
|
||||
## Returns: true if remote logs are enabled for the stream.
|
||||
global enable_remote_logs: function(id: Log::ID, flags: SendFlags &default = SendFlags()): bool;
|
||||
|
||||
## Disable remote logs for a given log stream.
|
||||
##
|
||||
## id: the log stream to disable remote logs for.
|
||||
##
|
||||
## Returns: true if remote logs are disabled for the stream.
|
||||
global disable_remote_logs: function(id: Log::ID): bool;
|
||||
|
||||
## Check if remote logs are enabled for a given log stream.
|
||||
##
|
||||
## id: the log stream to check.
|
||||
##
|
||||
## Returns: true if remote logs are enabled for the given stream.
|
||||
global remote_logs_enabled: function(id: Log::ID): bool;
|
||||
|
||||
## Register interest in all peer log messages that use a certain topic
|
||||
## prefix. Logs are implicitly sent with topic "bro/log/<stream-name>" and
|
||||
## the receiving side processes them through the logging framework as usual.
|
||||
##
|
||||
## topic_prefix: a prefix to match against remote message topics.
|
||||
## e.g. an empty prefix matches everything and "a" matches
|
||||
## "alice" and "amy" but not "bob".
|
||||
##
|
||||
## Returns: true if it's a new log subscription and it is now registered.
|
||||
global subscribe_to_logs: function(topic_prefix: string): bool;
|
||||
|
||||
## Unregister interest in all peer log messages that use a topic prefix.
|
||||
## Logs are implicitly sent with topic "bro/log/<stream-name>" and the
|
||||
## receiving side processes them through the logging framework as usual.
|
||||
##
|
||||
## topic_prefix: a prefix previously supplied to a successful call to
|
||||
## :bro:see:`Broker::subscribe_to_logs`.
|
||||
##
|
||||
## Returns: true if interest in the topic prefix is no longer advertised.
|
||||
global unsubscribe_to_logs: function(topic_prefix: string): bool;
|
||||
|
||||
global auto_unpublish: function(topic: string, ev: any): bool;
|
||||
}
|
||||
|
||||
@load base/bif/comm.bif
|
||||
|
@ -271,106 +319,75 @@ export {
|
|||
|
||||
module Broker;
|
||||
|
||||
@ifdef ( Broker::__enable )
|
||||
event retry_listen(a: string, p: port, retry: interval)
|
||||
{
|
||||
listen(a, p, retry);
|
||||
}
|
||||
|
||||
function enable(flags: EndpointFlags &default = EndpointFlags()) : bool
|
||||
{
|
||||
return __enable(flags);
|
||||
}
|
||||
function listen(a: string, p: port, retry: interval): port
|
||||
{
|
||||
local bound = __listen(a, p);
|
||||
|
||||
function set_endpoint_flags(flags: EndpointFlags &default = EndpointFlags()): bool
|
||||
{
|
||||
return __set_endpoint_flags(flags);
|
||||
}
|
||||
if ( bound == 0/tcp )
|
||||
{
|
||||
local e = getenv("BRO_DEFAULT_LISTEN_RETRY");
|
||||
|
||||
function publish_topic(topic: string): bool
|
||||
{
|
||||
return __publish_topic(topic);
|
||||
}
|
||||
if ( e != "" )
|
||||
retry = double_to_interval(to_double(e));
|
||||
|
||||
function unpublish_topic(topic: string): bool
|
||||
{
|
||||
return __unpublish_topic(topic);
|
||||
}
|
||||
if ( retry != 0secs )
|
||||
schedule retry { retry_listen(a, p, retry) };
|
||||
}
|
||||
|
||||
function listen(p: port, a: string &default = "", reuse: bool &default = T): bool
|
||||
{
|
||||
return __listen(p, a, reuse);
|
||||
}
|
||||
return bound;
|
||||
}
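Given the retry logic above, a caller only needs something like the following sketch (interface and port are hypothetical); a failed bind is retried on the given interval, or per BRO_DEFAULT_LISTEN_RETRY if that environment variable is set.

event bro_init()
    {
    local bound = Broker::listen("0.0.0.0", 9999/tcp, 30secs);

    if ( bound == 0/tcp )
        print "bind failed for now; Broker will retry";
    }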
|
||||
|
||||
function connect(a: string, p: port, retry: interval): bool
|
||||
{
|
||||
return __connect(a, p, retry);
|
||||
}
|
||||
function peer(a: string, p: port, retry: interval): bool
|
||||
{
|
||||
return __peer(a, p, retry);
|
||||
}
|
||||
|
||||
function disconnect(a: string, p: port): bool
|
||||
{
|
||||
return __disconnect(a, p);
|
||||
}
|
||||
function unpeer(a: string, p: port): bool
|
||||
{
|
||||
return __unpeer(a, p);
|
||||
}
|
||||
|
||||
function send_print(topic: string, msg: string, flags: SendFlags &default = SendFlags()): bool
|
||||
{
|
||||
return __send_print(topic, msg, flags);
|
||||
}
|
||||
function peers(): vector of PeerInfo
|
||||
{
|
||||
return __peers();
|
||||
}
|
||||
|
||||
function subscribe_to_prints(topic_prefix: string): bool
|
||||
{
|
||||
return __subscribe_to_prints(topic_prefix);
|
||||
}
|
||||
function node_id(): string
|
||||
{
|
||||
return __node_id();
|
||||
}
|
||||
|
||||
function unsubscribe_to_prints(topic_prefix: string): bool
|
||||
{
|
||||
return __unsubscribe_to_prints(topic_prefix);
|
||||
}
|
||||
function flush_logs(): count
|
||||
{
|
||||
return __flush_logs();
|
||||
}
|
||||
|
||||
function send_event(topic: string, args: EventArgs, flags: SendFlags &default = SendFlags()): bool
|
||||
{
|
||||
return __event(topic, args, flags);
|
||||
}
|
||||
function publish_id(topic: string, id: string): bool
|
||||
{
|
||||
return __publish_id(topic, id);
|
||||
}
|
||||
|
||||
function auto_event(topic: string, ev: any, flags: SendFlags &default = SendFlags()): bool
|
||||
{
|
||||
return __auto_event(topic, ev, flags);
|
||||
}
|
||||
function subscribe(topic_prefix: string): bool
|
||||
{
|
||||
return __subscribe(topic_prefix);
|
||||
}
|
||||
|
||||
function auto_event_stop(topic: string, ev: any): bool
|
||||
{
|
||||
return __auto_event_stop(topic, ev);
|
||||
}
|
||||
function unsubscribe(topic_prefix: string): bool
|
||||
{
|
||||
return __unsubscribe(topic_prefix);
|
||||
}
|
||||
|
||||
function subscribe_to_events(topic_prefix: string): bool
|
||||
{
|
||||
return __subscribe_to_events(topic_prefix);
|
||||
}
|
||||
function auto_publish(topic: string, ev: any): bool
|
||||
{
|
||||
return __auto_publish(topic, ev);
|
||||
}
|
||||
|
||||
function unsubscribe_to_events(topic_prefix: string): bool
|
||||
{
|
||||
return __unsubscribe_to_events(topic_prefix);
|
||||
}
|
||||
|
||||
function enable_remote_logs(id: Log::ID, flags: SendFlags &default = SendFlags()): bool
|
||||
{
|
||||
return __enable_remote_logs(id, flags);
|
||||
}
|
||||
|
||||
function disable_remote_logs(id: Log::ID): bool
|
||||
{
|
||||
return __disable_remote_logs(id);
|
||||
}
|
||||
|
||||
function remote_logs_enabled(id: Log::ID): bool
|
||||
{
|
||||
return __remote_logs_enabled(id);
|
||||
}
|
||||
|
||||
function subscribe_to_logs(topic_prefix: string): bool
|
||||
{
|
||||
return __subscribe_to_logs(topic_prefix);
|
||||
}
|
||||
|
||||
function unsubscribe_to_logs(topic_prefix: string): bool
|
||||
{
|
||||
return __unsubscribe_to_logs(topic_prefix);
|
||||
}
|
||||
|
||||
@endif
|
||||
function auto_unpublish(topic: string, ev: any): bool
|
||||
{
|
||||
return __auto_unpublish(topic, ev);
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,11 +1,16 @@
|
|||
# Load the core cluster support.
|
||||
@load ./main
|
||||
@load ./pools
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
|
||||
# Give the node being started up its peer name.
|
||||
redef peer_description = Cluster::node;
|
||||
|
||||
@if ( Cluster::enable_round_robin_logging )
|
||||
redef Broker::log_topic = Cluster::rr_log_topic;
|
||||
@endif
|
||||
|
||||
# Add a cluster prefix.
|
||||
@prefixes += cluster
|
||||
|
||||
|
@ -19,13 +24,6 @@ redef peer_description = Cluster::node;
|
|||
|
||||
@load ./setup-connections
|
||||
|
||||
# Don't load the listening script until we're a bit more sure that the
|
||||
# cluster framework is actually being enabled.
|
||||
@load frameworks/communication/listen
|
||||
|
||||
## Set the port that this node is supposed to listen on.
|
||||
redef Communication::listen_port = Cluster::nodes[Cluster::node]$p;
|
||||
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
@load ./nodes/manager
|
||||
# If no logger is defined, then the manager receives logs.
|
||||
|
|
|
@ -7,10 +7,111 @@
|
|||
##! ``@load base/frameworks/cluster``.
|
||||
|
||||
@load base/frameworks/control
|
||||
@load base/frameworks/broker
|
||||
|
||||
module Cluster;
|
||||
|
||||
export {
|
||||
## Whether to distribute log messages among available logging nodes.
|
||||
const enable_round_robin_logging = T &redef;
|
||||
|
||||
## The topic name used for exchanging general messages that are relevant to
|
||||
## any node in a cluster. Used with broker-enabled cluster communication.
|
||||
const broadcast_topic = "bro/cluster/broadcast" &redef;
|
||||
|
||||
## The topic name used for exchanging messages that are relevant to
|
||||
## logger nodes in a cluster. Used with broker-enabled cluster communication.
|
||||
const logger_topic = "bro/cluster/logger" &redef;
|
||||
|
||||
## The topic name used for exchanging messages that are relevant to
|
||||
## manager nodes in a cluster. Used with broker-enabled cluster communication.
|
||||
const manager_topic = "bro/cluster/manager" &redef;
|
||||
|
||||
## The topic name used for exchanging messages that are relevant to
|
||||
## proxy nodes in a cluster. Used with broker-enabled cluster communication.
|
||||
const proxy_topic = "bro/cluster/proxy" &redef;
|
||||
|
||||
## The topic name used for exchanging messages that are relevant to
|
||||
## worker nodes in a cluster. Used with broker-enabled cluster communication.
|
||||
const worker_topic = "bro/cluster/worker" &redef;
|
||||
|
||||
## The topic name used for exchanging messages that are relevant to
|
||||
## time machine nodes in a cluster. Used with broker-enabled cluster communication.
|
||||
const time_machine_topic = "bro/cluster/time_machine" &redef;
|
||||
|
||||
## The topic prefix used for exchanging messages that are relevant to
|
||||
## a named node in a cluster. Used with broker-enabled cluster communication.
|
||||
const node_topic_prefix = "bro/cluster/node/" &redef;
|
||||
|
||||
## Name of the node on which master data stores will be created if no other
|
||||
## has already been specified by the user in :bro:see:`Cluster::stores`.
|
||||
## An empty value means "use whatever name corresponds to the manager
|
||||
## node".
|
||||
const default_master_node = "" &redef;
|
||||
|
||||
## The type of data store backend that will be used for all data stores if
|
||||
## no other has already been specified by the user in :bro:see:`Cluster::stores`.
|
||||
const default_backend = Broker::MEMORY &redef;
|
||||
|
||||
## The type of persistent data store backend that will be used for all data
|
||||
## stores if no other has already been specified by the user in
|
||||
## :bro:see:`Cluster::stores`. This will be used when script authors call
|
||||
## :bro:see:`Cluster::create_store` with the *persistent* argument set true.
|
||||
const default_persistent_backend = Broker::SQLITE &redef;
|
||||
|
||||
## Setting a default dir will, for persistent backends that have not
|
||||
## been given an explicit file path via :bro:see:`Cluster::stores`,
|
||||
## automatically create a path within this dir that is based on the name of
|
||||
## the data store.
|
||||
const default_store_dir = "" &redef;
|
||||
|
||||
## Information regarding a cluster-enabled data store.
|
||||
type StoreInfo: record {
|
||||
## The name of the data store.
|
||||
name: string &optional;
|
||||
## The store handle.
|
||||
store: opaque of Broker::Store &optional;
|
||||
## The name of the cluster node on which the master version of the data
|
||||
## store resides.
|
||||
master_node: string &default=default_master_node;
|
||||
## Whether the data store is the master version or a clone.
|
||||
master: bool &default=F;
|
||||
## The type of backend used for storing data.
|
||||
backend: Broker::BackendType &default=default_backend;
|
||||
## Parameters used for configuring the backend.
|
||||
options: Broker::BackendOptions &default=Broker::BackendOptions();
|
||||
## A resync/reconnect interval to pass through to
|
||||
## :bro:see:`Broker::create_clone`.
|
||||
clone_resync_interval: interval &default=Broker::default_clone_resync_interval;
|
||||
## A staleness duration to pass through to
|
||||
## :bro:see:`Broker::create_clone`.
|
||||
clone_stale_interval: interval &default=Broker::default_clone_stale_interval;
|
||||
## A mutation buffer interval to pass through to
|
||||
## :bro:see:`Broker::create_clone`.
|
||||
clone_mutation_buffer_interval: interval &default=Broker::default_clone_mutation_buffer_interval;
|
||||
};
|
||||
|
||||
## A table of cluster-enabled data stores that have been created, indexed
|
||||
## by their name. This table will be populated automatically by
|
||||
## :bro:see:`Cluster::create_store`, but if you need to customize
|
||||
## the options related to a particular data store, you may redef this
|
||||
## table. Calls to :bro:see:`Cluster::create_store` will first check
|
||||
## the table for an entry of the same name and, if found, will use the
|
||||
## predefined options there when setting up the store.
|
||||
global stores: table[string] of StoreInfo &default=StoreInfo() &redef;
|
||||
|
||||
## Sets up a cluster-enabled data store. Such stores also work properly
|
||||
## when the cluster framework is not in use.
|
||||
##
|
||||
## name: the name of the data store to create.
|
||||
##
|
||||
## persistent: whether the data store must be persistent.
|
||||
##
|
||||
## Returns: the store's information. For master stores, the store will be
|
||||
## ready to use immediately. For clones, the store field will not
|
||||
## be set until the node containing the master store has connected.
|
||||
global create_store: function(name: string, persistent: bool &default=F): StoreInfo;
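A hedged usage sketch ("myapp/seen" is a hypothetical store name): on the node hosting the master the store is usable immediately, elsewhere the clone's handle appears once the master's node has connected.

global seen_store: Cluster::StoreInfo;

event bro_init()
    {
    seen_store = Cluster::create_store("myapp/seen");
    }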
|
||||
|
||||
## The cluster logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
|
@ -18,6 +119,8 @@ export {
|
|||
type Info: record {
|
||||
## The time at which a cluster message was generated.
|
||||
ts: time;
|
||||
## The name of the node that is creating the log record.
|
||||
node: string;
|
||||
## A message indicating information about the cluster's operation.
|
||||
message: string;
|
||||
} &log;
|
||||
|
@ -46,43 +149,6 @@ export {
|
|||
TIME_MACHINE,
|
||||
};
|
||||
|
||||
## Events raised by a manager and handled by the workers.
|
||||
const manager2worker_events = /Drop::.*/ &redef;
|
||||
|
||||
## Events raised by a manager and handled by proxies.
|
||||
const manager2proxy_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by a manager and handled by loggers.
|
||||
const manager2logger_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by proxies and handled by loggers.
|
||||
const proxy2logger_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by proxies and handled by a manager.
|
||||
const proxy2manager_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by proxies and handled by workers.
|
||||
const proxy2worker_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by workers and handled by loggers.
|
||||
const worker2logger_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by workers and handled by a manager.
|
||||
const worker2manager_events = /(TimeMachine::command|Drop::.*)/ &redef;
|
||||
|
||||
## Events raised by workers and handled by proxies.
|
||||
const worker2proxy_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by TimeMachine instances and handled by a manager.
|
||||
const tm2manager_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by TimeMachine instances and handled by workers.
|
||||
const tm2worker_events = /EMPTY/ &redef;
|
||||
|
||||
## Events sent by the control host (i.e., BroControl) when dynamically
|
||||
## connecting to a running instance to update settings or request data.
|
||||
const control_events = Control::controller_events &redef;
|
||||
|
||||
## Record type to indicate a node in a cluster.
|
||||
type Node: record {
|
||||
## Identifies the type of cluster node in this node's configuration.
|
||||
|
@ -92,22 +158,17 @@ export {
|
|||
## If the *ip* field is a non-global IPv6 address, this field
|
||||
## can specify a particular :rfc:`4007` ``zone_id``.
|
||||
zone_id: string &default="";
|
||||
## The port to which this local node can connect when
|
||||
## establishing communication.
|
||||
## The port that this node will listen on for peer connections.
|
||||
p: port;
|
||||
## Identifier for the interface a worker is sniffing.
|
||||
interface: string &optional;
|
||||
## Name of the logger node this node uses. For manager, proxies and workers.
|
||||
logger: string &optional;
|
||||
## Name of the manager node this node uses. For workers and proxies.
|
||||
manager: string &optional;
|
||||
## Name of the proxy node this node uses. For workers and managers.
|
||||
proxy: string &optional;
|
||||
## Names of worker nodes that this node connects with.
|
||||
## For managers and proxies.
|
||||
workers: set[string] &optional;
|
||||
## Name of a time machine node with which this node connects.
|
||||
time_machine: string &optional;
|
||||
## A unique identifier assigned to the node by the broker framework.
|
||||
## This field is only set while a node is connected.
|
||||
id: string &optional;
|
||||
};
|
||||
|
||||
## This function can be called at any time to determine if the cluster
|
||||
|
@ -134,6 +195,8 @@ export {
|
|||
## named cluster-layout.bro somewhere in the BROPATH. It will be
|
||||
## automatically loaded if the CLUSTER_NODE environment variable is set.
|
||||
## Note that BroControl handles all of this automatically.
|
||||
## The table is typically indexed by node names/labels (e.g. "manager"
|
||||
## or "worker-1").
|
||||
const nodes: table[string] of Node = {} &redef;
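For reference, a hand-written cluster-layout.bro might look like the following sketch (node names, addresses, and ports are hypothetical; BroControl normally generates this file):

redef Cluster::nodes = {
    ["manager"]  = [$node_type=Cluster::MANAGER, $ip=10.0.0.1, $p=9990/tcp],
    ["proxy-1"]  = [$node_type=Cluster::PROXY,   $ip=10.0.0.1, $p=9991/tcp,
                    $manager="manager"],
    ["worker-1"] = [$node_type=Cluster::WORKER,  $ip=10.0.0.2, $p=9992/tcp,
                    $manager="manager", $interface="eth0"],
};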
|
||||
|
||||
## Indicates whether or not the manager will act as the logger and receive
|
||||
|
@ -147,9 +210,67 @@ export {
|
|||
const node = getenv("CLUSTER_NODE") &redef;
|
||||
|
||||
## Interval for retrying failed connections between cluster nodes.
|
||||
## If set, the BRO_DEFAULT_CONNECT_RETRY environment variable (given as a
|
||||
## number of seconds) overrides this option.
|
||||
const retry_interval = 1min &redef;
|
||||
|
||||
## When using the broker-enabled cluster framework, nodes broadcast this event
|
||||
## to exchange their user-defined name along with a string that uniquely
|
||||
## identifies it for the duration of its lifetime. This string may change
|
||||
## if the node dies and has to reconnect later.
|
||||
global hello: event(name: string, id: string);
|
||||
|
||||
## When using the broker-enabled cluster framework, this event will be emitted
|
||||
## locally whenever a cluster node connects or reconnects.
|
||||
global node_up: event(name: string, id: string);
|
||||
|
||||
## When using the broker-enabled cluster framework, this event will be emitted
|
||||
## locally whenever a connected cluster node becomes disconnected.
|
||||
global node_down: event(name: string, id: string);
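A sketch of reacting to these notifications locally (the print output is illustrative only):

event Cluster::node_up(name: string, id: string)
    {
    print fmt("cluster node %s came up (endpoint %s)", name, id);
    }

event Cluster::node_down(name: string, id: string)
    {
    print fmt("cluster node %s went down (endpoint %s)", name, id);
    }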
|
||||
|
||||
## Write a message to the cluster logging stream.
|
||||
global log: function(msg: string);
|
||||
|
||||
## Retrieve the topic associated with a specific node in the cluster.
|
||||
##
|
||||
## name: the name of the cluster node (e.g. "manager").
|
||||
##
|
||||
## Returns: a topic string that may be used to send a message exclusively to
|
||||
## a given cluster node.
|
||||
global node_topic: function(name: string): string;
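For instance (the event name and target node are hypothetical), combining this with Broker::make_event and Broker::publish addresses a single cluster node:

global do_maintenance: event(reason: string);

function ask_manager_for_maintenance()
    {
    local e = Broker::make_event(do_maintenance, "disk almost full");
    Broker::publish(Cluster::node_topic("manager"), e);
    }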
|
||||
}
|
||||
|
||||
global active_worker_ids: set[string] = set();
|
||||
|
||||
type NamedNode: record {
|
||||
name: string;
|
||||
node: Node;
|
||||
};
|
||||
|
||||
function nodes_with_type(node_type: NodeType): vector of NamedNode
|
||||
{
|
||||
local rval: vector of NamedNode = vector();
|
||||
local names: vector of string = vector();
|
||||
|
||||
for ( name in Cluster::nodes )
|
||||
names += name;
|
||||
|
||||
names = sort(names, strcmp);
|
||||
|
||||
for ( i in names )
|
||||
{
|
||||
name = names[i];
|
||||
local n = Cluster::nodes[name];
|
||||
|
||||
if ( n$node_type != node_type )
|
||||
next;
|
||||
|
||||
rval += NamedNode($name=name, $node=n);
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
function is_enabled(): bool
|
||||
{
|
||||
return (node != "");
|
||||
|
@ -160,16 +281,70 @@ function local_node_type(): NodeType
|
|||
return is_enabled() ? nodes[node]$node_type : NONE;
|
||||
}
|
||||
|
||||
event remote_connection_handshake_done(p: event_peer) &priority=5
|
||||
function node_topic(name: string): string
|
||||
{
|
||||
if ( p$descr in nodes && nodes[p$descr]$node_type == WORKER )
|
||||
++worker_count;
|
||||
return node_topic_prefix + name;
|
||||
}
|
||||
|
||||
event remote_connection_closed(p: event_peer) &priority=5
|
||||
event Cluster::hello(name: string, id: string) &priority=10
|
||||
{
|
||||
if ( p$descr in nodes && nodes[p$descr]$node_type == WORKER )
|
||||
--worker_count;
|
||||
if ( name !in nodes )
|
||||
{
|
||||
Reporter::error(fmt("Got Cluster::hello msg from unexpected node: %s", name));
|
||||
return;
|
||||
}
|
||||
|
||||
local n = nodes[name];
|
||||
|
||||
if ( n?$id )
|
||||
{
|
||||
if ( n$id != id )
|
||||
Reporter::error(fmt("Got Cluster::hello msg from duplicate node:%s",
|
||||
name));
|
||||
}
|
||||
else
|
||||
event Cluster::node_up(name, id);
|
||||
|
||||
n$id = id;
|
||||
Cluster::log(fmt("got hello from %s (%s)", name, id));
|
||||
|
||||
if ( n$node_type == WORKER )
|
||||
{
|
||||
add active_worker_ids[id];
|
||||
worker_count = |active_worker_ids|;
|
||||
}
|
||||
}
|
||||
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10
|
||||
{
|
||||
if ( ! Cluster::is_enabled() )
|
||||
return;
|
||||
|
||||
local e = Broker::make_event(Cluster::hello, node, Broker::node_id());
|
||||
Broker::publish(Cluster::broadcast_topic, e);
|
||||
}
|
||||
|
||||
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) &priority=10
|
||||
{
|
||||
for ( node_name in nodes )
|
||||
{
|
||||
local n = nodes[node_name];
|
||||
|
||||
if ( n?$id && n$id == endpoint$id )
|
||||
{
|
||||
Cluster::log(fmt("node down: %s", node_name));
|
||||
delete n$id;
|
||||
|
||||
if ( n$node_type == WORKER )
|
||||
{
|
||||
delete active_worker_ids[endpoint$id];
|
||||
worker_count = |active_worker_ids|;
|
||||
}
|
||||
|
||||
event Cluster::node_down(node_name, endpoint$id);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
|
@ -183,3 +358,90 @@ event bro_init() &priority=5
|
|||
|
||||
Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster"]);
|
||||
}
|
||||
|
||||
function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo
|
||||
{
|
||||
local info = stores[name];
|
||||
info$name = name;
|
||||
|
||||
if ( Cluster::default_store_dir != "" )
|
||||
{
|
||||
local default_options = Broker::BackendOptions();
|
||||
local path = Cluster::default_store_dir + "/" + name;
|
||||
|
||||
if ( info$options$sqlite$path == default_options$sqlite$path )
|
||||
info$options$sqlite$path = path + ".sqlite";
|
||||
|
||||
if ( info$options$rocksdb$path == default_options$rocksdb$path )
|
||||
info$options$rocksdb$path = path + ".rocksdb";
|
||||
}
|
||||
|
||||
if ( persistent )
|
||||
{
|
||||
switch ( info$backend ) {
|
||||
case Broker::MEMORY:
|
||||
info$backend = Cluster::default_persistent_backend;
|
||||
break;
|
||||
case Broker::SQLITE:
|
||||
fallthrough;
|
||||
case Broker::ROCKSDB:
|
||||
# no-op: user already asked for a specific persistent backend.
|
||||
break;
|
||||
default:
|
||||
Reporter::error(fmt("unhandled data store type: %s", info$backend));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if ( ! Cluster::is_enabled() )
|
||||
{
|
||||
if ( info?$store )
|
||||
{
|
||||
Reporter::warning(fmt("duplicate cluster store creation for %s", name));
|
||||
return info;
|
||||
}
|
||||
|
||||
info$store = Broker::create_master(name, info$backend, info$options);
|
||||
info$master = T;
|
||||
stores[name] = info;
|
||||
return info;
|
||||
}
|
||||
|
||||
if ( info$master_node == "" )
|
||||
{
|
||||
local mgr_nodes = nodes_with_type(Cluster::MANAGER);
|
||||
|
||||
if ( |mgr_nodes| == 0 )
|
||||
Reporter::fatal(fmt("empty master node name for cluster store " +
|
||||
"'%s', but there's no manager node to default",
|
||||
name));
|
||||
|
||||
info$master_node = mgr_nodes[0]$name;
|
||||
}
|
||||
else if ( info$master_node !in Cluster::nodes )
|
||||
Reporter::fatal(fmt("master node '%s' for cluster store '%s' does not exist",
|
||||
info$master_node, name));
|
||||
|
||||
if ( Cluster::node == info$master_node )
|
||||
{
|
||||
info$store = Broker::create_master(name, info$backend, info$options);
|
||||
info$master = T;
|
||||
stores[name] = info;
|
||||
Cluster::log(fmt("created master store: %s", name));
|
||||
return info;
|
||||
}
|
||||
|
||||
info$master = F;
|
||||
stores[name] = info;
|
||||
info$store = Broker::create_clone(info$name,
|
||||
info$clone_resync_interval,
|
||||
info$clone_stale_interval,
|
||||
info$clone_mutation_buffer_interval);
|
||||
Cluster::log(fmt("created clone store: %s", info$name));
|
||||
return info;
|
||||
}
|
||||
|
||||
function log(msg: string)
|
||||
{
|
||||
Log::write(Cluster::LOG, [$ts = network_time(), $node = node, $message = msg]);
|
||||
}
|
||||
|
|
458
scripts/base/frameworks/cluster/pools.bro
Normal file
|
@ -0,0 +1,458 @@
|
|||
##! Defines an interface for managing pools of cluster nodes. Pools are
|
||||
##! a useful way to distribute work or data among nodes within a cluster.
|
||||
|
||||
@load ./main
|
||||
@load base/utils/hash_hrw
|
||||
|
||||
module Cluster;
|
||||
|
||||
export {
|
||||
## Store state of a cluster within the context of a work pool.
|
||||
type PoolNode: record {
|
||||
## The node name (e.g. "manager").
|
||||
name: string;
|
||||
## An alias of *name* used to prevent hashing collisions when creating
|
||||
## *site_id*.
|
||||
alias: string;
|
||||
## A 32-bit unique identifier for the pool node, derived from name/alias.
|
||||
site_id: count;
|
||||
## Whether the node is currently alive and can receive work.
|
||||
alive: bool &default=F;
|
||||
};
|
||||
|
||||
## A pool specification.
|
||||
type PoolSpec: record {
|
||||
## A topic string that can be used to reach all nodes within a pool.
|
||||
topic: string &default = "";
|
||||
## The type of nodes that are contained within the pool.
|
||||
node_type: Cluster::NodeType &default = Cluster::PROXY;
|
||||
## The maximum number of nodes that may belong to the pool.
|
||||
## If not set, then all available nodes will be added to the pool,
|
||||
## else the cluster framework will automatically limit the pool
|
||||
## membership according to the threshold.
|
||||
max_nodes: count &optional;
|
||||
## Whether the pool requires exclusive access to nodes. If true,
|
||||
## then *max_nodes* nodes will not be assigned to any other pool.
|
||||
## When using this flag, *max_nodes* must also be set.
|
||||
exclusive: bool &default = F;
|
||||
};
|
||||
|
||||
type PoolNodeTable: table[string] of PoolNode;
|
||||
type RoundRobinTable: table[string] of int;
|
||||
|
||||
## A pool used for distributing data/work among a set of cluster nodes.
|
||||
type Pool: record {
|
||||
## The specification of the pool that was used when registering it.
|
||||
spec: PoolSpec &default = PoolSpec();
|
||||
## Nodes in the pool, indexed by their name (e.g. "manager").
|
||||
nodes: PoolNodeTable &default = PoolNodeTable();
|
||||
## A list of nodes in the pool in a deterministic order.
|
||||
node_list: vector of PoolNode &default = vector();
|
||||
## The Rendezvous hashing structure.
|
||||
hrw_pool: HashHRW::Pool &default = HashHRW::Pool();
|
||||
## Round-Robin table indexed by arbitrary key and storing the next
|
||||
## index of *node_list* that will be eligible to receive work (if it's
|
||||
## alive at the time of the next request).
|
||||
rr_key_seq: RoundRobinTable &default = RoundRobinTable();
|
||||
## Number of pool nodes that are currently alive.
|
||||
alive_count: count &default = 0;
|
||||
};
|
||||
|
||||
## The specification for :bro:see:`Cluster::proxy_pool`.
|
||||
global proxy_pool_spec: PoolSpec =
|
||||
PoolSpec($topic = "bro/cluster/pool/proxy",
|
||||
$node_type = Cluster::PROXY) &redef;
|
||||
|
||||
## The specification for :bro:see:`Cluster::worker_pool`.
|
||||
global worker_pool_spec: PoolSpec =
|
||||
PoolSpec($topic = "bro/cluster/pool/worker",
|
||||
$node_type = Cluster::WORKER) &redef;
|
||||
|
||||
## The specification for :bro:see:`Cluster::logger_pool`.
|
||||
global logger_pool_spec: PoolSpec =
|
||||
PoolSpec($topic = "bro/cluster/pool/logger",
|
||||
$node_type = Cluster::LOGGER) &redef;
|
||||
|
||||
## A pool containing all the proxy nodes of a cluster.
|
||||
## The pool's node membership/availability is automatically
|
||||
## maintained by the cluster framework.
|
||||
global proxy_pool: Pool;
|
||||
|
||||
## A pool containing all the worker nodes of a cluster.
|
||||
## The pool's node membership/availability is automatically
|
||||
## maintained by the cluster framework.
|
||||
global worker_pool: Pool;
|
||||
|
||||
## A pool containing all the logger nodes of a cluster.
|
||||
## The pool's node membership/availability is automatically
|
||||
## maintained by the cluster framework.
|
||||
global logger_pool: Pool;
|
||||
|
||||
## Registers and initializes a pool.
|
||||
global register_pool: function(spec: PoolSpec): Pool;
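A sketch of registering a custom pool (topic and size are hypothetical); registration needs to happen before the framework distributes nodes in its lower-priority bro_init handler, so a plain bro_init handler suffices.

global my_pool: Cluster::Pool;

event bro_init()
    {
    my_pool = Cluster::register_pool(
        Cluster::PoolSpec($topic = "bro/cluster/pool/myapp",
                          $node_type = Cluster::PROXY,
                          $max_nodes = 2));
    }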
|
||||
|
||||
## Retrieve the topic associated with the node mapped via Rendezvous hash
|
||||
## of an arbitrary key.
|
||||
##
|
||||
## pool: the pool of nodes to consider.
|
||||
##
|
||||
## key: data used for input to the hashing function that will uniformly
|
||||
## distribute keys among available nodes.
|
||||
##
|
||||
## Returns: a topic string associated with a cluster node that is alive
|
||||
## or an empty string if nothing is alive.
|
||||
global hrw_topic: function(pool: Pool, key: any): string;
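A sketch of sharding work across the proxy pool by key (the event name is hypothetical); Rendezvous hashing keeps a given key on the same proxy for as long as that proxy stays alive.

global process_host: event(h: addr);

function distribute(h: addr)
    {
    local t = Cluster::hrw_topic(Cluster::proxy_pool, h);

    if ( t != "" )
        Broker::publish(t, Broker::make_event(process_host, h));
    }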
|
||||
|
||||
## Retrieve the topic associated with the node in a round-robin fashion.
|
||||
##
|
||||
## pool: the pool of nodes to consider.
|
||||
##
|
||||
## key: an arbitrary string to identify the purpose for which you're
|
||||
## requesting the topic, e.g. consider namespacing it after your script,
|
||||
## like "Intel::cluster_rr_key".
|
||||
##
|
||||
## Returns: a topic string associated with a cluster node that is alive,
|
||||
## or an empty string if nothing is alive.
|
||||
global rr_topic: function(pool: Pool, key: string): string;
|
||||
|
||||
## Distributes log message topics among logger nodes via round-robin.
|
||||
## This will be automatically assigned to :bro:see:`Broker::log_topic`
|
||||
## if :bro:see:`Cluster::enable_round_robin_logging` is enabled.
|
||||
## If no logger nodes are active, then this will return the value
|
||||
## of :bro:see:`Broker::default_log_topic`.
|
||||
global rr_log_topic: function(id: Log::ID, path: string): string;
|
||||
}
|
||||
|
||||
## Initialize a node as a member of a pool.
|
||||
##
|
||||
## pool: the pool to which the node will belong.
|
||||
##
|
||||
## name: the name of the node (e.g. "manager").
|
||||
##
|
||||
## Returns: F if a node of the same name already exists in the pool, else T.
|
||||
global init_pool_node: function(pool: Pool, name: string): bool;
|
||||
|
||||
## Mark a pool node as alive/online/available. :bro:see:`Cluster::hrw_topic`
|
||||
## will distribute keys to nodes marked as alive.
|
||||
##
|
||||
## pool: the pool to which the node belongs.
|
||||
##
|
||||
## name: the name of the node to mark.
|
||||
##
|
||||
## Returns: F if the node does not exist in the pool, else T.
|
||||
global mark_pool_node_alive: function(pool: Pool, name: string): bool;
|
||||
|
||||
## Mark a pool node as dead/offline/unavailable. :bro:see:`Cluster::hrw_topic`
|
||||
## will not distribute keys to nodes marked as dead.
|
||||
##
|
||||
## pool: the pool to which the node belongs.
|
||||
##
|
||||
## name: the name of the node to mark.
|
||||
##
|
||||
## Returns: F if the node does not exist in the pool, else T.
|
||||
global mark_pool_node_dead: function(pool: Pool, name: string): bool;
|
||||
|
||||
global registered_pools: vector of Pool = vector();
|
||||
|
||||
function register_pool(spec: PoolSpec): Pool
|
||||
{
|
||||
local rval = Pool($spec = spec);
|
||||
registered_pools += rval;
|
||||
return rval;
|
||||
}
|
||||
|
||||
function hrw_topic(pool: Pool, key: any): string
|
||||
{
|
||||
if ( |pool$hrw_pool$sites| == 0 )
|
||||
return "";
|
||||
|
||||
local site = HashHRW::get_site(pool$hrw_pool, key);
|
||||
local pn: PoolNode = site$user_data;
|
||||
return node_topic_prefix + pn$name;
|
||||
}
|
||||
|
||||
function rr_topic(pool: Pool, key: string): string
|
||||
{
|
||||
if ( key !in pool$rr_key_seq )
|
||||
pool$rr_key_seq[key] = 0;
|
||||
|
||||
local next_idx = pool$rr_key_seq[key];
|
||||
local start = next_idx;
|
||||
local rval = "";
|
||||
|
||||
if ( next_idx >= |pool$node_list| )
|
||||
return rval;
|
||||
|
||||
while ( T )
|
||||
{
|
||||
local pn = pool$node_list[next_idx];
|
||||
|
||||
++next_idx;
|
||||
|
||||
if ( next_idx == |pool$node_list| )
|
||||
next_idx = 0;
|
||||
|
||||
if ( pn$alive )
|
||||
{
|
||||
rval = node_topic_prefix + pn$name;
|
||||
break;
|
||||
}
|
||||
|
||||
if ( next_idx == start )
|
||||
# no nodes alive
|
||||
break;
|
||||
}
|
||||
|
||||
pool$rr_key_seq[key] = next_idx;
|
||||
return rval;
|
||||
}
|
||||
|
||||
function rr_log_topic(id: Log::ID, path: string): string
|
||||
{
|
||||
local rval = rr_topic(logger_pool, "Cluster::rr_log_topic");
|
||||
|
||||
if ( rval != "" )
|
||||
return rval;
|
||||
|
||||
rval = Broker::default_log_topic(id, path);
|
||||
return rval;
|
||||
}
|
||||
|
||||
event Cluster::node_up(name: string, id: string) &priority=10
|
||||
{
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
local pool = registered_pools[i];
|
||||
|
||||
if ( name in pool$nodes )
|
||||
mark_pool_node_alive(pool, name);
|
||||
}
|
||||
}
|
||||
|
||||
event Cluster::node_down(name: string, id: string) &priority=10
|
||||
{
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
local pool = registered_pools[i];
|
||||
|
||||
if ( name in pool$nodes )
|
||||
mark_pool_node_dead(pool, name);
|
||||
}
|
||||
}
|
||||
|
||||
function site_id_in_pool(pool: Pool, site_id: count): bool
|
||||
{
|
||||
for ( i in pool$nodes )
|
||||
{
|
||||
local pn = pool$nodes[i];
|
||||
|
||||
if ( pn$site_id == site_id )
|
||||
return T;
|
||||
}
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
function init_pool_node(pool: Pool, name: string): bool
|
||||
{
|
||||
if ( name in pool$nodes )
|
||||
return F;
|
||||
|
||||
local loop = T;
|
||||
local c = 0;
|
||||
|
||||
while ( loop )
|
||||
{
|
||||
# Site ID collisions are unlikely, but using aliases handles them.
|
||||
# Alternatively, we could terminate and ask the user to pick a new
|
||||
# node name if it ends up colliding.
|
||||
local alias = name + fmt(".%s", c);
|
||||
local site_id = fnv1a32(alias);
|
||||
|
||||
if ( site_id_in_pool(pool, site_id) )
|
||||
++c;
|
||||
else
|
||||
{
|
||||
local pn = PoolNode($name=name, $alias=alias, $site_id=site_id,
|
||||
$alive=Cluster::node == name);
|
||||
pool$nodes[name] = pn;
|
||||
pool$node_list += pn;
|
||||
|
||||
if ( pn$alive )
|
||||
++pool$alive_count;
|
||||
|
||||
loop = F;
|
||||
}
|
||||
}
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
function mark_pool_node_alive(pool: Pool, name: string): bool
|
||||
{
|
||||
if ( name !in pool$nodes )
|
||||
return F;
|
||||
|
||||
local pn = pool$nodes[name];
|
||||
|
||||
if ( ! pn$alive )
|
||||
{
|
||||
pn$alive = T;
|
||||
++pool$alive_count;
|
||||
}
|
||||
|
||||
HashHRW::add_site(pool$hrw_pool, HashHRW::Site($id=pn$site_id, $user_data=pn));
|
||||
return T;
|
||||
}
|
||||
|
||||
function mark_pool_node_dead(pool: Pool, name: string): bool
|
||||
{
|
||||
if ( name !in pool$nodes )
|
||||
return F;
|
||||
|
||||
local pn = pool$nodes[name];
|
||||
|
||||
if ( pn$alive )
|
||||
{
|
||||
pn$alive = F;
|
||||
--pool$alive_count;
|
||||
}
|
||||
|
||||
HashHRW::rem_site(pool$hrw_pool, HashHRW::Site($id=pn$site_id, $user_data=pn));
|
||||
return T;
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
worker_pool = register_pool(worker_pool_spec);
|
||||
proxy_pool = register_pool(proxy_pool_spec);
|
||||
logger_pool = register_pool(logger_pool_spec);
|
||||
}
|
||||
|
||||
type PoolEligibilityTracking: record {
|
||||
eligible_nodes: vector of NamedNode &default = vector();
|
||||
next_idx: count &default = 0;
|
||||
excluded: count &default = 0;
|
||||
};
|
||||
|
||||
global pool_eligibility: table[Cluster::NodeType] of PoolEligibilityTracking = table();
|
||||
|
||||
function pool_sorter(a: Pool, b: Pool): int
|
||||
{
|
||||
return strcmp(a$spec$topic, b$spec$topic);
|
||||
}
|
||||
|
||||
# Needs to execute before the bro_init handler in setup-connections.
|
||||
event bro_init() &priority=-5
|
||||
{
|
||||
if ( ! Cluster::is_enabled() )
|
||||
return;
|
||||
|
||||
# Sorting now ensures the node distribution process is stable even if
|
||||
# there's a change in the order of time-of-registration between Bro runs.
|
||||
sort(registered_pools, pool_sorter);
|
||||
|
||||
pool_eligibility[Cluster::WORKER] =
|
||||
PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::WORKER));
|
||||
pool_eligibility[Cluster::PROXY] =
|
||||
PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::PROXY));
|
||||
pool_eligibility[Cluster::LOGGER] =
|
||||
PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::LOGGER));
|
||||
|
||||
if ( manager_is_logger )
|
||||
{
|
||||
local mgr = nodes_with_type(Cluster::MANAGER);
|
||||
|
||||
if ( |mgr| > 0 )
|
||||
{
|
||||
local eln = pool_eligibility[Cluster::LOGGER]$eligible_nodes;
|
||||
eln += mgr[0];
|
||||
}
|
||||
}
|
||||
|
||||
local pool: Pool;
|
||||
local pet: PoolEligibilityTracking;
|
||||
local en: vector of NamedNode;
|
||||
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
pool = registered_pools[i];
|
||||
|
||||
if ( pool$spec$node_type !in pool_eligibility )
|
||||
Reporter::fatal(fmt("invalid pool node type: %s", pool$spec$node_type));
|
||||
|
||||
if ( ! pool$spec$exclusive )
|
||||
next;
|
||||
|
||||
if ( ! pool$spec?$max_nodes )
|
||||
Reporter::fatal("Cluster::PoolSpec 'max_nodes' field must be set when using the 'exclusive' flag");
|
||||
|
||||
pet = pool_eligibility[pool$spec$node_type];
|
||||
pet$excluded += pool$spec$max_nodes;
|
||||
}
|
||||
|
||||
for ( nt in pool_eligibility )
|
||||
{
|
||||
pet = pool_eligibility[nt];
|
||||
|
||||
if ( pet$excluded > |pet$eligible_nodes| )
|
||||
Reporter::fatal(fmt("not enough %s nodes to satisfy pool exclusivity requirements: need %d nodes", nt, pet$excluded));
|
||||
}
|
||||
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
pool = registered_pools[i];
|
||||
|
||||
if ( ! pool$spec$exclusive )
|
||||
next;
|
||||
|
||||
pet = pool_eligibility[pool$spec$node_type];
|
||||
|
||||
local e = 0;
|
||||
|
||||
while ( e < pool$spec$max_nodes )
|
||||
{
|
||||
init_pool_node(pool, pet$eligible_nodes[e]$name);
|
||||
++e;
|
||||
}
|
||||
|
||||
local nen: vector of NamedNode = vector();
|
||||
|
||||
for ( j in pet$eligible_nodes )
|
||||
{
|
||||
if ( j < e )
|
||||
next;
|
||||
|
||||
nen += pet$eligible_nodes[j];
|
||||
}
|
||||
|
||||
pet$eligible_nodes = nen;
|
||||
}
|
||||
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
pool = registered_pools[i];
|
||||
|
||||
if ( pool$spec$exclusive )
|
||||
next;
|
||||
|
||||
pet = pool_eligibility[pool$spec$node_type];
|
||||
local nodes_to_init = |pet$eligible_nodes|;
|
||||
|
||||
if ( pool$spec?$max_nodes &&
|
||||
pool$spec$max_nodes < |pet$eligible_nodes| )
|
||||
nodes_to_init = pool$spec$max_nodes;
|
||||
|
||||
local nodes_inited = 0;
|
||||
|
||||
while ( nodes_inited < nodes_to_init )
|
||||
{
|
||||
init_pool_node(pool, pet$eligible_nodes[pet$next_idx]$name);
|
||||
++nodes_inited;
|
||||
++pet$next_idx;
|
||||
|
||||
if ( pet$next_idx == |pet$eligible_nodes| )
|
||||
pet$next_idx = 0;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -2,142 +2,125 @@
|
|||
##! as defined by :bro:id:`Cluster::nodes`.
|
||||
|
||||
@load ./main
|
||||
@load base/frameworks/communication
|
||||
|
||||
@if ( Cluster::node in Cluster::nodes )
|
||||
@load ./pools
|
||||
@load base/frameworks/broker
|
||||
|
||||
module Cluster;
|
||||
|
||||
event bro_init() &priority=9
|
||||
function connect_peer(node_type: NodeType, node_name: string)
|
||||
{
|
||||
local me = nodes[node];
|
||||
local nn = nodes_with_type(node_type);
|
||||
|
||||
for ( i in Cluster::nodes )
|
||||
for ( i in nn )
|
||||
{
|
||||
local n = nodes[i];
|
||||
local n = nn[i];
|
||||
|
||||
# Connections from the control node for runtime control and update events.
|
||||
# Every node in a cluster is eligible for control from this host.
|
||||
if ( n$node_type == CONTROL )
|
||||
Communication::nodes["control"] = [$host=n$ip, $zone_id=n$zone_id,
|
||||
$connect=F, $class="control",
|
||||
$events=control_events];
|
||||
if ( n$name != node_name )
|
||||
next;
|
||||
|
||||
if ( me$node_type == LOGGER )
|
||||
{
|
||||
if ( n$node_type == MANAGER && n$logger == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=manager2logger_events, $request_logs=T];
|
||||
if ( n$node_type == PROXY && n$logger == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=proxy2logger_events, $request_logs=T];
|
||||
if ( n$node_type == WORKER && n$logger == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=worker2logger_events, $request_logs=T];
|
||||
}
|
||||
else if ( me$node_type == MANAGER )
|
||||
{
|
||||
if ( n$node_type == LOGGER && me$logger == i )
|
||||
Communication::nodes["logger"] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $p=n$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node];
|
||||
|
||||
if ( n$node_type == WORKER && n$manager == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=worker2manager_events,
|
||||
$request_logs=Cluster::manager_is_logger];
|
||||
|
||||
if ( n$node_type == PROXY && n$manager == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=proxy2manager_events,
|
||||
$request_logs=Cluster::manager_is_logger];
|
||||
|
||||
if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i )
|
||||
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$events=tm2manager_events];
|
||||
}
|
||||
|
||||
else if ( me$node_type == PROXY )
|
||||
{
|
||||
if ( n$node_type == LOGGER && me$logger == i )
|
||||
Communication::nodes["logger"] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $p=n$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node];
|
||||
|
||||
if ( n$node_type == WORKER && n$proxy == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i,
|
||||
$sync=T, $auth=T, $events=worker2proxy_events];
|
||||
|
||||
# accepts connections from the previous one.
|
||||
# (This is not ideal for setups with many proxies)
|
||||
# FIXME: Once we're using multiple proxies, we should also figure out some $class scheme ...
|
||||
if ( n$node_type == PROXY )
|
||||
{
|
||||
if ( n?$proxy )
|
||||
Communication::nodes[i]
|
||||
= [$host=n$ip, $zone_id=n$zone_id, $p=n$p,
|
||||
$connect=T, $auth=F, $sync=T, $retry=retry_interval];
|
||||
else if ( me?$proxy && me$proxy == i )
|
||||
Communication::nodes[me$proxy]
|
||||
= [$host=nodes[i]$ip, $zone_id=nodes[i]$zone_id,
|
||||
$connect=F, $auth=T, $sync=T];
|
||||
}
|
||||
|
||||
# Finally the manager, to send it status updates.
|
||||
if ( n$node_type == MANAGER && me$manager == i )
|
||||
Communication::nodes["manager"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node,
|
||||
$events=manager2proxy_events];
|
||||
}
|
||||
else if ( me$node_type == WORKER )
|
||||
{
|
||||
if ( n$node_type == LOGGER && me$logger == i )
|
||||
Communication::nodes["logger"] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $p=n$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node];
|
||||
|
||||
if ( n$node_type == MANAGER && me$manager == i )
|
||||
Communication::nodes["manager"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node,
|
||||
$events=manager2worker_events];
|
||||
|
||||
if ( n$node_type == PROXY && me$proxy == i )
|
||||
Communication::nodes["proxy"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$sync=T, $class=node,
|
||||
$events=proxy2worker_events];
|
||||
|
||||
if ( n$node_type == TIME_MACHINE &&
|
||||
me?$time_machine && me$time_machine == i )
|
||||
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T,
|
||||
$retry=retry_interval,
|
||||
$events=tm2worker_events];
|
||||
|
||||
}
|
||||
local status = Broker::peer(cat(n$node$ip), n$node$p,
|
||||
Cluster::retry_interval);
|
||||
Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s",
|
||||
n$node$ip, n$node$p, Cluster::retry_interval,
|
||||
status));
|
||||
}
|
||||
}
|
||||
|
||||
@endif
|
||||
function connect_peers_with_type(node_type: NodeType)
|
||||
{
|
||||
local rval: vector of NamedNode = vector();
|
||||
local nn = nodes_with_type(node_type);
|
||||
|
||||
for ( i in nn )
|
||||
{
|
||||
local n = nn[i];
|
||||
local status = Broker::peer(cat(n$node$ip), n$node$p,
|
||||
Cluster::retry_interval);
|
||||
Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s",
|
||||
n$node$ip, n$node$p, Cluster::retry_interval,
|
||||
status));
|
||||
}
|
||||
}
|
||||
|
||||
event bro_init() &priority=-10
|
||||
{
|
||||
if ( getenv("BROCTL_CHECK_CONFIG") != "" )
|
||||
return;
|
||||
|
||||
local self = nodes[node];
|
||||
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
local pool = registered_pools[i];
|
||||
|
||||
if ( node in pool$nodes )
|
||||
Broker::subscribe(pool$spec$topic);
|
||||
}
|
||||
|
||||
switch ( self$node_type ) {
|
||||
case NONE:
|
||||
return;
|
||||
case CONTROL:
|
||||
break;
|
||||
case LOGGER:
|
||||
Broker::subscribe(Cluster::logger_topic);
|
||||
Broker::subscribe(Broker::default_log_topic_prefix);
|
||||
break;
|
||||
case MANAGER:
|
||||
Broker::subscribe(Cluster::manager_topic);
|
||||
|
||||
if ( Cluster::manager_is_logger )
|
||||
Broker::subscribe(Broker::default_log_topic_prefix);
|
||||
|
||||
break;
|
||||
case PROXY:
|
||||
Broker::subscribe(Cluster::proxy_topic);
|
||||
break;
|
||||
case WORKER:
|
||||
Broker::subscribe(Cluster::worker_topic);
|
||||
break;
|
||||
case TIME_MACHINE:
|
||||
Broker::subscribe(Cluster::time_machine_topic);
|
||||
break;
|
||||
default:
|
||||
Reporter::error(fmt("Unhandled cluster node type: %s", self$node_type));
|
||||
return;
|
||||
}
|
||||
|
||||
Broker::subscribe(Cluster::broadcast_topic);
|
||||
Broker::subscribe(node_topic(node));
|
||||
|
||||
Broker::listen(Broker::default_listen_address,
|
||||
self$p,
|
||||
Broker::default_listen_retry);
|
||||
|
||||
Cluster::log(fmt("listening on %s:%s", Broker::default_listen_address, self$p));
|
||||
|
||||
switch ( self$node_type ) {
|
||||
case MANAGER:
|
||||
connect_peers_with_type(LOGGER);
|
||||
|
||||
if ( self?$time_machine )
|
||||
connect_peer(TIME_MACHINE, self$time_machine);
|
||||
|
||||
break;
|
||||
case PROXY:
|
||||
connect_peers_with_type(LOGGER);
|
||||
|
||||
if ( self?$manager )
|
||||
connect_peer(MANAGER, self$manager);
|
||||
|
||||
break;
|
||||
case WORKER:
|
||||
connect_peers_with_type(LOGGER);
|
||||
connect_peers_with_type(PROXY);
|
||||
|
||||
if ( self?$manager )
|
||||
connect_peer(MANAGER, self$manager);
|
||||
|
||||
if ( self?$time_machine )
|
||||
connect_peer(TIME_MACHINE, self$time_machine);
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,2 +0,0 @@
|
|||
The communication framework facilitates connecting to remote Bro or
|
||||
Broccoli instances to share state and transfer events.
|
|
@ -1 +0,0 @@
|
|||
@load ./main
|
|
@ -1,354 +0,0 @@
|
|||
##! Facilitates connecting to remote Bro or Broccoli instances to share state
|
||||
##! and/or transfer events.
|
||||
|
||||
@load base/frameworks/packet-filter
|
||||
@load base/utils/addrs
|
||||
|
||||
module Communication;
|
||||
|
||||
export {
|
||||
|
||||
## The communication logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]``
|
||||
## are wildcards.
|
||||
const listen_interface = 0.0.0.0 &redef;
|
||||
|
||||
## Which port to listen on. Note that BroControl sets this
|
||||
## automatically.
|
||||
const listen_port = 47757/tcp &redef;
|
||||
|
||||
## This defines if a listening socket should use SSL.
|
||||
const listen_ssl = F &redef;
|
||||
|
||||
## Defines if a listening socket can bind to IPv6 addresses.
|
||||
##
|
||||
## Note that this is overridden by the BroControl IPv6Comm option.
|
||||
const listen_ipv6 = F &redef;
|
||||
|
||||
## If :bro:id:`Communication::listen_interface` is a non-global
|
||||
    ## IPv6 address and requires a specific :rfc:`4007` ``zone_id``,
    ## it can be specified here.
    const listen_ipv6_zone_id = "" &redef;

    ## Defines the interval at which to retry binding to
    ## :bro:id:`Communication::listen_interface` on
    ## :bro:id:`Communication::listen_port` if it's already in use.
    const listen_retry = 30 secs &redef;

    ## Default compression level. Compression level is 0-9, with 0 = no
    ## compression.
    global compression_level = 0 &redef;

    ## A record type containing the column fields of the communication log.
    type Info: record {
        ## The network time at which a communication event occurred.
        ts: time &log;
        ## The peer name (if any) with which a communication event is
        ## concerned.
        peer: string &log &optional;
        ## Where the communication event message originated from, that
        ## is, either from the scripting layer or inside the Bro process.
        src_name: string &log &optional;
        ## .. todo:: currently unused.
        connected_peer_desc: string &log &optional;
        ## .. todo:: currently unused.
        connected_peer_addr: addr &log &optional;
        ## .. todo:: currently unused.
        connected_peer_port: port &log &optional;
        ## The severity of the communication event message.
        level: string &log &optional;
        ## A message describing the communication event between Bro or
        ## Broccoli instances.
        message: string &log;
    };

    ## A remote peer to which we would like to talk.
    ## If there's no entry for a peer, it may still connect
    ## and request state, but not send us any.
    type Node: record {
        ## Remote address.
        host: addr;

        ## If the *host* field is a non-global IPv6 address, this field
        ## can specify a particular :rfc:`4007` ``zone_id``.
        zone_id: string &optional;

        ## Port of the remote Bro communication endpoint if we are
        ## initiating the connection (based on the *connect* field).
        p: port &optional;

        ## When accepting a connection, the configuration only
        ## applies if the class matches the one transmitted by
        ## the peer.
        ##
        ## When initiating a connection, the class is sent to
        ## the other side.
        class: string &optional;

        ## Events requested from remote side.
        events: pattern &optional;

        ## Whether we are going to connect (rather than waiting
        ## for the other side to connect to us).
        connect: bool &default = F;

        ## If disconnected, reconnect after this many seconds.
        retry: interval &default = 0 secs;

        ## Whether to accept remote events.
        accept_input: bool &default = T;

        ## Whether to perform state synchronization with peer.
        sync: bool &default = F;

        ## Whether to request logs from the peer.
        request_logs: bool &default = F;

        ## When performing state synchronization, whether we consider
        ## our state to be authoritative (only one side can be
        ## authoritative). If so, we will send the peer our current
        ## set when the connection is set up.
        auth: bool &default = F;

        ## If not set, no capture filter is sent.
        ## If set to an empty string, then the default capture filter
        ## is sent.
        capture_filter: string &optional;

        ## Whether to use SSL-based communication.
        ssl: bool &default = F;

        ## Compression level is 0-9, with 0 = no compression.
        compression: count &default = compression_level;

        ## The remote peer.
        peer: event_peer &optional;

        ## Indicates the status of the node.
        connected: bool &default = F;
    };

    ## The table of Bro or Broccoli nodes that Bro will initiate connections
    ## to or respond to connections from. Note that BroControl sets this
    ## automatically.
    global nodes: table[string] of Node &redef;

    ## A table of peer nodes for which this node issued a
    ## :bro:id:`Communication::connect_peer` call but with which a connection
    ## has not yet been established or with which a connection has been
    ## closed and is currently in the process of retrying to establish.
    ## When a connection is successfully established, the peer is removed
    ## from the table.
    global pending_peers: table[peer_id] of Node;

    ## A table of peer nodes for which this node has an established connection.
    ## Peers are automatically removed if their connection is closed and
    ## automatically added back if a connection is re-established later.
    global connected_peers: table[peer_id] of Node;

    ## Connect to a node in :bro:id:`Communication::nodes` independent
    ## of its "connect" flag.
    ##
    ## peer: the string used to index a particular node within the
    ##       :bro:id:`Communication::nodes` table.
    global connect_peer: function(peer: string);
}

const src_names = {
    [REMOTE_SRC_CHILD] = "child",
    [REMOTE_SRC_PARENT] = "parent",
    [REMOTE_SRC_SCRIPT] = "script",
};

event bro_init() &priority=5
    {
    Log::create_stream(Communication::LOG, [$columns=Info, $path="communication"]);
    }

function do_script_log_common(level: count, src: count, msg: string)
    {
    Log::write(Communication::LOG, [$ts = network_time(),
                                    $level = (level == REMOTE_LOG_INFO ? "info" : "error"),
                                    $src_name = src_names[src],
                                    $peer = get_event_peer()$descr,
                                    $message = msg]);
    }

# This is a core generated event.
event remote_log(level: count, src: count, msg: string)
    {
    do_script_log_common(level, src, msg);
    }

# This is a core generated event.
event remote_log_peer(p: event_peer, level: count, src: count, msg: string)
    {
    local rmsg = fmt("[#%d/%s:%d] %s", p$id, addr_to_uri(p$host), p$p, msg);
    do_script_log_common(level, src, rmsg);
    }

function do_script_log(p: event_peer, msg: string)
    {
    do_script_log_common(REMOTE_LOG_INFO, REMOTE_SRC_SCRIPT, msg);
    }

function connect_peer(peer: string)
    {
    local node = nodes[peer];
    local p = listen_port;

    if ( node?$p )
        p = node$p;

    local class = node?$class ? node$class : "";
    local zone_id = node?$zone_id ? node$zone_id : "";
    local id = connect(node$host, zone_id, p, class, node$retry, node$ssl);

    if ( id == PEER_ID_NONE )
        Log::write(Communication::LOG, [$ts = network_time(),
                                        $peer = get_event_peer()$descr,
                                        $message = "can't trigger connect"]);
    pending_peers[id] = node;
    }


function setup_peer(p: event_peer, node: Node)
    {
    if ( node?$events )
        {
        do_script_log(p, fmt("requesting events matching %s", node$events));
        request_remote_events(p, node$events);
        }

    if ( node?$capture_filter && node$capture_filter != "" )
        {
        local filter = node$capture_filter;
        do_script_log(p, fmt("sending capture_filter: %s", filter));
        send_capture_filter(p, filter);
        }

    if ( node$accept_input )
        {
        do_script_log(p, "accepting state");
        set_accept_state(p, T);
        }

    set_compression_level(p, node$compression);

    if ( node$sync )
        {
        do_script_log(p, "requesting synchronized state");
        request_remote_sync(p, node$auth);
        }

    if ( node$request_logs )
        {
        do_script_log(p, "requesting logs");
        request_remote_logs(p);
        }

    node$peer = p;
    node$connected = T;
    connected_peers[p$id] = node;
    }

event remote_connection_established(p: event_peer)
    {
    if ( is_remote_event() )
        return;

    do_script_log(p, "connection established");

    if ( p$id in pending_peers )
        {
        # We issued the connect.
        local node = pending_peers[p$id];
        setup_peer(p, node);
        delete pending_peers[p$id];
        }
    else
        { # The other side connected to us.
        local found = F;
        for ( i in nodes )
            {
            node = nodes[i];
            if ( node$host == p$host )
                {
                local c = 0;

                # See if classes match - either both have
                # the same class, or neither of them has
                # a class.
                if ( p?$class && p$class != "" )
                    ++c;

                if ( node?$class && node$class != "" )
                    ++c;

                if ( c == 1 ||
                     (c == 2 && p$class != node$class) )
                    next;

                found = T;
                setup_peer(p, node);
                break;
                }
            }

        if ( ! found )
            set_compression_level(p, compression_level);
        }

    complete_handshake(p);
    }

event remote_connection_closed(p: event_peer)
    {
    if ( is_remote_event() )
        return;

    do_script_log(p, "connection closed");

    if ( p$id in connected_peers )
        {
        local node = connected_peers[p$id];
        node$connected = F;

        delete connected_peers[p$id];

        if ( node$retry != 0secs )
            # The core will retry.
            pending_peers[p$id] = node;
        }
    }

event remote_state_inconsistency(operation: string, id: string,
                                 expected_old: string, real_old: string)
    {
    if ( is_remote_event() )
        return;

    local msg = fmt("state inconsistency: %s should be %s but is %s before %s",
                    id, expected_old, real_old, operation);
    Log::write(Communication::LOG, [$ts = network_time(),
                                    $peer = get_event_peer()$descr,
                                    $message = msg]);
    }


# Actually initiate the connections that need to be established.
event bro_init() &priority = -10 # let others modify nodes
    {
    if ( |nodes| > 0 )
        enable_communication();

    for ( tag in nodes )
        {
        if ( ! nodes[tag]$connect )
            next;

        connect_peer(tag);
        }
    }
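
As a rough usage sketch (not part of this commit), a site script could register a peer in Communication::nodes and let the bro_init handler above initiate the connection; the peer name "collector", the address 192.0.2.10, and the port 47757/tcp are placeholders.

# Usage sketch only - "collector", 192.0.2.10 and 47757/tcp are placeholders.
redef Communication::nodes += {
    ["collector"] = [$host = 192.0.2.10, $p = 47757/tcp, $connect = T,
                     $retry = 30 secs, $events = /collector_.*/],
};

# Communication::connect_peer("collector") could also be called from a
# handler to initiate the connection regardless of the $connect flag.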

2 scripts/base/frameworks/config/README Normal file
@@ -0,0 +1,2 @@
The configuration framework provides a way to change the Bro configuration
in "option" values at run-time.

2 scripts/base/frameworks/config/__load__.bro Normal file
@@ -0,0 +1,2 @@
@load ./main
@load ./input

77 scripts/base/frameworks/config/input.bro Normal file
@@ -0,0 +1,77 @@
##! File input for the configuration framework using the input framework.

@load ./main
@load base/frameworks/cluster

module Config;

export {
    ## Configuration files that will be read off disk. Files are reread
    ## every time they are updated so updates should be atomic with "mv"
    ## instead of writing the file in place.
    ##
    ## If the same configuration option is defined in several files with
    ## different values, behavior is unspecified.
    const config_files: set[string] = {} &redef;

    ## Read specified configuration file and apply values; updates to file
    ## are not tracked.
    global read_config: function(filename: string);
}

global current_config: table[string] of string = table();

type ConfigItem: record {
    option_nv: string;
};

type EventFields: record {
    option_name: string;
    option_val: string;
};

event config_line(description: Input::EventDescription, tpe: Input::Event, p: EventFields)
    {
    }

event bro_init() &priority=5
    {
    if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
        return;

    for ( fi in config_files )
        Input::add_table([$reader=Input::READER_CONFIG,
                          $mode=Input::REREAD,
                          $source=fi,
                          $name=cat("config-", fi),
                          $idx=ConfigItem,
                          $val=ConfigItem,
                          $want_record=F,
                          $destination=current_config]);
    }

event InputConfig::new_value(name: string, source: string, id: string, value: any)
    {
    if ( sub_bytes(name, 1, 15) != "config-oneshot-" && source !in config_files )
        return;

    Config::set_value(id, value, source);
    }

function read_config(filename: string)
    {
    # Only read the configuration on the manager. The other nodes are being fed
    # from the manager.
    if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
        return;

    local iname = cat("config-oneshot-", filename);

    Input::add_event([$reader=Input::READER_CONFIG,
                      $mode=Input::MANUAL,
                      $source=filename,
                      $name=iname,
                      $fields=EventFields,
                      $ev=config_line]);
    Input::remove(iname);
    }
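
A brief usage sketch (not part of the new file above): the set of tracked files is extended via redef, while read_config applies a file once without tracking later updates; both paths below are placeholders.

# Usage sketch only - both paths are placeholders.
redef Config::config_files += { "/usr/local/bro/etc/tuning.dat" };

event bro_init()
    {
    # One-shot application of a file whose later updates are not tracked.
    Config::read_config("/usr/local/bro/etc/defaults.dat");
    }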

165 scripts/base/frameworks/config/main.bro Normal file
@@ -0,0 +1,165 @@
##! The configuration framework provides a way to change Bro options
##! (as specified by the "option" keyword) at runtime. It also logs runtime
##! changes to options to config.log.

@load base/frameworks/cluster

module Config;

export {
    ## The config logging stream identifier.
    redef enum Log::ID += { LOG };

    ## Represents the data in config.log.
    type Info: record {
        ## Timestamp at which the configuration change occurred.
        ts: time &log;
        ## ID of the value that was changed.
        id: string &log;
        ## Value before the change.
        old_value: string &log;
        ## Value after the change.
        new_value: string &log;
        ## Optional location that triggered the change.
        location: string &optional &log;
    };

    ## Event that can be handled to access the :bro:type:`Config::Info`
    ## record as it is sent on to the logging framework.
    global log_config: event(rec: Info);

    ## Broker topic for announcing new configuration values. By sending
    ## new_value, peers can send configuration changes that will be
    ## distributed across the entire cluster.
    const change_topic = "bro/config/change";

    ## This function is the config framework layer around the lower-level
    ## :bro:see:`Option::set` call. Config::set_value will set the configuration
    ## value for all nodes in the cluster, no matter where it was called. Note
    ## that :bro:see:`Option::set` does not distribute configuration changes
    ## to other nodes.
    ##
    ## ID: The ID of the option to update.
    ##
    ## val: The new value of the option.
    ##
    ## location: Optional parameter detailing where this change originated from.
    ##
    ## Returns: true on success, false when an error occurred.
    global set_value: function(ID: string, val: any, location: string &default = "" &optional): bool;
}

@if ( Cluster::is_enabled() )
type OptionCacheValue: record {
    val: any;
    location: string;
};

global option_cache: table[string] of OptionCacheValue;

event bro_init()
    {
    Broker::subscribe(change_topic);
    }

event Config::cluster_set_option(ID: string, val: any, location: string)
    {
@if ( Cluster::local_node_type() == Cluster::MANAGER )
    option_cache[ID] = OptionCacheValue($val=val, $location=location);
@endif
    Option::set(ID, val, location);
    }

function set_value(ID: string, val: any, location: string &default = "" &optional): bool
    {
    local cache_val: any;
    # First cache the value in case setting it succeeds and we have to store it.
    if ( Cluster::local_node_type() == Cluster::MANAGER )
        cache_val = copy(val);
    # First try setting it locally - abort if not possible.
    if ( ! Option::set(ID, val, location) )
        return F;
    # If setting worked, copy the new value into the cache on the manager.
    if ( Cluster::local_node_type() == Cluster::MANAGER )
        option_cache[ID] = OptionCacheValue($val=cache_val, $location=location);

    # If it turns out that it is possible - send it to everyone else to apply.
    Broker::publish(change_topic, Config::cluster_set_option, ID, val, location);

    if ( Cluster::local_node_type() != Cluster::MANAGER )
        {
        Broker::relay(change_topic, change_topic, Config::cluster_set_option, ID, val, location);
        }
    return T;
    }
@else
# Standalone implementation
function set_value(ID: string, val: any, location: string &default = "" &optional): bool
    {
    return Option::set(ID, val, location);
    }
@endif

@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
# Handling of new worker nodes.
event Cluster::node_up(name: string, id: string) &priority=-10
    {
    # When a node connects, send it all current Option values.
    if ( name in Cluster::nodes )
        for ( ID in option_cache )
            Broker::publish(Cluster::node_topic(name), Config::cluster_set_option, ID, option_cache[ID]$val, option_cache[ID]$location);
    }
@endif


function format_value(value: any) : string
    {
    local tn = type_name(value);
    local part: string_vec = vector();
    if ( /^set/ in tn )
        {
        local it: set[bool] = value;
        for ( sv in it )
            part += cat(sv);
        return join_string_vec(part, ",");
        }
    else if ( /^vector/ in tn )
        {
        local vit: vector of any = value;
        for ( i in vit )
            part += cat(vit[i]);
        return join_string_vec(part, ",");
        }
    else if ( tn == "string" )
        return value;

    return cat(value);
    }

function config_option_changed(ID: string, new_value: any, location: string): any
    {
    local log = Info($ts=network_time(), $id=ID, $old_value=format_value(lookup_ID(ID)), $new_value=format_value(new_value));
    if ( location != "" )
        log$location = location;
    Log::write(LOG, log);
    return new_value;
    }

event bro_init() &priority=10
    {
    Log::create_stream(LOG, [$columns=Info, $ev=log_config, $path="config"]);

    # Limit logging to the manager - everyone else just feeds off it.
@if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
    # Iterate over all existing options and add ourselves as change handlers
    # with a low priority so that we can log the changes.
    local gids = global_ids();
    for ( i in gids )
        {
        if ( ! gids[i]$option_value )
            next;

        Option::set_change_handler(i, config_option_changed, -100);
        }
@endif
    }
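
A brief usage sketch (not part of the new file above): an option declared elsewhere with the "option" keyword can be changed cluster-wide through Config::set_value; the module and option name below are made up.

# Usage sketch only - Example::verbose_logging is a made-up option.
module Example;

export {
    ## A made-up option for the sketch.
    option verbose_logging = F;
}

event bro_init()
    {
    # Applies locally, publishes to the change topic, and is logged to
    # config.log by the change handler registered above.
    Config::set_value("Example::verbose_logging", T, "usage-sketch");
    }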

@@ -5,6 +5,13 @@
module Control;

export {
    ## The topic prefix used for exchanging control messages via Broker.
    const topic_prefix = "bro/control";

    ## Whether the controllee should call :bro:see:`Broker::listen`.
    ## In a cluster, this isn't needed since the setup process calls it.
    const controllee_listen = T &redef;

    ## The address of the host that will be controlled.
    const host = 0.0.0.0 &redef;

@@ -22,12 +29,6 @@ export {
    ## This can be used by commands that take an argument.
    const arg = "" &redef;

    ## Events that need to be handled by controllers.
    const controller_events = /Control::.*_request/ &redef;

    ## Events that need to be handled by controllees.
    const controllee_events = /Control::.*_response/ &redef;

    ## The commands that can currently be given on the command line for
    ## remote control.
    const commands: set[string] = {

@@ -73,8 +74,7 @@ export {
    global shutdown_response: event();
}


event terminate_event()
    {
    terminate_communication();
    terminate();
    }

@@ -116,7 +116,7 @@ signature file-reg-utf16 {

# Microsoft Registry format (typically DESKTOP.DAT)
signature file-regf {
    file-mime "application vnd.ms-regf", 49
    file-mime "application/vnd.ms-regf", 49
    file-magic /^\x72\x65\x67\x66/
}

@@ -135,6 +135,20 @@ export {
    ## The default per-file reassembly buffer size.
    const reassembly_buffer_size = 524288 &redef;

    ## Lookup to see if a particular file id exists and is still valid.
    ##
    ## fuid: the file id.
    ##
    ## Returns: T if the file uid is known.
    global file_exists: function(fuid: string): bool;

    ## Lookup an :bro:see:`fa_file` record with the file id.
    ##
    ## fuid: the file id.
    ##
    ## Returns: the associated :bro:see:`fa_file` record.
    global lookup_file: function(fuid: string): fa_file;

    ## Allows the file reassembler to be used if it's necessary because the
    ## file is transferred out of order.
    ##

@@ -338,6 +352,16 @@ function set_info(f: fa_file)
    f$info$is_orig = f$is_orig;
    }

function file_exists(fuid: string): bool
    {
    return __file_exists(fuid);
    }

function lookup_file(fuid: string): fa_file
    {
    return __lookup_file(fuid);
    }

function set_timeout_interval(f: fa_file, t: interval): bool
    {
    return __set_timeout_interval(f$id, t);
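
A brief usage sketch (not part of the hunks above) for the two new accessors; the handler below is illustrative only.

# Usage sketch only.
event file_state_remove(f: fa_file)
    {
    # Guard the lookup, since the file may already have expired in the core.
    if ( Files::file_exists(f$id) )
        {
        local rec = Files::lookup_file(f$id);
        print fmt("file %s came from source %s", rec$id, rec$source);
        }
    }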

@@ -3,4 +3,5 @@
@load ./readers/raw
@load ./readers/benchmark
@load ./readers/binary
@load ./readers/config
@load ./readers/sqlite

@@ -9,7 +9,7 @@ export {
    ## Please note that the separator has to be exactly one character long.
    const separator = Input::separator &redef;

    ## Separator between set elements.
    ## Separator between set and vector elements.
    ## Please note that the separator has to be exactly one character long.
    const set_separator = Input::set_separator &redef;

@@ -18,4 +18,33 @@ export {

    ## String to use for an unset &optional field.
    const unset_field = Input::unset_field &redef;

    ## Fail on invalid lines. If set to false, the ascii
    ## input reader will jump over invalid lines, reporting
    ## warnings in reporter.log. If set to true, errors in
    ## input lines will be handled as fatal errors for the
    ## reader thread; reading will abort immediately and
    ## an error will be logged to reporter.log.
    ## Individual readers can use a different value using
    ## the $config table.
    ## fail_on_invalid_lines = T was the default behavior
    ## until Bro 2.6.
    const fail_on_invalid_lines = F &redef;

    ## Fail on file read problems. If set to true, the ascii
    ## input reader will fail when encountering any problems
    ## while reading a file, other than invalid lines.
    ## Examples of such problems are permission problems, or
    ## missing files.
    ## When set to false, these problems will be ignored. This
    ## has an especially big effect for the REREAD mode, which will
    ## seamlessly recover from read errors when a file is
    ## only temporarily inaccessible. For MANUAL or STREAM files,
    ## errors will most likely still be fatal since no automatic
    ## re-reading of the file is attempted.
    ## Individual readers can use a different value using
    ## the $config table.
    ## fail_on_file_problem = T was the default behavior
    ## until Bro 2.6.
    const fail_on_file_problem = F &redef;
}
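
A brief usage sketch (not part of the hunks above): a single input stream can restore the stricter pre-2.6 behavior through the $config table of its stream description; the file name "hosts.txt" and the Idx type are placeholders.

# Usage sketch only - "hosts.txt" and Idx are placeholders.
type Idx: record {
    ip: addr;
};

global hosts: set[addr] = set();

event bro_init()
    {
    Input::add_table([$source="hosts.txt", $name="hosts",
                      $idx=Idx, $destination=hosts,
                      $config=table(["fail_on_invalid_lines"] = "T")]);
    }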

44 scripts/base/frameworks/input/readers/config.bro Normal file
@@ -0,0 +1,44 @@
##! Interface for the config input reader.

module InputConfig;

export {
    ## Separator between set and vector elements.
    ## Please note that the separator has to be exactly one character long.
    const set_separator = Input::set_separator &redef;

    ## String to use for empty fields.
    ## By default this is the empty string, meaning that an empty input field
    ## will result in an empty set.
    const empty_field = "" &redef;

    ## Fail on file read problems. If set to true, the config
    ## input reader will fail when encountering any problems
    ## while reading a file, other than invalid lines.
    ## Examples of such problems are permission problems, or
    ## missing files.
    ## When set to false, these problems will be ignored. This
    ## has an especially big effect for the REREAD mode, which will
    ## seamlessly recover from read errors when a file is
    ## only temporarily inaccessible. For MANUAL or STREAM files,
    ## errors will most likely still be fatal since no automatic
    ## re-reading of the file is attempted.
    ## Individual readers can use a different value using
    ## the $config table.
    const fail_on_file_problem = F &redef;

    ## Event that is called when a config option is added or changes.
    ##
    ## Note - this does not track the reason for a change (new, changed),
    ## and also does not track removals. If you need this, combine the event
    ## with a table reader.
    ##
    ## name: Name of the input stream.
    ##
    ## source: Source of the input stream.
    ##
    ## id: ID of the configuration option being set.
    ##
    ## value: New value of the configuration option being set.
    global new_value: event(name: string, source: string, id: string, value: any);
}
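
A brief usage sketch (not part of the new file above): any script can watch values delivered by the config reader by handling the event declared here.

# Usage sketch only.
event InputConfig::new_value(name: string, source: string, id: string, value: any)
    {
    print fmt("stream %s (%s): option %s = %s", name, source, id, cat(value));
    }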

Some files were not shown because too many files have changed in this diff.