Merge topic/actor-system through a squashed commit.

This commit is contained in:
Robin Sommer 2018-05-16 23:48:07 +00:00
parent 7a6f5020f6
commit fe7e1ee7f0
466 changed files with 12559 additions and 9655 deletions

3
.gitmodules vendored
View file

@ -4,9 +4,6 @@
[submodule "aux/binpac"] [submodule "aux/binpac"]
path = aux/binpac path = aux/binpac
url = git://git.bro.org/binpac url = git://git.bro.org/binpac
[submodule "aux/broccoli"]
path = aux/broccoli
url = git://git.bro.org/broccoli
[submodule "aux/broctl"] [submodule "aux/broctl"]
path = aux/broctl path = aux/broctl
url = git://git.bro.org/broctl url = git://git.bro.org/broctl

View file

@ -2,7 +2,7 @@ project(Bro C CXX)
# When changing the minimum version here, also adapt # When changing the minimum version here, also adapt
# aux/bro-aux/plugin-support/skeleton/CMakeLists.txt # aux/bro-aux/plugin-support/skeleton/CMakeLists.txt
cmake_minimum_required(VERSION 2.8 FATAL_ERROR) cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR)
include(cmake/CommonCMakeConfig.cmake) include(cmake/CommonCMakeConfig.cmake)
@ -95,6 +95,13 @@ FindRequiredPackage(OpenSSL)
FindRequiredPackage(BIND) FindRequiredPackage(BIND)
FindRequiredPackage(ZLIB) FindRequiredPackage(ZLIB)
find_package(CAF COMPONENTS core io)
if (CAF_FOUND)
include_directories(BEFORE ${CAF_INCLUDE_DIRS})
else ()
list(APPEND MISSING_PREREQ_DESCS CAF)
endif ()
if (NOT BinPAC_ROOT_DIR AND if (NOT BinPAC_ROOT_DIR AND
EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/aux/binpac/CMakeLists.txt) EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/aux/binpac/CMakeLists.txt)
add_subdirectory(aux/binpac) add_subdirectory(aux/binpac)
@ -105,6 +112,12 @@ if (ENABLE_JEMALLOC)
find_package(JeMalloc) find_package(JeMalloc)
endif () endif ()
if ( BISON_VERSION AND BISON_VERSION VERSION_LESS 2.5 )
set(MISSING_PREREQS true)
list(APPEND MISSING_PREREQ_DESCS
" Could not find prerequisite package Bison >= 2.5, found: ${BISON_VERSION}")
endif ()
if (MISSING_PREREQS) if (MISSING_PREREQS)
foreach (prereq ${MISSING_PREREQ_DESCS}) foreach (prereq ${MISSING_PREREQ_DESCS})
message(SEND_ERROR ${prereq}) message(SEND_ERROR ${prereq})
@ -219,12 +232,13 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR})
######################################################################## ########################################################################
## Recurse on sub-directories ## Recurse on sub-directories
if ( ENABLE_BROKER )
add_subdirectory(aux/broker) add_subdirectory(aux/broker)
set(brodeps ${brodeps} broker) set(brodeps ${brodeps} broker)
add_definitions(-DENABLE_BROKER) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/broker
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/broker) ${CMAKE_CURRENT_BINARY_DIR}/aux/broker)
endif () include_directories(BEFORE ${CAF_INCLUDE_DIR_CORE})
include_directories(BEFORE ${CAF_INCLUDE_DIR_IO})
include_directories(BEFORE ${CAF_INCLUDE_DIR_OPENSSL})
add_subdirectory(src) add_subdirectory(src)
add_subdirectory(scripts) add_subdirectory(scripts)
@ -235,7 +249,6 @@ include(CheckOptionalBuildSources)
CheckOptionalBuildSources(aux/broctl Broctl INSTALL_BROCTL) CheckOptionalBuildSources(aux/broctl Broctl INSTALL_BROCTL)
CheckOptionalBuildSources(aux/bro-aux Bro-Aux INSTALL_AUX_TOOLS) CheckOptionalBuildSources(aux/bro-aux Bro-Aux INSTALL_AUX_TOOLS)
CheckOptionalBuildSources(aux/broccoli Broccoli INSTALL_BROCCOLI)
######################################################################## ########################################################################
## Packaging Setup ## Packaging Setup
@ -275,9 +288,6 @@ message(
"\nCXXFLAGS: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${BuildType}}" "\nCXXFLAGS: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${BuildType}}"
"\nCPP: ${CMAKE_CXX_COMPILER}" "\nCPP: ${CMAKE_CXX_COMPILER}"
"\n" "\n"
"\nBroker: ${ENABLE_BROKER}"
"\nBroker Python: ${BROKER_PYTHON_BINDINGS}"
"\nBroccoli: ${INSTALL_BROCCOLI}"
"\nBroctl: ${INSTALL_BROCTL}" "\nBroctl: ${INSTALL_BROCTL}"
"\nAux. Tools: ${INSTALL_AUX_TOOLS}" "\nAux. Tools: ${INSTALL_AUX_TOOLS}"
"\n" "\n"

@ -1 +1 @@
Subproject commit 6484fc12712f6ce97138fec8ca413ec8df12766c Subproject commit a6353cfbf937124d327d3064f09913862d3aff5c

46
configure vendored
View file

@ -50,15 +50,10 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
(automatically on when perftools is present on Linux) (automatically on when perftools is present on Linux)
--enable-perftools-debug use Google's perftools for debugging --enable-perftools-debug use Google's perftools for debugging
--enable-jemalloc link against jemalloc --enable-jemalloc link against jemalloc
--enable-ruby build ruby bindings for broccoli (deprecated)
--enable-broker enable use of the Broker communication library
(requires C++ Actor Framework)
--disable-broccoli don't build or install the Broccoli library
--disable-broctl don't install Broctl --disable-broctl don't install Broctl
--disable-auxtools don't build or install auxiliary tools --disable-auxtools don't build or install auxiliary tools
--disable-perftools don't try to build with Google Perftools --disable-perftools don't try to build with Google Perftools
--disable-python don't try to build python bindings for broccoli --disable-python don't try to build python bindings for broker
--disable-pybroker don't try to build python bindings for broker
Required Packages in Non-Standard Locations: Required Packages in Non-Standard Locations:
--with-openssl=PATH path to OpenSSL install root --with-openssl=PATH path to OpenSSL install root
@ -71,18 +66,15 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--with-flex=PATH path to flex executable --with-flex=PATH path to flex executable
--with-bison=PATH path to bison executable --with-bison=PATH path to bison executable
--with-python=PATH path to Python executable --with-python=PATH path to Python executable
Optional Packages in Non-Standard Locations:
--with-caf=PATH path to C++ Actor Framework installation --with-caf=PATH path to C++ Actor Framework installation
(a required Broker dependency) (a required Broker dependency)
Optional Packages in Non-Standard Locations:
--with-geoip=PATH path to the libGeoIP install root --with-geoip=PATH path to the libGeoIP install root
--with-perftools=PATH path to Google Perftools install root --with-perftools=PATH path to Google Perftools install root
--with-jemalloc=PATH path to jemalloc install root --with-jemalloc=PATH path to jemalloc install root
--with-python-lib=PATH path to libpython --with-python-lib=PATH path to libpython
--with-python-inc=PATH path to Python headers --with-python-inc=PATH path to Python headers
--with-ruby=PATH path to ruby interpreter
--with-ruby-lib=PATH path to ruby library
--with-ruby-inc=PATH path to ruby headers
--with-swig=PATH path to SWIG executable --with-swig=PATH path to SWIG executable
--with-rocksdb=PATH path to RocksDB installation --with-rocksdb=PATH path to RocksDB installation
(an optional Broker dependency) (an optional Broker dependency)
@ -135,21 +127,17 @@ append_cache_entry BRO_ROOT_DIR PATH $prefix
append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/broctl append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/broctl
append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro
append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc
append_cache_entry BROKER_PYTHON_BINDINGS BOOL false
append_cache_entry ENABLE_DEBUG BOOL false append_cache_entry ENABLE_DEBUG BOOL false
append_cache_entry ENABLE_PERFTOOLS BOOL false append_cache_entry ENABLE_PERFTOOLS BOOL false
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false
append_cache_entry ENABLE_JEMALLOC BOOL false append_cache_entry ENABLE_JEMALLOC BOOL false
append_cache_entry ENABLE_BROKER BOOL false
append_cache_entry BinPAC_SKIP_INSTALL BOOL true append_cache_entry BinPAC_SKIP_INSTALL BOOL true
append_cache_entry BUILD_SHARED_LIBS BOOL true append_cache_entry BUILD_SHARED_LIBS BOOL true
append_cache_entry INSTALL_AUX_TOOLS BOOL true append_cache_entry INSTALL_AUX_TOOLS BOOL true
append_cache_entry INSTALL_BROCCOLI BOOL true
append_cache_entry INSTALL_BROCTL BOOL true append_cache_entry INSTALL_BROCTL BOOL true
append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING
append_cache_entry ENABLE_MOBILE_IPV6 BOOL false append_cache_entry ENABLE_MOBILE_IPV6 BOOL false
append_cache_entry DISABLE_PERFTOOLS BOOL false append_cache_entry DISABLE_PERFTOOLS BOOL false
append_cache_entry DISABLE_RUBY_BINDINGS BOOL true
# parse arguments # parse arguments
while [ $# -ne 0 ]; do while [ $# -ne 0 ]; do
@ -221,14 +209,6 @@ while [ $# -ne 0 ]; do
--enable-jemalloc) --enable-jemalloc)
append_cache_entry ENABLE_JEMALLOC BOOL true append_cache_entry ENABLE_JEMALLOC BOOL true
;; ;;
--enable-broker)
append_cache_entry ENABLE_BROKER BOOL true
;;
--disable-broker)
;;
--disable-broccoli)
append_cache_entry INSTALL_BROCCOLI BOOL false
;;
--disable-broctl) --disable-broctl)
append_cache_entry INSTALL_BROCTL BOOL false append_cache_entry INSTALL_BROCTL BOOL false
;; ;;
@ -241,12 +221,6 @@ while [ $# -ne 0 ]; do
--disable-python) --disable-python)
append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
;; ;;
--disable-pybroker)
append_cache_entry DISABLE_PYBROKER BOOL true
;;
--enable-ruby)
append_cache_entry DISABLE_RUBY_BINDINGS BOOL false
;;
--with-openssl=*) --with-openssl=*)
append_cache_entry OPENSSL_ROOT_DIR PATH $optarg append_cache_entry OPENSSL_ROOT_DIR PATH $optarg
;; ;;
@ -288,26 +262,12 @@ while [ $# -ne 0 ]; do
append_cache_entry PYTHON_INCLUDE_DIR PATH $optarg append_cache_entry PYTHON_INCLUDE_DIR PATH $optarg
append_cache_entry PYTHON_INCLUDE_PATH PATH $optarg append_cache_entry PYTHON_INCLUDE_PATH PATH $optarg
;; ;;
--with-ruby=*)
append_cache_entry RUBY_EXECUTABLE PATH $optarg
;;
--with-ruby-lib=*)
append_cache_entry RUBY_LIBRARY PATH $optarg
;;
--with-ruby-inc=*)
append_cache_entry RUBY_INCLUDE_DIRS PATH $optarg
append_cache_entry RUBY_INCLUDE_PATH PATH $optarg
;;
--with-swig=*) --with-swig=*)
append_cache_entry SWIG_EXECUTABLE PATH $optarg append_cache_entry SWIG_EXECUTABLE PATH $optarg
;; ;;
--with-caf=*) --with-caf=*)
append_cache_entry CAF_ROOT_DIR PATH $optarg append_cache_entry CAF_ROOT_DIR PATH $optarg
;; ;;
--with-libcaf=*)
echo "warning: --with-libcaf deprecated, use --with-caf instead"
append_cache_entry CAF_ROOT_DIR PATH $optarg
;;
--with-rocksdb=*) --with-rocksdb=*)
append_cache_entry ROCKSDB_ROOT_DIR PATH $optarg append_cache_entry ROCKSDB_ROOT_DIR PATH $optarg
;; ;;

View file

@ -1,9 +1,9 @@
set(BROCCOLI_DOCS_SRC ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html)
set(BROCCOLI_DOCS_DST ${CMAKE_BINARY_DIR}/html/broccoli-api)
set(SPHINX_INPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_input) set(SPHINX_INPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_input)
set(SPHINX_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_output) set(SPHINX_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_output)
set(BROXYGEN_SCRIPT_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/broxygen_script_output) set(BROXYGEN_SCRIPT_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/broxygen_script_output)
set(BROXYGEN_CACHE_DIR ${CMAKE_CURRENT_BINARY_DIR}/broxygen_cache) set(BROXYGEN_CACHE_DIR ${CMAKE_CURRENT_BINARY_DIR}/broxygen_cache)
set(BROKER_DOCS_SRC ${CMAKE_BINARY_DIR}/aux/broker/doc/html)
set(BROKER_DOCS_DST ${CMAKE_BINARY_DIR}/html/broker-manual)
# Find out what BROPATH to use when executing bro. # Find out what BROPATH to use when executing bro.
execute_process(COMMAND ${CMAKE_BINARY_DIR}/bro-path-dev execute_process(COMMAND ${CMAKE_BINARY_DIR}/bro-path-dev
@ -61,10 +61,9 @@ add_custom_target(sphinxdoc
COMMAND "${CMAKE_COMMAND}" -E create_symlink COMMAND "${CMAKE_COMMAND}" -E create_symlink
${SPHINX_OUTPUT_DIR}/html ${SPHINX_OUTPUT_DIR}/html
${CMAKE_BINARY_DIR}/html ${CMAKE_BINARY_DIR}/html
# Copy Broccoli API reference into output dir if it exists. # Copy Broker manual into output dir.
COMMAND test -d ${BROCCOLI_DOCS_SRC} && COMMAND rm -rf ${BROKER_DOCS_DST} &&
( rm -rf ${BROCCOLI_DOCS_DST} && cp -r ${BROKER_DOCS_SRC} ${BROKER_DOCS_DST}
cp -r ${BROCCOLI_DOCS_SRC} ${BROCCOLI_DOCS_DST} ) || true
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "[Sphinx] Generate HTML documentation in ${CMAKE_BINARY_DIR}/html") COMMENT "[Sphinx] Generate HTML documentation in ${CMAKE_BINARY_DIR}/html")
@ -77,7 +76,10 @@ add_custom_target(sphinxdoc_clean
COMMAND "${CMAKE_COMMAND}" -E remove_directory ${BROXYGEN_CACHE_DIR} COMMAND "${CMAKE_COMMAND}" -E remove_directory ${BROXYGEN_CACHE_DIR}
VERBATIM) VERBATIM)
if (NOT TARGET doc)
add_custom_target(doc) add_custom_target(doc)
endif ()
add_custom_target(docclean) add_custom_target(docclean)
add_dependencies(doc sphinxdoc) add_dependencies(doc sphinxdoc)
add_dependencies(docclean sphinxdoc_clean) add_dependencies(docclean sphinxdoc_clean)

View file

@ -1 +0,0 @@
../../../aux/broccoli/bindings/broccoli-python/README

View file

@ -1 +0,0 @@
../../../aux/broccoli/bindings/broccoli-ruby/README

View file

@ -1 +0,0 @@
../../../aux/broccoli/README

View file

@ -1 +0,0 @@
../../../aux/broccoli/doc/broccoli-manual.rst

View file

@ -1 +0,0 @@
../../../aux/broker/broker-manual.rst

View file

@ -13,12 +13,7 @@ current, independent component releases.
:maxdepth: 1 :maxdepth: 1
BinPAC - A protocol parser generator <binpac/README> BinPAC - A protocol parser generator <binpac/README>
Broccoli - The Bro Client Communication Library (README) <broccoli/README> Broker - Bro's (New) Messaging Library <broker/README>
Broccoli - User Manual <broccoli/broccoli-manual>
Broccoli Python Bindings <broccoli-python/README>
Broccoli Ruby Bindings <broccoli-ruby/README>
Broker - Bro's (New) Messaging Library (README) <broker/README>
Broker - User Manual <broker/broker-manual.rst>
BroControl - Interactive Bro management shell <broctl/README> BroControl - Interactive Bro management shell <broctl/README>
Bro-Aux - Small auxiliary tools for Bro <bro-aux/README> Bro-Aux - Small auxiliary tools for Bro <bro-aux/README>
BTest - A unit testing framework <btest/README> BTest - A unit testing framework <btest/README>
@ -26,5 +21,5 @@ current, independent component releases.
PySubnetTree - Python module for CIDR lookups<pysubnettree/README> PySubnetTree - Python module for CIDR lookups<pysubnettree/README>
trace-summary - Script for generating break-downs of network traffic <trace-summary/README> trace-summary - Script for generating break-downs of network traffic <trace-summary/README>
The `Broccoli API Reference <../broccoli-api/index.html>`_ may also be of The `Broker User Manual <../broker-manual/index.html>`_ may also be of
interest. interest.

View file

@ -259,8 +259,13 @@ class BroDomain(Domain):
} }
def clear_doc(self, docname): def clear_doc(self, docname):
to_delete = []
for (typ, name), doc in self.data['objects'].items(): for (typ, name), doc in self.data['objects'].items():
if doc == docname: if doc == docname:
to_delete.append((typ, name))
for (typ, name) in to_delete:
del self.data['objects'][typ, name] del self.data['objects'][typ, name]
def resolve_xref(self, env, fromdocname, builder, typ, target, node, def resolve_xref(self, env, fromdocname, builder, typ, target, node,

View file

@ -1,174 +1,344 @@
.. _CAF: https://github.com/actor-framework/actor-framework
.. _brokercomm-framework: .. _brokercomm-framework:
====================================== ==============================================
Broker-Enabled Communication Framework Broker-Enabled Communication/Cluster Framework
====================================== ==============================================
.. rst-class:: opening .. rst-class:: opening
Bro can now use the `Broker Library Bro now uses the `Broker Library
<../components/broker/README.html>`_ to exchange information with <../components/broker/README.html>`_ to exchange information with
other Bro processes. other Bro processes. Broker itself uses CAF_ (C++ Actor Framework)
internally for connecting nodes and exchanging arbitrary data over
networks. Broker then introduces, on top of CAF, a topic-based
publish/subscribe communication pattern using a data model that is
compatible to Bro's. Broker itself can be utilized outside the
context of Bro, with Bro itself making use of only a few predefined
Broker message formats that represent Bro events, log entries, etc.
In summary, Bro's Broker framework provides basic facilities for
connecting broker-enabled peers (e.g. Bro instances) to each other
and exchanging messages (e.g. events and logs). With this comes
changes in how clusters operate and, since Broker significantly
differs from the previous communication framework, there are several
changes in the set of scripts that Bro ships with that may break
your own customizations. This document aims to describe the changes
that have been made, making it easier to port your own scripts. It
also gives examples of Broker and the new cluster framework that
show off all the new features and capabilities.
.. contents:: .. contents::
Porting Guide
=============
Review and use the points below as a guide to port your own scripts
to the latest version of Bro, which uses the new cluster and Broker
communication framework.
General Porting Tips
--------------------
- ``@load policy/frameworks/communication/listen`` and
``@load base/frameworks/communication`` indicates use of the
old communication framework, consider porting to
``@load base/frameworks/broker`` and using the Broker API:
:doc:`/scripts/base/frameworks/broker/main.bro`
- The ``&synchronized`` and ``&persistent`` attributes are deprecated,
consider using `Data Stores`_ instead.
- Instead of using e.g. ``Cluster::manager2worker_events`` (and all
permutations for every node type), what you'd now use is either
:bro:see:`Broker::publish` or :bro:see:`Broker::auto_publish` with
either the topic associated with a specific node or class of nodes,
like :bro:see:`Cluster::node_topic` or
:bro:see:`Cluster::worker_topic`.
- Instead of using the ``send_id`` BIF, use :bro:see:`Broker::publish_id`.
- Use :bro:see:`terminate` instead of :bro:see:`terminate_communication`.
The latter refers to the old communication system and no longer affects
the new Broker-based system.
- For replacing :bro:see:`remote_connection_established` and
:bro:see:`remote_connection_closed`, consider :bro:see:`Broker::peer_added`
or :bro:see:`Broker::peer_lost`. There's also :bro:see:`Cluster::node_up`
and :bro:see:`Cluster::node_down`.
Notable / Specific Script API Changes
-------------------------------------
- :bro:see:`Software::tracked` is now partitioned among proxy nodes
instead of synchronized in its entirety to all nodes.
- ``Known::known_devices`` is renamed to :bro:see:`Known::device_store`
and implemented via the new Broker data store interface.
Also use :bro:see:`Known::device_found` instead of updating the
store directly.
- ``Known::known_hosts`` is renamed to :bro:see:`Known::host_store` and
implemented via the new Broker data store interface.
- ``Known::known_services`` is renamed to :bro:see:`Known::service_store`
and implemented via the new Broker data store interface.
- ``Known::certs`` is renamed to :bro:see:`Known::cert_store`
and implemented via the new Broker data store interface.
New Cluster Layout / API
========================
Layout / Topology
-----------------
The cluster topology has changed.
- Proxy nodes no longer connect with each other.
- Each worker node connects to all proxies.
- All node types connect to all logger nodes and the manager node.
This looks like:
.. figure:: broker/cluster-layout.png
Some general suggestions as to the purpose/utilization of each node type:
- Workers: are a good first choice for doing the brunt of any work you need
done. They should be spending a lot of time performing the actual job
of parsing/analyzing incoming data from packets, so you might choose
to look at them as doing a "first pass" analysis and then deciding how
the results should be shared with other nodes in the cluster.
- Proxies: serve as intermediaries for data storage and work/calculation
offloading. Good for helping offload work or data in a scalable and
distributed way. i.e. since any given worker is connected to all
proxies and can agree on an "arbitrary key -> proxy node" mapping
(more on that later), you can partition work or data amongst them in a
uniform manner. e.g. you might choose to use proxies as a method of
sharing non-persistent state or as a "second pass" analysis for any
work that you don't want interfering with the workers' capacity to
keep up with capturing and parsing packets.
- Manager: this node will be good at performing decisions that require a
global view of things since it is in a centralized location, connected
to everything. However, that also makes it easy to overload, so try
to use it sparingly and only for tasks that must be done in a
centralized or authoritative location. Optionally, for some
deployments, the Manager can also serve as the sole Logger.
- Loggers: these nodes should simply be spending their time writing out
logs to disk and not used for much else. In the default cluster
configuration, logs get distributed among available loggers in a
round-robin fashion, providing failover capability should any given
logger temporarily go offline.
Data Management/Sharing Strategies
==================================
There's maybe no single, best approach or pattern to use when you need a
Bro script to store or share long-term state and data. The two
approaches that were previously used were either using ``&synchronized``
attribute on tables/sets or by explicitly sending events to specific
nodes on which you wanted data to be stored. The former is no longer
possible, though there are several new possibilities that the new
Broker/Cluster framework offer, namely distributed data store and data
partitioning APIs.
Data Stores
-----------
Broker provides a distributed key-value store interface with optional
choice of using a persistent backend. For more detail, see
:ref:`this example <data_store_example>`.
Some ideas/considerations/scenarios when deciding whether to use
a data store for your use-case:
* If you need the full data set locally in order to achieve low-latency
queries using data store "clones" can provide that.
* If you need data that persists across restarts of Bro processes, then
data stores can also provide that.
* If the data you want to store is complex (tables, sets, records) or
you expect to read, modify, and store back, then data stores may not
be able to provide simple, race-free methods of performing the pattern
of logic that you want.
* If the data set you want to store is excessively large, that's still
problematic even for stores that use a persistent backend as they are
implemented in a way that requires a full snapshot of the store's
contents to fit in memory (this limitation may change in the future).
Data Partitioning
-----------------
New data partitioning strategies are available using the API in
:doc:`/scripts/base/frameworks/cluster/pools.bro`.
One example strategy is to use Highest Random Weight (HRW) hashing to
partition data tables amongst proxy nodes. e.g. using
:bro:see:`Cluster::publish_hrw`. This could allow clusters to
be scaled more easily than the approach of "the entire data set gets
synchronized to all nodes" as the solution to memory limitations becomes
"just add another proxy node". It may also take away some of the
messaging load that used to be required to synchronize data sets across
all nodes.
The tradeoff of this approach is that nodes that leave the pool (due to
crashing, etc.) cause a temporary gap in the total data set until
workers start hashing keys to a new proxy node that is still alive,
causing data to now be located and updated there.
Broker Framework Examples
=========================
The broker framework provides basic facilities for connecting Bro instances
to each other and exchanging messages, like events or logs.
See :doc:`/scripts/base/frameworks/broker/main.bro` for an overview
of the main Broker API.
.. _broker_topic_naming:
Topic Naming Conventions
------------------------
All Broker-based messaging involves two components: the information you
want to send (e.g. an event w/ its arguments) along with an associated
topic name string. The topic strings are used as a filtering mechanism:
Broker uses a publish/subscribe communication pattern where peers
advertise interest in topic **prefixes** and only receive messages which
match one of their prefix subscriptions.
Broker itself supports arbitrary topic strings, however Bro generally
follows certain conventions in choosing these topics to help avoid
conflicts and generally make them easier to remember.
As a reminder of how topic subscriptions work, subscribers advertise
interest in a topic **prefix** and then receive any messages published by a
peer to a topic name that starts with that prefix. E.g. Alice
subscribes to the "alice/dogs" prefix, then would receive the following
message topics published by Bob:
- topic "alice/dogs/corgi"
- topic "alice/dogs"
- topic "alice/dogsarecool/oratleastilikethem"
Alice would **not** receive the following message topics published by Bob:
- topic "alice/cats/siamese"
- topic "alice/cats"
- topic "alice/dog"
- topic "alice"
Note that the topics aren't required to form a slash-delimited hierarchy,
the subscription matching is purely a byte-per-byte prefix comparison.
However, Bro scripts generally will follow a topic naming hierarchy and
any given script will make the topic names it uses apparent via some
redef'able constant in its export section. Generally topics that Bro
scripts use will be along the lines of "bro/<namespace>/<specifics>"
with "<namespace>" being the script's module name (in all-undercase).
For example, you might expect an imaginary "Pretend" framework to
publish/subscribe using topic names like "bro/pretend/my_cool_event".
For cluster operation, see :doc:`/scripts/base/frameworks/cluster/main.bro`
for a list of topics that are useful for steering published events to
the various node classes. E.g. you have the ability to broadcast to all
directly-connected nodes, only those of a given class (e.g. just workers),
or to a specific node within a class.
The topic names that logs get published under are a bit nuanced. In the
default cluster configuration, they are round-robin published to
explicit topic names that identify a single logger. In standalone Bro
processes, logs get published to the topic indicated by
:bro:see:`Broker::default_log_topic_prefix`.
For those writing their own scripts which need new topic names, a
suggestion would be to avoid prefixing any new topics/prefixes with
"bro/" as any changes in scripts shipping with Bro will use that prefix
and it's better to not risk unintended conflicts.
Connecting to Peers Connecting to Peers
=================== -------------------
Communication via Broker must first be turned on via Bro can accept incoming connections by calling :bro:see:`Broker::listen`.
:bro:see:`Broker::enable`.
Bro can accept incoming connections by calling :bro:see:`Broker::listen`
and then monitor connection status updates via the
:bro:see:`Broker::incoming_connection_established` and
:bro:see:`Broker::incoming_connection_broken` events.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-listener.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-listener.bro
Bro can initiate outgoing connections by calling :bro:see:`Broker::connect` Bro can initiate outgoing connections by calling :bro:see:`Broker::peer`.
and then monitor connection status updates via the
:bro:see:`Broker::outgoing_connection_established`,
:bro:see:`Broker::outgoing_connection_broken`, and
:bro:see:`Broker::outgoing_connection_incompatible` events.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-connector.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-connector.bro
Remote Printing In either case, connection status updates are monitored via the
=============== :bro:see:`Broker::peer_added` and :bro:see:`Broker::peer_lost` events.
To receive remote print messages, first use the
:bro:see:`Broker::subscribe_to_prints` function to advertise to peers a
topic prefix of interest and then create an event handler for
:bro:see:`Broker::print_handler` to handle any print messages that are
received.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-listener.bro
To send remote print messages, just call :bro:see:`Broker::send_print`.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-connector.bro
Notice that the subscriber only used the prefix "bro/print/", but is
able to receive messages with full topics of "bro/print/hi",
"bro/print/stuff", and "bro/print/bye". The model here is that the
publisher of a message checks for all subscribers who advertised
interest in a prefix of that message's topic and sends it to them.
Message Format
--------------
For other applications that want to exchange print messages with Bro,
the Broker message format is simply:
.. code:: c++
broker::message{std::string{}};
Remote Events Remote Events
============= -------------
Receiving remote events is similar to remote prints. Just use the To receive remote events, you need to first subscribe to a "topic" to which
:bro:see:`Broker::subscribe_to_events` function and possibly define any the events are being sent. A topic is just a string chosen by the sender,
new events along with handlers that peers may want to send. and named in a way that helps organize events into various categories.
See the :ref:`topic naming conventions section <broker_topic_naming>` for
more on how topics work and are chosen.
Use the :bro:see:`Broker::subscribe` function to subscribe to topics and
define any event handlers for events that peers will send.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-listener.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/events-listener.bro
There are two different ways to send events. The first is to call the There are two different ways to send events.
:bro:see:`Broker::send_event` function directly. The second option is to call
the :bro:see:`Broker::auto_event` function where you specify a The first is to call the :bro:see:`Broker::publish` function which you can
particular event that will be automatically sent to peers whenever the supply directly with the event and its arguments or give it the return value of
event is called locally via the normal event invocation syntax. :bro:see:`Broker::make_event` in case you need to send the same event/args
multiple times. When publishing events like this, local event handlers for
the event are not called.
The second option is to call the :bro:see:`Broker::auto_publish` function where
you specify a particular event that will be automatically sent to peers
whenever the event is called locally via the normal event invocation syntax.
When auto-publishing events, local event handlers for the event are called
in addition to sending the event to any subscribed peers.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-connector.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/events-connector.bro
Again, the subscription model is prefix-based. Note that the subscription model is prefix-based, meaning that if you subscribe
to the "bro/events" topic prefix you would receive events that are published
Message Format to topic names "bro/events/foo" and "bro/events/bar" but not "bro/misc".
--------------
For other applications that want to exchange event messages with Bro,
the Broker message format is:
.. code:: c++
broker::message{std::string{}, ...};
The first parameter is the name of the event and the remaining ``...``
are its arguments, which are any of the supported Broker data types as
they correspond to the Bro types for the event named in the first
parameter of the message.
Remote Logging Remote Logging
============== --------------
.. btest-include:: ${DOC_ROOT}/frameworks/broker/testlog.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/testlog.bro
Use the :bro:see:`Broker::subscribe_to_logs` function to advertise interest To toggle remote logs, redef :bro:see:`Log::enable_remote_logging`.
in logs written by peers. The topic names that Bro uses are implicitly of the Use the :bro:see:`Broker::subscribe` function to advertise interest
form "bro/log/<stream-name>". in logs written by peers. The topic names that Bro uses are determined by
:bro:see:`Broker::log_topic`.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-listener.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-listener.bro
To send remote logs either redef :bro:see:`Log::enable_remote_logging` or
use the :bro:see:`Broker::enable_remote_logs` function. The former
allows any log stream to be sent to peers while the latter enables remote
logging for particular streams.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-connector.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-connector.bro
Message Format Note that logging events are only raised locally on the node that performs
-------------- the :bro:see:`Log::write` and not automatically published to peers.
For other applications that want to exchange log messages with Bro, .. _data_store_example:
the Broker message format is:
.. code:: c++
broker::message{broker::enum_value{}, broker::record{}};
The enum value corresponds to the stream's :bro:see:`Log::ID` value, and
the record corresponds to a single entry of that log's columns record,
in this case a ``Test::Info`` value.
Tuning Access Control
=====================
By default, endpoints do not restrict the message topics that it sends
to peers and do not restrict what message topics and data store
identifiers get advertised to peers. These are the default
:bro:see:`Broker::EndpointFlags` supplied to :bro:see:`Broker::enable`.
If not using the ``auto_publish`` flag, one can use the
:bro:see:`Broker::publish_topic` and :bro:see:`Broker::unpublish_topic`
functions to manipulate the set of message topics (must match exactly)
that are allowed to be sent to peer endpoints. These settings take
precedence over the per-message ``peers`` flag supplied to functions
that take a :bro:see:`Broker::SendFlags` such as :bro:see:`Broker::send_print`,
:bro:see:`Broker::send_event`, :bro:see:`Broker::auto_event` or
:bro:see:`Broker::enable_remote_logs`.
If not using the ``auto_advertise`` flag, one can use the
:bro:see:`Broker::advertise_topic` and
:bro:see:`Broker::unadvertise_topic` functions
to manipulate the set of topic prefixes that are allowed to be
advertised to peers. If an endpoint does not advertise a topic prefix, then
the only way peers can send messages to it is via the ``unsolicited``
flag of :bro:see:`Broker::SendFlags` and choosing a topic with a matching
prefix (i.e. full topic may be longer than receivers prefix, just the
prefix needs to match).
Distributed Data Stores Distributed Data Stores
======================= -----------------------
There are three flavors of key-value data store interfaces: master, See :doc:`/scripts/base/frameworks/broker/store.bro` for an overview
clone, and frontend. of the Broker data store API.
A frontend is the common interface to query and modify data stores. There are two flavors of key-value data store interfaces: master and clone.
That is, a clone is a specific type of frontend and a master is also a
specific type of frontend, but a standalone frontend can also exist to
e.g. query and modify the contents of a remote master store without
actually "owning" any of the contents itself.
A master data store can be cloned from remote peers which may then A master data store can be cloned from remote peers which may then
perform lightweight, local queries against the clone, which perform lightweight, local queries against the clone, which
@ -177,24 +347,149 @@ modify their content directly, instead they send modifications to the
centralized master store which applies them and then broadcasts them to centralized master store which applies them and then broadcasts them to
all clones. all clones.
Master and clone stores get to choose what type of storage backend to Master stores get to choose what type of storage backend to
use. E.g. In-memory versus SQLite for persistence. Note that if clones use. E.g. In-memory versus SQLite for persistence.
are used, then data store sizes must be able to fit within memory
regardless of the storage backend as a single snapshot of the master
store is sent in a single chunk to initialize the clone.
Data stores also support expiration on a per-key basis either using an Data stores also support expiration on a per-key basis using an amount of
absolute point in time or a relative amount of time since the entry's time relative to the entry's last modification time.
last modification time.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-listener.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-listener.bro
.. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-connector.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-connector.bro
In the above example, if a local copy of the store contents isn't
needed, just replace the :bro:see:`Broker::create_clone` call with
:bro:see:`Broker::create_frontend`. Queries will then be made against
the remote master store instead of the local clone.
Note that all data store queries must be made within Bro's asynchronous Note that all data store queries must be made within Bro's asynchronous
``when`` statements and must specify a timeout block. ``when`` statements and must specify a timeout block.
Cluster Framework Examples
==========================
This section contains a few brief examples of various communication
patterns one might use when developing Bro scripts that are to operate in
the context of a cluster.
Manager Sending Events To Workers
---------------------------------
This is fairly straightforward: we just need a topic name to which we know
all workers are subscribed, combined with the event we want to send them.
.. code:: bro
event manager_to_workers(s: string)
{
print "got event from manager", s;
}
event some_event_handled_on_manager()
{
Broker::publish(Cluster::worker_topic, manager_to_workers,
"hello v0");
# If you know this event is only handled on the manager, you don't
# need any of the following conditions, they're just here as an
# example of how you can further discriminate based on node identity.
# Can check based on the name of the node.
if ( Cluster::node == "manager" )
Broker::publish(Cluster::worker_topic, manager_to_workers,
"hello v1");
# Can check based on the type of the node.
if ( Cluster::local_node_type() == Cluster::MANAGER )
Broker::publish(Cluster::worker_topic, manager_to_workers,
"hello v2");
# The run-time overhead of the above conditions can even be
# eliminated by using the following conditional directives.
# It's evaluated once per node at parse-time and, if false,
# any code within is just ignored / treated as not existing at all.
@if ( Cluster::local_node_type() == Cluster::MANAGER )
Broker::publish(Cluster::worker_topic, manager_to_workers,
"hello v3");
@endif
}
Worker Sending Events To Manager
--------------------------------
This should look almost identical to the previous case of sending an event
from the manager to workers, except it simply changes the topic name to
one to which the manager is subscribed.
.. code:: bro
event worker_to_manager(worker_name: string)
{
print "got event from worker", worker_name;
}
event some_event_handled_on_worker()
{
Broker::publish(Cluster::manager_topic, worker_to_manager,
Cluster::node);
}
Worker Sending Events To All Workers
------------------------------------
Since workers are not directly connected to each other in the cluster
topology, this type of communication is a bit different than what we
did before. Instead of using :bro:see:`Broker::publish` we use different
"relay" calls to route the message through a different node that *is* connected.
.. code:: bro
event worker_to_workers(worker_name: string)
{
print "got event from worker", worker_name;
}
event some_event_handled_on_worker()
{
# We know the manager is connected to all workers, so we could
# choose to relay the event across it. Note that sending the event
# this way will not allow the manager to handle it, even if it
# does have an event handler.
Broker::relay(Cluster::manager_topic, Cluster::worker_topic,
worker_to_workers, Cluster::node + " (via manager)");
# We also know that any given proxy is connected to all workers,
# though now we have a choice of which proxy to use. If we
# want to distribute the work associated with relaying uniformly,
# we can use a round-robin strategy. The key used here is simply
# used by the cluster framework internally to keep track of
# which node is up next in the round-robin.
Cluster::relay_rr(Cluster::proxy_pool, "example_key",
Cluster::worker_topic, worker_to_workers,
Cluster::node + " (via a proxy)");
}
Worker Distributing Events Uniformly Across Proxies
---------------------------------------------------
If you want to offload some data/work from a worker to your proxies,
you can make use of a `Highest Random Weight (HRW) hashing
<https://en.wikipedia.org/wiki/Rendezvous_hashing>`_ distribution strategy
to uniformly map an arbitrary key space across all available proxies.
.. code:: bro
event worker_to_proxies(worker_name: string)
{
print "got event from worker", worker_name;
}
global my_counter = 0;
event some_event_handled_on_worker()
{
# The key here is used to choose which proxy shall receive
# the event. Different keys may map to different nodes, but
# any given key always maps to the same node provided the
# pool of nodes remains consistent. If a proxy goes offline,
# that key maps to a different node until the original comes
# back up.
Cluster::publish_hrw(Cluster::proxy_pool,
cat("example_key", ++my_counter),
worker_to_proxies, Cluster::node);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 52 KiB

View file

@ -0,0 +1,2 @@
<?xml version="1.0" encoding="UTF-8"?>
<mxfile userAgent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36" version="8.5.2" editor="www.draw.io" type="device"><diagram name="Page-1" id="42789a77-a242-8287-6e28-9cd8cfd52e62">7Vxdc6M2FP01flwPkpCAx0022T60MzuTTrt9lEGx2cXIg0ns9NdXGAkjAbExH3a6+CXWRRIgnXN075WcGbpf778mdLP6gwcsmkEr2M/QlxkUH9cRfzLLW25xLDs3LJMwyE3gaHgK/2XSaEnrSxiwrVYx5TxKw41u9HkcMz/VbDRJ+E6v9swj/a4bumQVw5NPo6r17zBIV9IKLOt44TcWLlfy1i6WFxbU/7lM+Ess7zeD6PnwyS+vqepL1t+uaMB3JRN6mKH7hPM0/7be37MoG1s1bHm7x4arxXMnLE7PasAoAo7jWQH0qeeiTwDlXbzS6IWpdzg8afqmRocFYrBkMeax+HN3eGWWdQpEaZWuI/k1ogsW3RWjcs8jnhybbVOapJ+zCTNsj2GU9WCpsoQIFmUWB6qFH9HtNvT/XIVxfkE2A3mp1OgHS9M3WaYvKRcmnqQrvuQxjX7nfCNbbdOE/2TqKcXsecRBn0lxRaEhq/vM4/SRrsMoA/lfLAloTKVZ3glAWS51aB0+wh7zh9I4Hh55H6bfs7eeY1n6R47B8Vll1eo8y6nf8pfEZ02TK6lEkyVLG+p4eZ1sjksdS/R8ZXzN0uRNVEhYRNPwVScMlbxbFvVkUzFj9K1UYcPDON2Wev6WGUQFJSGKaVJAkAN1HJv1ERSDVm5h23a5hfiSP4MqlV7maDqw41ym2BNTfmmmoJtgCiDtmAKgMzpTKkRZCwAsWTKDJBKje7fIvi3TYrbKDIoisehnaN+twpQ9behhznbC79Dpc+TVgQpqXc0u+Xwd+vLCKZbVgJo2gFowqFSTgQAzpzvYaRQu44yxAq9ihN4B8CtLUrZ/F3rqKtJnHCl13R2dG0+aViW3RkGrDqwluLRDA6yRzRwG2w2NFRA2Cd+/fYJ1CMkt4r7l+rcGnMDxFpZ1DnCenxnx/dsEDoQ6cACuAsfBVeDgIYBT55lWgRPxpVCWTHM+Bk7IguCzBEYEEfBWceLpa5BNrggTrwITCYka92ya+x4WFw9fbfKnYFUD1BfHu2tadLoBp3C4S+52/uBixr6XC8oRrzrtDa654T+399XhGb56L655xZe2CZp7jo0wsICNXAc5GhM8OEeQeBhjghwM9d7zV5IdvhfZemBORC8WcsQdsHLWixXZtudZ7454S2IDixi3yUelcpvmKEOjXjvHvy4gbvLswOTZdVXtikTXUKdRtY2ocUzHDk+iPa5oq4xJRbSt0UTbvppGE2jkRxC5TIix8jZUR8ToaEypJROHRuJQwZU5shyNL3OL1FKmPjlZS6ZDZ99YEoqpz5T9MJTDMcwbhGHINhgG8GUMs1HfDGtIgVpGxAK0TbLOCVBnIufIUUmFaSXaEtdgLTxvoeuRmxeFLGgYsiJX55jlXEZW5OgdAWh01BNZIR6WrEo4S+TU44YSE5uj3YvJcediG9eToy7mzpE+mJvmVXFpDwJDc3/XNrcaGmDYWuqhHi57PYOHtFN2E0+nHPRRcDYmmoZxSdqiAiEj+rWMUzIn6qsweKj6vWuc2w2mx8UUjyp8lWX9qlCFwygh0dfR7krYIaxEoBtQavSshJmqos07OZaTppU1xMzondI00lKjOtbvW9Oq23x1yeYdT37mW38fNNu8aMa4mW12fXar2eYi6TvGhjCsQcYUfl9jU9Ca26a/MG7gfU5SbIqzNToOJNfTufLb2fWZE2Jmsj9ITmyYBDZRbm+RwHYv4yp29Y4ANNazvnJi7s
A5MVzh5gU+/yCh4fhbl4CcAcxhYkPXzJKBgfCk9kQHypI57aS+AWHVPOiVIscGEB4eaUCMDeOoAAtqc4/71qxT8eipePLM671roNcPaN/bmholmzaqMPayWzBMasvqZz6N2bwtCfqYAnRiK6inRa5tzr9luqtT9d6jrbrzQY3ZsY97FvP/kR0b89cSdWnTXzgMf0eFB0qOXXVnYpgjltA4T3XxEUvbPElmQr6vA2DGT1aMtaC771j9xevEslGTXa1cqDnKkKLlwwrPvKdk1znu1TDkLE5jKlfDMzh1LjmLnwgoF8fsqC9ykl7JKYrHf6eSVz/+zxr08B8=</diagram></mxfile>

View file

@ -1,18 +1,12 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef Broker::endpoint_name = "connector";
event bro_init() event bro_init()
{ {
Broker::enable(); Broker::peer("127.0.0.1");
Broker::connect("127.0.0.1", broker_port, 1sec);
} }
event Broker::outgoing_connection_established(peer_address: string, event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
peer_port: port,
peer_name: string)
{ {
print "Broker::outgoing_connection_established", print "peer added", endpoint;
peer_address, peer_port, peer_name;
terminate(); terminate();
} }

View file

@ -1,20 +1,17 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef Broker::endpoint_name = "listener";
event bro_init() event bro_init()
{ {
Broker::enable(); Broker::listen("127.0.0.1");
Broker::listen(broker_port, "127.0.0.1");
} }
event Broker::incoming_connection_established(peer_name: string) event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
{ {
print "Broker::incoming_connection_established", peer_name; print "peer added", endpoint;
} }
event Broker::incoming_connection_broken(peer_name: string) event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
{ {
print "Broker::incoming_connection_broken", peer_name; print "peer lost", endpoint;
terminate(); terminate();
} }

View file

@ -1,31 +1,35 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef Broker::endpoint_name = "connector";
global my_event: event(msg: string, c: count); global my_event: event(msg: string, c: count);
global my_auto_event: event(msg: string, c: count); global my_auto_event: event(msg: string, c: count);
event bro_init() event bro_init()
{ {
Broker::enable(); Broker::peer("127.0.0.1");
Broker::connect("127.0.0.1", broker_port, 1sec); Broker::auto_publish("bro/event/my_auto_event", my_auto_event);
Broker::auto_event("bro/event/my_auto_event", my_auto_event);
} }
event Broker::outgoing_connection_established(peer_address: string, event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
peer_port: port,
peer_name: string)
{ {
print "Broker::outgoing_connection_established", print "peer added", endpoint;
peer_address, peer_port, peer_name; Broker::publish("bro/event/my_event", my_event, "hi", 0);
Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "hi", 0));
event my_auto_event("stuff", 88); event my_auto_event("stuff", 88);
Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "...", 1)); Broker::publish("bro/event/my_event", my_event, "...", 1);
event my_auto_event("more stuff", 51); event my_auto_event("more stuff", 51);
Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "bye", 2)); local e = Broker::make_event(my_event, "bye", 2);
Broker::publish("bro/event/my_event", e);
} }
event Broker::outgoing_connection_broken(peer_address: string, event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
peer_port: port)
{ {
terminate(); terminate();
} }
event my_event(msg: string, c: count)
{
print "got my_event", msg, c;
}
event my_auto_event(msg: string, c: count)
{
print "got my_auto_event", msg, c;
}

View file

@ -1,20 +1,17 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef Broker::endpoint_name = "listener";
global msg_count = 0; global msg_count = 0;
global my_event: event(msg: string, c: count); global my_event: event(msg: string, c: count);
global my_auto_event: event(msg: string, c: count); global my_auto_event: event(msg: string, c: count);
event bro_init() event bro_init()
{ {
Broker::enable(); Broker::subscribe("bro/event/");
Broker::subscribe_to_events("bro/event/"); Broker::listen("127.0.0.1");
Broker::listen(broker_port, "127.0.0.1");
} }
event Broker::incoming_connection_established(peer_name: string) event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
{ {
print "Broker::incoming_connection_established", peer_name; print "peer added", endpoint;
} }
event my_event(msg: string, c: count) event my_event(msg: string, c: count)

View file

@ -1,17 +1,11 @@
@load ./testlog @load ./testlog
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef Broker::endpoint_name = "connector";
redef Log::enable_local_logging = F;
redef Log::enable_remote_logging = F;
global n = 0; global n = 0;
event bro_init() event bro_init()
{ {
Broker::enable(); Broker::peer("127.0.0.1");
Broker::enable_remote_logs(Test::LOG);
Broker::connect("127.0.0.1", broker_port, 1sec);
} }
event do_write() event do_write()
@ -24,17 +18,19 @@ event do_write()
event do_write(); event do_write();
} }
event Broker::outgoing_connection_established(peer_address: string, event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
peer_port: port,
peer_name: string)
{ {
print "Broker::outgoing_connection_established", print "peer added", endpoint;
peer_address, peer_port, peer_name;
event do_write(); event do_write();
} }
event Broker::outgoing_connection_broken(peer_address: string, event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
peer_port: port)
{ {
terminate(); terminate();
} }
event Test::log_test(rec: Test::Info)
{
print "wrote log", rec;
Broker::publish("bro/logs/forward/test", Test::log_test, rec);
}

View file

@ -1,24 +1,21 @@
@load ./testlog @load ./testlog
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef Broker::endpoint_name = "listener";
event bro_init() event bro_init()
{ {
Broker::enable(); Broker::subscribe("bro/logs");
Broker::subscribe_to_logs("bro/log/Test::LOG"); Broker::listen("127.0.0.1");
Broker::listen(broker_port, "127.0.0.1");
} }
event Broker::incoming_connection_established(peer_name: string) event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
{ {
print "Broker::incoming_connection_established", peer_name; print "peer added", endpoint;
} }
event Test::log_test(rec: Test::Info) event Test::log_test(rec: Test::Info)
{ {
print "wrote log", rec; print "got log event", rec;
if ( rec$num == 5 ) if ( rec$num == 5 )
terminate(); terminate();

View file

@ -1,26 +0,0 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
redef Broker::endpoint_name = "connector";
event bro_init()
{
Broker::enable();
Broker::connect("127.0.0.1", broker_port, 1sec);
}
event Broker::outgoing_connection_established(peer_address: string,
peer_port: port,
peer_name: string)
{
print "Broker::outgoing_connection_established",
peer_address, peer_port, peer_name;
Broker::send_print("bro/print/hi", "hello");
Broker::send_print("bro/print/stuff", "...");
Broker::send_print("bro/print/bye", "goodbye");
}
event Broker::outgoing_connection_broken(peer_address: string,
peer_port: port)
{
terminate();
}

View file

@ -1,25 +0,0 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
redef Broker::endpoint_name = "listener";
global msg_count = 0;
event bro_init()
{
Broker::enable();
Broker::subscribe_to_prints("bro/print/");
Broker::listen(broker_port, "127.0.0.1");
}
event Broker::incoming_connection_established(peer_name: string)
{
print "Broker::incoming_connection_established", peer_name;
}
event Broker::print_handler(msg: string)
{
++msg_count;
print "got print message", msg;
if ( msg_count == 3 )
terminate();
}

View file

@ -1,53 +1,29 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
global h: opaque of Broker::Handle; global h: opaque of Broker::Store;
function dv(d: Broker::Data): Broker::DataVector
{
local rval: Broker::DataVector;
rval[0] = d;
return rval;
}
global ready: event(); global ready: event();
event Broker::outgoing_connection_broken(peer_address: string, event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
peer_port: port)
{ {
terminate(); terminate();
} }
event Broker::outgoing_connection_established(peer_address: string,
peer_port: port,
peer_name: string)
{
local myset: set[string] = {"a", "b", "c"};
local myvec: vector of string = {"alpha", "beta", "gamma"};
h = Broker::create_master("mystore");
Broker::insert(h, Broker::data("one"), Broker::data(110));
Broker::insert(h, Broker::data("two"), Broker::data(223));
Broker::insert(h, Broker::data("myset"), Broker::data(myset));
Broker::insert(h, Broker::data("myvec"), Broker::data(myvec));
Broker::increment(h, Broker::data("one"));
Broker::decrement(h, Broker::data("two"));
Broker::add_to_set(h, Broker::data("myset"), Broker::data("d"));
Broker::remove_from_set(h, Broker::data("myset"), Broker::data("b"));
Broker::push_left(h, Broker::data("myvec"), dv(Broker::data("delta")));
Broker::push_right(h, Broker::data("myvec"), dv(Broker::data("omega")));
when ( local res = Broker::size(h) )
{
print "master size", res;
event ready();
}
timeout 10sec
{ print "timeout"; }
}
event bro_init() event bro_init()
{ {
Broker::enable(); h = Broker::create_master("mystore");
Broker::connect("127.0.0.1", broker_port, 1secs);
Broker::auto_event("bro/event/ready", ready); local myset: set[string] = {"a", "b", "c"};
local myvec: vector of string = {"alpha", "beta", "gamma"};
Broker::put(h, "one", 110);
Broker::put(h, "two", 223);
Broker::put(h, "myset", myset);
Broker::put(h, "myvec", myvec);
Broker::increment(h, "one");
Broker::decrement(h, "two");
Broker::insert_into_set(h, "myset", "d");
Broker::remove_from(h, "myset", "b");
Broker::push(h, "myvec", "delta");
Broker::peer("127.0.0.1");
} }

View file

@ -1,43 +1,79 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
global h: opaque of Broker::Handle; global h: opaque of Broker::Store;
global expected_key_count = 4; global expected_key_count = 4;
global key_count = 0; global key_count = 0;
# Lookup a value in the store based on an arbitrary key string.
function do_lookup(key: string) function do_lookup(key: string)
{ {
when ( local res = Broker::lookup(h, Broker::data(key)) ) when ( local res = Broker::get(h, key) )
{ {
++key_count; ++key_count;
print "lookup", key, res; print "lookup", key, res;
if ( key_count == expected_key_count ) # End after we iterated over looking up each key in the store twice.
if ( key_count == expected_key_count * 2 )
terminate(); terminate();
} }
timeout 10sec # All data store queries must specify a timeout
timeout 3sec
{ print "timeout", key; } { print "timeout", key; }
} }
event ready() event check_keys()
{ {
h = Broker::create_clone("mystore"); # Here we just query for the list of keys in the store, and show how to
# look up each one's value.
when ( local res = Broker::keys(h) ) when ( local res = Broker::keys(h) )
{ {
print "clone keys", res; print "clone keys", res;
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 0)));
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 1))); if ( res?$result )
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 2))); {
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 3))); # Since we know that the keys we are storing are all strings,
# we can conveniently cast the result of Broker::keys to
# a native Bro type, namely 'set[string]'.
for ( k in res$result as string_set )
do_lookup(k);
# Alternatively, we can use a generic iterator to iterate
# over the results (which we know is of the 'set' type because
# that's what Broker::keys() always returns). If the keys
# we stored were not all of the same type, then you would
# likely want to use this method of inspecting the store's keys.
local i = Broker::set_iterator(res$result);
while ( ! Broker::set_iterator_last(i) )
{
do_lookup(Broker::set_iterator_value(i) as string);
Broker::set_iterator_next(i);
} }
timeout 10sec }
{ print "timeout"; } }
# All data store queries must specify a timeout.
# You also might see timeouts on connecting/initializing a clone since
# it hasn't had time to get fully set up yet.
timeout 1sec
{
print "timeout";
schedule 1sec { check_keys() };
}
}
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
{
print "peer added";
# We could create a clone early, like in bro_init and it will periodically
# try to synchronize with its master once it connects, however, we just
# create it now since we know the peer w/ the master store has just
# connected.
h = Broker::create_clone("mystore");
event check_keys();
} }
event bro_init() event bro_init()
{ {
Broker::enable(); Broker::listen("127.0.0.1");
Broker::subscribe_to_events("bro/event/ready");
Broker::listen(broker_port, "127.0.0.1");
} }

View file

@ -13,6 +13,5 @@ export {
event bro_init() &priority=5 event bro_init() &priority=5
{ {
Broker::enable();
Log::create_stream(Test::LOG, [$columns=Test::Info, $ev=log_test, $path="test"]); Log::create_stream(Test::LOG, [$columns=Test::Info, $ev=log_test, $path="test"]);
} }

View file

@ -45,6 +45,8 @@ Reference Section
script-reference/index.rst script-reference/index.rst
components/index.rst components/index.rst
`Broker User Manual <../broker-manual/index.html>`_
Development Development
=========== ===========

View file

@ -1 +0,0 @@
../../aux/broccoli/bindings/broccoli-python/CHANGES

View file

@ -1 +0,0 @@
../../aux/broccoli/bindings/broccoli-ruby/CHANGES

View file

@ -1 +0,0 @@
../../aux/broccoli/CHANGES

View file

@ -0,0 +1 @@
../../aux/broker/CHANGES

View file

@ -17,23 +17,11 @@ BroControl
.. literalinclude:: CHANGES-broctl.txt .. literalinclude:: CHANGES-broctl.txt
-------- ------
Broccoli Broker
-------- ------
.. literalinclude:: CHANGES-broccoli.txt .. literalinclude:: CHANGES-broker.txt
---------------
Broccoli Python
---------------
.. literalinclude:: CHANGES-broccoli-python.txt
-------------
Broccoli Ruby
-------------
.. literalinclude:: CHANGES-broccoli-ruby.txt
-------- --------
Capstats Capstats

View file

@ -35,16 +35,16 @@ before you begin:
To build Bro from source, the following additional dependencies are required: To build Bro from source, the following additional dependencies are required:
* CMake 2.8 or greater (http://www.cmake.org) * CMake 2.8.12 or greater (http://www.cmake.org)
* Make * Make
* C/C++ compiler with C++11 support (GCC 4.8+ or Clang 3.3+) * C/C++ compiler with C++11 support (GCC 4.8+ or Clang 3.3+)
* SWIG (http://www.swig.org) * SWIG (http://www.swig.org)
* Bison (GNU Parser Generator) * Bison 2.5 or greater (https://www.gnu.org/software/bison/)
* Flex (Fast Lexical Analyzer) * Flex (lexical analyzer generator) (https://github.com/westes/flex)
* Libpcap headers (http://www.tcpdump.org) * Libpcap headers (http://www.tcpdump.org)
* OpenSSL headers (http://www.openssl.org) * OpenSSL headers (http://www.openssl.org)
* zlib headers * zlib headers (https://zlib.net/)
* Python * Python (https://www.python.org/)
To install the required dependencies, you can use: To install the required dependencies, you can use:
@ -73,7 +73,7 @@ To install the required dependencies, you can use:
.. console:: .. console::
sudo pkg install bash cmake swig bison python py27-sqlite3 sudo pkg install bash cmake swig30 bison python py27-sqlite3 py27-ipaddress
For older versions of FreeBSD (especially FreeBSD 9.x), the system compiler For older versions of FreeBSD (especially FreeBSD 9.x), the system compiler
is not new enough to compile Bro. For these systems, you will have to install is not new enough to compile Bro. For these systems, you will have to install
@ -101,14 +101,17 @@ To install the required dependencies, you can use:
clicking "Install"). clicking "Install").
OS X comes with all required dependencies except for CMake_, SWIG_, OS X comes with all required dependencies except for CMake_, SWIG_,
and OpenSSL (OpenSSL headers were removed in OS X 10.11, therefore OpenSSL Bison, and OpenSSL (OpenSSL headers were removed in OS X 10.11,
must be installed manually for OS X versions 10.11 or newer). therefore OpenSSL must be installed manually for OS X versions 10.11
Distributions of these dependencies can or newer).
likely be obtained from your preferred Mac OS X package management
system (e.g. Homebrew_, MacPorts_, or Fink_). Specifically for Distributions of these dependencies can likely be obtained from your
Homebrew, the ``cmake``, ``swig``, and ``openssl`` packages preferred Mac OS X package management system (e.g. Homebrew_,
provide the required dependencies. For MacPorts, the ``cmake``, ``swig``, MacPorts_, or Fink_). Specifically for Homebrew, the ``cmake``,
``swig-python``, and ``openssl`` packages provide the required dependencies. ``swig``, ``openssl``, and ``bison`` packages
provide the required dependencies. For MacPorts, the ``cmake``,
``swig``, ``swig-python``, ``openssl``, and ``bison`` packages provide
the required dependencies.
Optional Dependencies Optional Dependencies
@ -117,7 +120,6 @@ Optional Dependencies
Bro can make use of some optional libraries and tools if they are found at Bro can make use of some optional libraries and tools if they are found at
build time: build time:
* C++ Actor Framework (CAF) version 0.14 (http://actor-framework.org)
* LibGeoIP (for geolocating IP addresses) * LibGeoIP (for geolocating IP addresses)
* sendmail (enables Bro and BroControl to send mail) * sendmail (enables Bro and BroControl to send mail)
* curl (used by a Bro script that implements active HTTP) * curl (used by a Bro script that implements active HTTP)

View file

@ -168,8 +168,8 @@ Bro Diagnostics
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| cluster.log | Bro cluster messages | :bro:type:`Cluster::Info` | | cluster.log | Bro cluster messages | :bro:type:`Cluster::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| communication.log | Communication events between Bro or | :bro:type:`Communication::Info` | | broker.log | Peering status events between Bro or | :bro:type:`Broker::Info` |
| | Broccoli instances | | | | Broker-enabled processes | |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| loaded_scripts.log | Shows all scripts loaded by Bro | :bro:type:`LoadedScripts::Info` | | loaded_scripts.log | Shows all scripts loaded by Bro | :bro:type:`LoadedScripts::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+

View file

@ -325,29 +325,14 @@ variable declared while scripts using a different namespace or no
namespace altogether will not have access to the variable. namespace altogether will not have access to the variable.
Alternatively, if a global variable is declared within an ``export { ... }`` Alternatively, if a global variable is declared within an ``export { ... }``
block that variable is available to any other script through the block that variable is available to any other script through the
naming convention of ``MODULE::variable_name``. naming convention of ``<module name>::<variable name>``, i.e. the variable
needs to be "scoped" by the name of the module in which it was declared.
The declaration below is taken from the
:doc:`/scripts/policy/protocols/conn/known-hosts.bro` script and
declares a variable called ``known_hosts`` as a global set of unique
IP addresses within the ``Known`` namespace and exports it for use
outside of the ``Known`` namespace. Were we to want to use the
``known_hosts`` variable we'd be able to access it through
``Known::known_hosts``.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/conn/known-hosts.bro
:lines: 8-10, 32, 37
The sample above also makes use of an ``export { ... }`` block. When the module
keyword is used in a script, the variables declared are said to be in
that module's "namespace". Where as a global variable can be accessed
by its name alone when it is not declared within a module, a global
variable declared within a module must be exported and then accessed
via ``MODULE_NAME::VARIABLE_NAME``. As in the example above, we would be
able to access the ``known_hosts`` in a separate script variable via
``Known::known_hosts`` due to the fact that ``known_hosts`` was declared as
a global variable within an export block under the ``Known`` namespace.
When the ``module`` keyword is used in a script, the variables declared
are said to be in that module's "namespace". Where as a global variable
can be accessed by its name alone when it is not declared within a
module, a global variable declared within a module must be exported and
then accessed via ``<module name>::<variable name>``.
Constants Constants
~~~~~~~~~ ~~~~~~~~~

View file

@ -1,2 +1,3 @@
@load ./main @load ./main
@load ./store @load ./store
@load ./log

View file

@ -0,0 +1,80 @@
@load ./main
module Broker;
export {
## The Broker logging stream identifier.
redef enum Log::ID += { LOG };
## The type of a Broker activity being logged.
type Type: enum {
## An informational status update.
STATUS,
## An error situation.
ERROR
};
## A record type containing the column fields of the Broker log.
type Info: record {
## The network time at which a Broker event occurred.
ts: time &log;
## The type of the Broker event.
ty: Type &log;
## The event being logged.
ev: string &log;
## The peer (if any) with which a Broker event is
## concerned.
peer: NetworkInfo &log &optional;
## An optional message describing the Broker event in more detail
message: string &log &optional;
};
}
event bro_init() &priority=5
{
Log::create_stream(Broker::LOG, [$columns=Info, $path="broker"]);
}
function log_status(ev: string, endpoint: EndpointInfo, msg: string)
{
local r: Info;
r = [$ts = network_time(),
$ev = ev,
$ty = STATUS,
$message = msg];
if ( endpoint?$network )
r$peer = endpoint$network;
Log::write(Broker::LOG, r);
}
event Broker::peer_added(endpoint: EndpointInfo, msg: string)
{
log_status("peer-added", endpoint, msg);
}
event Broker::peer_removed(endpoint: EndpointInfo, msg: string)
{
log_status("peer-removed", endpoint, msg);
}
event Broker::peer_lost(endpoint: EndpointInfo, msg: string)
{
log_status("connection-terminated", endpoint, msg);
}
event Broker::error(code: ErrorCode, msg: string)
{
local ev = cat(code);
ev = subst_string(ev, "Broker::", "");
ev = subst_string(ev, "_", "-");
ev = to_lower(ev);
Log::write(Broker::LOG, [$ts = network_time(),
$ev = ev,
$ty = ERROR,
$message = msg]);
}

View file

@ -1,55 +1,160 @@
##! Various data structure definitions for use with Bro's communication system. ##! The Broker-based communication API and its various options.
module Log;
export {
type Log::ID: enum {
## Dummy place-holder.
UNKNOWN
};
}
module Broker; module Broker;
export { export {
## Default port for Broker communication. Where not specified
## otherwise, this is the port to connect to and listen on.
const default_port = 9999/tcp &redef;
## A name used to identify this endpoint to peers. ## Default interval to retry listening on a port if it's currently in
## use already.
const default_listen_retry = 30sec &redef;
## Default address on which to listen.
## ##
## .. bro:see:: Broker::connect Broker::listen ## .. bro:see:: Broker::listen
const endpoint_name = "" &redef; const default_listen_address = "" &redef;
## Change communication behavior. ## Default interval to retry connecting to a peer if it cannot be made to work
type EndpointFlags: record { ## initially, or if it ever becomes disconnected.
## Whether to restrict message topics that can be published to peers. const default_connect_retry = 30sec &redef;
auto_publish: bool &default = T;
## Whether to restrict what message topics or data store identifiers ## If false, do not use SSL for network connections. By default, SSL will even
## the local endpoint advertises to peers (e.g. subscribing to ## be used if no certificates / CAs have been configured. In that case
## events or making a master data store available). ## (which is the default) the communication will be encrypted, but not
auto_advertise: bool &default = T; ## authenticated.
const disable_ssl = F &redef;
## Path to a file containing concatenated trusted certificates
## in PEM format. If set, Bro will require valid certificates forx
## all peers.
const ssl_cafile = "" &redef;
## Path to an OpenSSL-style directory of trusted certificates.
## If set, Bro will require valid certificates forx
## all peers.
const ssl_capath = "" &redef;
## Path to a file containing a X.509 certificate for this
## node in PEM format. If set, Bro will require valid certificates for
## all peers.
const ssl_certificate = "" &redef;
## Passphrase to decrypt the private key specified by
## :bro:see:`Broker::ssl_keyfile`. If set, Bro will require valid
## certificates for all peers.
const ssl_passphrase = "" &redef;
## Path to the file containing the private key for this node's
## certificate. If set, Bro will require valid certificates for
## all peers.
const ssl_keyfile = "" &redef;
## Forward all received messages to subscribing peers.
const forward_messages = F &redef;
## The default topic prefix where logs will be published. The log's stream
## id is appended when writing to a particular stream.
const default_log_topic_prefix = "bro/logs/" &redef;
## The default implementation for :bro:see:`Broker::log_topic`.
function default_log_topic(id: Log::ID, path: string): string
{
return default_log_topic_prefix + cat(id);
}
## A function that will be called for each log entry to determine what
## broker topic string will be used for sending it to peers. The
## default implementation will return a value based on
## :bro:see:`Broker::default_log_topic_prefix`.
##
## id: the ID associated with the log stream entry that will be sent.
##
## path: the path to which the log stream entry will be output.
##
## Returns: a string representing the broker topic to which the log
## will be sent.
const log_topic: function(id: Log::ID, path: string): string = default_log_topic &redef;
type ErrorCode: enum {
## The unspecified default error code.
UNSPECIFIED = 1,
## Version incompatibility.
PEER_INCOMPATIBLE = 2,
## Referenced peer does not exist.
PEER_INVALID = 3,
## Remote peer not listening.
PEER_UNAVAILABLE = 4,
## An peering request timed out.
PEER_TIMEOUT = 5,
## Master with given name already exist.
MASTER_EXISTS = 6,
## Master with given name does not exist.
NO_SUCH_MASTER = 7,
## The given data store key does not exist.
NO_SUCH_KEY = 8,
## The store operation timed out.
REQUEST_TIMEOUT = 9,
## The operation expected a different type than provided
TYPE_CLASH = 10,
## The data value cannot be used to carry out the desired operation.
INVALID_DATA = 11,
## The storage backend failed to execute the operation.
BACKEND_FAILURE = 12,
## The storage backend failed to execute the operation.
STALE_DATA = 13,
## Catch-all for a CAF-level problem.
CAF_ERROR = 100
}; };
## Fine-grained tuning of communication behavior for a particular message. ## The possible states of a peer endpoint.
type SendFlags: record { type PeerStatus: enum {
## Send the message to the local endpoint. ## The peering process is initiated.
self: bool &default = F; INITIALIZING,
## Send the message to peer endpoints that advertise interest in ## Connection establishment in process.
## the topic associated with the message. CONNECTING,
peers: bool &default = T; ## Connection established, peering pending.
## Send the message to peer endpoints even if they don't advertise CONNECTED,
## interest in the topic associated with the message. ## Successfully peered.
unsolicited: bool &default = F; PEERED,
## Connection to remote peer lost.
DISCONNECTED,
## Reconnecting to peer after a lost connection.
RECONNECTING,
}; };
type NetworkInfo: record {
## The IP address or hostname where the endpoint listens.
address: string &log;
## The port where the endpoint is bound to.
bound_port: port &log;
};
type EndpointInfo: record {
## A unique identifier of the node.
id: string;
## Network-level information.
network: NetworkInfo &optional;
};
type PeerInfo: record {
peer: EndpointInfo;
status: PeerStatus;
};
type PeerInfos: vector of PeerInfo;
## Opaque communication data. ## Opaque communication data.
type Data: record { type Data: record {
d: opaque of Broker::Data &optional; data: opaque of Broker::Data &optional;
}; };
## Opaque communication data. ## Opaque communication data sequence.
type DataVector: vector of Broker::Data; type DataVector: vector of Broker::Data;
## Opaque event communication data. ## Opaque event communication data.
type EventArgs: record { type Event: record {
## The name of the event. Not set if invalid event or arguments. ## The name of the event. Not set if invalid event or arguments.
name: string &optional; name: string &optional;
## The arguments to the event. ## The arguments to the event.
@ -63,52 +168,23 @@ export {
val: Broker::Data; val: Broker::Data;
}; };
## Enable use of communication.
##
## flags: used to tune the local Broker endpoint behavior.
##
## Returns: true if communication is successfully initialized.
global enable: function(flags: EndpointFlags &default = EndpointFlags()): bool;
## Changes endpoint flags originally supplied to :bro:see:`Broker::enable`.
##
## flags: the new endpoint behavior flags to use.
##
## Returns: true if flags were changed.
global set_endpoint_flags: function(flags: EndpointFlags &default = EndpointFlags()): bool;
## Allow sending messages to peers if associated with the given topic.
## This has no effect if auto publication behavior is enabled via the flags
## supplied to :bro:see:`Broker::enable` or :bro:see:`Broker::set_endpoint_flags`.
##
## topic: a topic to allow messages to be published under.
##
## Returns: true if successful.
global publish_topic: function(topic: string): bool;
## Disallow sending messages to peers if associated with the given topic.
## This has no effect if auto publication behavior is enabled via the flags
## supplied to :bro:see:`Broker::enable` or :bro:see:`Broker::set_endpoint_flags`.
##
## topic: a topic to disallow messages to be published under.
##
## Returns: true if successful.
global unpublish_topic: function(topic: string): bool;
## Listen for remote connections. ## Listen for remote connections.
## ##
## p: the TCP port to listen on.
##
## a: an address string on which to accept connections, e.g. ## a: an address string on which to accept connections, e.g.
## "127.0.0.1". An empty string refers to @p INADDR_ANY. ## "127.0.0.1". An empty string refers to @p INADDR_ANY.
## ##
## reuse: equivalent to behavior of SO_REUSEADDR. ## p: the TCP port to listen on. The value 0 means that the OS should choose
## the next available free port.
## ##
## Returns: true if the local endpoint is now listening for connections. ## retry: If non-zero, retries listening in regular intervals if the port cannot be
## acquired immediately. 0 disables retries.
## ##
## .. bro:see:: Broker::incoming_connection_established ## Returns: the bound port or 0/? on failure.
global listen: function(p: port, a: string &default = "", reuse: bool &default = T): bool; ##
## .. bro:see:: Broker::status
global listen: function(a: string &default = default_listen_address,
p: port &default = default_port,
retry: interval &default = default_listen_retry): port;
## Initiate a remote connection. ## Initiate a remote connection.
## ##
## a: an address to connect to, e.g. "localhost" or "127.0.0.1". ## a: an address to connect to, e.g. "localhost" or "127.0.0.1".
@ -123,60 +199,63 @@ export {
## it's a new peer. The actual connection may not be established ## it's a new peer. The actual connection may not be established
## until a later point in time. ## until a later point in time.
## ##
## .. bro:see:: Broker::outgoing_connection_established ## .. bro:see:: Broker::status
global connect: function(a: string, p: port, retry: interval): bool; global peer: function(a: string, p: port &default=default_port,
retry: interval &default=default_connect_retry): bool;
## Remove a remote connection. ## Remove a remote connection.
## ##
## a: the address used in previous successful call to :bro:see:`Broker::connect`. ## Note that this does not terminate the connection to the peer, it
## just means that we won't exchange any further information with it
## unless peering resumes later.
## ##
## p: the port used in previous successful call to :bro:see:`Broker::connect`. ## a: the address used in previous successful call to :bro:see:`Broker::peer`.
##
## p: the port used in previous successful call to :bro:see:`Broker::peer`.
## ##
## Returns: true if the arguments match a previously successful call to ## Returns: true if the arguments match a previously successful call to
## :bro:see:`Broker::connect`. ## :bro:see:`Broker::peer`.
global disconnect: function(a: string, p: port): bool; ##
## TODO: We do not have a function yet to terminate a connection.
global unpeer: function(a: string, p: port): bool;
## Print a simple message to any interested peers. The receiver can use ## Returns: a list of all peer connections.
## :bro:see:`Broker::print_handler` to handle messages. global peers: function(): vector of PeerInfo;
## Returns: a unique identifier for the local broker endpoint.
global node_id: function(): string;
## Sends all pending log messages to remote peers. This normally
## doesn't need to be used except for test cases that are time-sensitive.
global flush_logs: function(): count;
## Publishes the value of an identifier to a given topic. The subscribers
## will update their local value for that identifier on receipt.
## ##
## topic: a topic associated with the printed message. ## topic: a topic associated with the message.
## ##
## msg: the print message to send to peers. ## id: the identifier to publish.
##
## flags: tune the behavior of how the message is sent.
## ##
## Returns: true if the message is sent. ## Returns: true if the message is sent.
global send_print: function(topic: string, msg: string, flags: SendFlags &default = SendFlags()): bool; global publish_id: function(topic: string, id: string): bool;
## Register interest in all peer print messages that use a certain topic ## Register interest in all peer event messages that use a certain topic
## prefix. Use :bro:see:`Broker::print_handler` to handle received ## prefix.
## messages.
## ##
## topic_prefix: a prefix to match against remote message topics. ## topic_prefix: a prefix to match against remote message topics.
## e.g. an empty prefix matches everything and "a" matches ## e.g. an empty prefix matches everything and "a" matches
## "alice" and "amy" but not "bob". ## "alice" and "amy" but not "bob".
## ##
## Returns: true if it's a new print subscription and it is now registered. ## Returns: true if it's a new event subscription and it is now registered.
global subscribe_to_prints: function(topic_prefix: string): bool; global subscribe: function(topic_prefix: string): bool;
## Unregister interest in all peer print messages that use a topic prefix. ## Unregister interest in all peer event messages that use a topic prefix.
## ##
## topic_prefix: a prefix previously supplied to a successful call to ## topic_prefix: a prefix previously supplied to a successful call to
## :bro:see:`Broker::subscribe_to_prints`. ## :bro:see:`Broker::subscribe`.
## ##
## Returns: true if interest in the topic prefix is no longer advertised. ## Returns: true if interest in the topic prefix is no longer advertised.
global unsubscribe_to_prints: function(topic_prefix: string): bool; global unsubscribe: function(topic_prefix: string): bool;
## Send an event to any interested peers.
##
## topic: a topic associated with the event message.
##
## args: event arguments as made by :bro:see:`Broker::event_args`.
##
## flags: tune the behavior of how the message is sent.
##
## Returns: true if the message is sent.
global send_event: function(topic: string, args: EventArgs, flags: SendFlags &default = SendFlags()): bool;
## Automatically send an event to any interested peers whenever it is ## Automatically send an event to any interested peers whenever it is
## locally dispatched (e.g. using "event my_event(...);" in a script). ## locally dispatched (e.g. using "event my_event(...);" in a script).
@ -187,83 +266,18 @@ export {
## ##
## ev: a Bro event value. ## ev: a Bro event value.
## ##
## flags: tune the behavior of how the message is sent.
##
## Returns: true if automatic event sending is now enabled. ## Returns: true if automatic event sending is now enabled.
global auto_event: function(topic: string, ev: any, flags: SendFlags &default = SendFlags()): bool; global auto_publish: function(topic: string, ev: any): bool;
## Stop automatically sending an event to peers upon local dispatch. ## Stop automatically sending an event to peers upon local dispatch.
## ##
## topic: a topic originally given to :bro:see:`Broker::auto_event`. ## topic: a topic originally given to :bro:see:`Broker::auto_publish`.
## ##
## ev: an event originally given to :bro:see:`Broker::auto_event`. ## ev: an event originally given to :bro:see:`Broker::auto_publish`.
## ##
## Returns: true if automatic events will not occur for the topic/event ## Returns: true if automatic events will not occur for the topic/event
## pair. ## pair.
global auto_event_stop: function(topic: string, ev: any): bool; global auto_unpublish: function(topic: string, ev: any): bool;
## Register interest in all peer event messages that use a certain topic
## prefix.
##
## topic_prefix: a prefix to match against remote message topics.
## e.g. an empty prefix matches everything and "a" matches
## "alice" and "amy" but not "bob".
##
## Returns: true if it's a new event subscription and it is now registered.
global subscribe_to_events: function(topic_prefix: string): bool;
## Unregister interest in all peer event messages that use a topic prefix.
##
## topic_prefix: a prefix previously supplied to a successful call to
## :bro:see:`Broker::subscribe_to_events`.
##
## Returns: true if interest in the topic prefix is no longer advertised.
global unsubscribe_to_events: function(topic_prefix: string): bool;
## Enable remote logs for a given log stream.
##
## id: the log stream to enable remote logs for.
##
## flags: tune the behavior of how log entry messages are sent.
##
## Returns: true if remote logs are enabled for the stream.
global enable_remote_logs: function(id: Log::ID, flags: SendFlags &default = SendFlags()): bool;
## Disable remote logs for a given log stream.
##
## id: the log stream to disable remote logs for.
##
## Returns: true if remote logs are disabled for the stream.
global disable_remote_logs: function(id: Log::ID): bool;
## Check if remote logs are enabled for a given log stream.
##
## id: the log stream to check.
##
## Returns: true if remote logs are enabled for the given stream.
global remote_logs_enabled: function(id: Log::ID): bool;
## Register interest in all peer log messages that use a certain topic
## prefix. Logs are implicitly sent with topic "bro/log/<stream-name>" and
## the receiving side processes them through the logging framework as usual.
##
## topic_prefix: a prefix to match against remote message topics.
## e.g. an empty prefix matches everything and "a" matches
## "alice" and "amy" but not "bob".
##
## Returns: true if it's a new log subscription and it is now registered.
global subscribe_to_logs: function(topic_prefix: string): bool;
## Unregister interest in all peer log messages that use a topic prefix.
## Logs are implicitly sent with topic "bro/log/<stream-name>" and the
## receiving side processes them through the logging framework as usual.
##
## topic_prefix: a prefix previously supplied to a successful call to
## :bro:see:`Broker::subscribe_to_logs`.
##
## Returns: true if interest in the topic prefix is no longer advertised.
global unsubscribe_to_logs: function(topic_prefix: string): bool;
} }
@load base/bif/comm.bif @load base/bif/comm.bif
@ -271,106 +285,67 @@ export {
module Broker; module Broker;
@ifdef ( Broker::__enable ) event retry_listen(a: string, p: port, retry: interval)
function enable(flags: EndpointFlags &default = EndpointFlags()) : bool
{ {
return __enable(flags); listen(a, p, retry);
} }
function set_endpoint_flags(flags: EndpointFlags &default = EndpointFlags()): bool function listen(a: string, p: port, retry: interval): port
{ {
return __set_endpoint_flags(flags); local bound = __listen(a, p);
if ( bound == 0/tcp && retry != 0secs )
schedule retry { retry_listen(a, p, retry) };
return bound;
} }
function publish_topic(topic: string): bool function peer(a: string, p: port, retry: interval): bool
{ {
return __publish_topic(topic); return __peer(a, p, retry);
} }
function unpublish_topic(topic: string): bool function unpeer(a: string, p: port): bool
{ {
return __unpublish_topic(topic); return __unpeer(a, p);
} }
function listen(p: port, a: string &default = "", reuse: bool &default = T): bool function peers(): vector of PeerInfo
{ {
return __listen(p, a, reuse); return __peers();
} }
function connect(a: string, p: port, retry: interval): bool function node_id(): string
{ {
return __connect(a, p, retry); return __node_id();
} }
function disconnect(a: string, p: port): bool function flush_logs(): count
{ {
return __disconnect(a, p); return __flush_logs();
} }
function send_print(topic: string, msg: string, flags: SendFlags &default = SendFlags()): bool function publish_id(topic: string, id: string): bool
{ {
return __send_print(topic, msg, flags); return __publish_id(topic, id);
} }
function subscribe_to_prints(topic_prefix: string): bool function subscribe(topic_prefix: string): bool
{ {
return __subscribe_to_prints(topic_prefix); return __subscribe(topic_prefix);
} }
function unsubscribe_to_prints(topic_prefix: string): bool function unsubscribe(topic_prefix: string): bool
{ {
return __unsubscribe_to_prints(topic_prefix); return __unsubscribe(topic_prefix);
} }
function send_event(topic: string, args: EventArgs, flags: SendFlags &default = SendFlags()): bool function auto_publish(topic: string, ev: any): bool
{ {
return __event(topic, args, flags); return __auto_publish(topic, ev);
} }
function auto_event(topic: string, ev: any, flags: SendFlags &default = SendFlags()): bool function auto_unpublish(topic: string, ev: any): bool
{ {
return __auto_event(topic, ev, flags); return __auto_unpublish(topic, ev);
} }
function auto_event_stop(topic: string, ev: any): bool
{
return __auto_event_stop(topic, ev);
}
function subscribe_to_events(topic_prefix: string): bool
{
return __subscribe_to_events(topic_prefix);
}
function unsubscribe_to_events(topic_prefix: string): bool
{
return __unsubscribe_to_events(topic_prefix);
}
function enable_remote_logs(id: Log::ID, flags: SendFlags &default = SendFlags()): bool
{
return __enable_remote_logs(id, flags);
}
function disable_remote_logs(id: Log::ID): bool
{
return __disable_remote_logs(id);
}
function remote_logs_enabled(id: Log::ID): bool
{
return __remote_logs_enabled(id);
}
function subscribe_to_logs(topic_prefix: string): bool
{
return __subscribe_to_logs(topic_prefix);
}
function unsubscribe_to_logs(topic_prefix: string): bool
{
return __unsubscribe_to_logs(topic_prefix);
}
@endif

File diff suppressed because it is too large Load diff

View file

@ -1,11 +1,16 @@
# Load the core cluster support. # Load the core cluster support.
@load ./main @load ./main
@load ./pools
@if ( Cluster::is_enabled() ) @if ( Cluster::is_enabled() )
# Give the node being started up it's peer name. # Give the node being started up it's peer name.
redef peer_description = Cluster::node; redef peer_description = Cluster::node;
@if ( Cluster::enable_round_robin_logging )
redef Broker::log_topic = Cluster::rr_log_topic;
@endif
# Add a cluster prefix. # Add a cluster prefix.
@prefixes += cluster @prefixes += cluster
@ -19,13 +24,6 @@ redef peer_description = Cluster::node;
@load ./setup-connections @load ./setup-connections
# Don't load the listening script until we're a bit more sure that the
# cluster framework is actually being enabled.
@load frameworks/communication/listen
## Set the port that this node is supposed to listen on.
redef Communication::listen_port = Cluster::nodes[Cluster::node]$p;
@if ( Cluster::local_node_type() == Cluster::MANAGER ) @if ( Cluster::local_node_type() == Cluster::MANAGER )
@load ./nodes/manager @load ./nodes/manager
# If no logger is defined, then the manager receives logs. # If no logger is defined, then the manager receives logs.

View file

@ -7,10 +7,111 @@
##! ``@load base/frameworks/cluster``. ##! ``@load base/frameworks/cluster``.
@load base/frameworks/control @load base/frameworks/control
@load base/frameworks/broker
module Cluster; module Cluster;
export { export {
## Whether to distribute log messages among available logging nodes.
const enable_round_robin_logging = T &redef;
## The topic name used for exchanging general messages that are relevant to
## any node in a cluster. Used with broker-enabled cluster communication.
const broadcast_topic = "bro/cluster/broadcast" &redef;
## The topic name used for exchanging messages that are relevant to
## logger nodes in a cluster. Used with broker-enabled cluster communication.
const logger_topic = "bro/cluster/logger" &redef;
## The topic name used for exchanging messages that are relevant to
## manager nodes in a cluster. Used with broker-enabled cluster communication.
const manager_topic = "bro/cluster/manager" &redef;
## The topic name used for exchanging messages that are relevant to
## proxy nodes in a cluster. Used with broker-enabled cluster communication.
const proxy_topic = "bro/cluster/proxy" &redef;
## The topic name used for exchanging messages that are relevant to
## worker nodes in a cluster. Used with broker-enabled cluster communication.
const worker_topic = "bro/cluster/worker" &redef;
## The topic name used for exchanging messages that are relevant to
## time machine nodes in a cluster. Used with broker-enabled cluster communication.
const time_machine_topic = "bro/cluster/time_machine" &redef;
## The topic prefix used for exchanging messages that are relevant to
## a named node in a cluster. Used with broker-enabled cluster communication.
const node_topic_prefix = "bro/cluster/node/" &redef;
## Name of the node on which master data stores will be created if no other
## has already been specified by the user in :bro:see:`Cluster::stores`.
## An empty value means "use whatever name corresponds to the manager
## node".
const default_master_node = "" &redef;
## The type of data store backend that will be used for all data stores if
## no other has already been specified by the user in :bro:see:`Cluster::stores`.
const default_backend = Broker::MEMORY &redef;
## The type of persistent data store backend that will be used for all data
## stores if no other has already been specified by the user in
## :bro:see:`Cluster::stores`. This will be used when script authors call
## :bro:see:`Cluster::create_store` with the *persistent* argument set true.
const default_persistent_backend = Broker::SQLITE &redef;
## Setting a default dir will, for persistent backends that have not
## been given an explicit file path via :bro:see:`Cluster::stores`,
## automatically create a path within this dir that is based on the name of
## the data store.
const default_store_dir = "" &redef;
## Information regarding a cluster-enabled data store.
type StoreInfo: record {
## The name of the data store.
name: string &optional;
## The store handle.
store: opaque of Broker::Store &optional;
## The name of the cluster node on which the master version of the data
## store resides.
master_node: string &default=default_master_node;
## Whether the data store is the master version or a clone.
master: bool &default=F;
## The type of backend used for storing data.
backend: Broker::BackendType &default=default_backend;
## Parameters used for configuring the backend.
options: Broker::BackendOptions &default=Broker::BackendOptions();
## A resync/reconnect interval to pass through to
## :bro:see:`Broker::create_clone`.
clone_resync_interval: interval &default=Broker::default_clone_resync_interval;
## A staleness duration to pass through to
## :bro:see:`Broker::create_clone`.
clone_stale_interval: interval &default=Broker::default_clone_stale_interval;
## A mutation buffer interval to pass through to
## :bro:see:`Broker::create_clone`.
clone_mutation_buffer_interval: interval &default=Broker::default_clone_mutation_buffer_interval;
};
## A table of cluster-enabled data stores that have been created, indexed
## by their name. This table will be populated automatically by
## :bro:see:`Cluster::create_store`, but if you need to customize
## the options related to a particular data store, you may redef this
## table. Calls to :bro:see:`Cluster::create_store` will first check
## the table for an entry of the same name and, if found, will use the
## predefined options there when setting up the store.
global stores: table[string] of StoreInfo &default=StoreInfo() &redef;
## Sets up a cluster-enabled data store. They will also still properly
## function for uses that are not operating a cluster.
##
## name: the name of the data store to create.
##
## persistent: whether the data store must be persistent.
##
## Returns: the store's information. For master stores, the store will be
## ready to use immediately. For clones, the store field will not
## be set until the node containing the master store has connected.
global create_store: function(name: string, persistent: bool &default=F): StoreInfo;
## The cluster logging stream identifier. ## The cluster logging stream identifier.
redef enum Log::ID += { LOG }; redef enum Log::ID += { LOG };
@ -18,6 +119,8 @@ export {
type Info: record { type Info: record {
## The time at which a cluster message was generated. ## The time at which a cluster message was generated.
ts: time; ts: time;
## The name of the node that is creating the log record.
node: string;
## A message indicating information about the cluster's operation. ## A message indicating information about the cluster's operation.
message: string; message: string;
} &log; } &log;
@ -46,43 +149,6 @@ export {
TIME_MACHINE, TIME_MACHINE,
}; };
## Events raised by a manager and handled by the workers.
const manager2worker_events = /Drop::.*/ &redef;
## Events raised by a manager and handled by proxies.
const manager2proxy_events = /EMPTY/ &redef;
## Events raised by a manager and handled by loggers.
const manager2logger_events = /EMPTY/ &redef;
## Events raised by proxies and handled by loggers.
const proxy2logger_events = /EMPTY/ &redef;
## Events raised by proxies and handled by a manager.
const proxy2manager_events = /EMPTY/ &redef;
## Events raised by proxies and handled by workers.
const proxy2worker_events = /EMPTY/ &redef;
## Events raised by workers and handled by loggers.
const worker2logger_events = /EMPTY/ &redef;
## Events raised by workers and handled by a manager.
const worker2manager_events = /(TimeMachine::command|Drop::.*)/ &redef;
## Events raised by workers and handled by proxies.
const worker2proxy_events = /EMPTY/ &redef;
## Events raised by TimeMachine instances and handled by a manager.
const tm2manager_events = /EMPTY/ &redef;
## Events raised by TimeMachine instances and handled by workers.
const tm2worker_events = /EMPTY/ &redef;
## Events sent by the control host (i.e., BroControl) when dynamically
## connecting to a running instance to update settings or request data.
const control_events = Control::controller_events &redef;
## Record type to indicate a node in a cluster. ## Record type to indicate a node in a cluster.
type Node: record { type Node: record {
## Identifies the type of cluster node in this node's configuration. ## Identifies the type of cluster node in this node's configuration.
@ -92,22 +158,17 @@ export {
## If the *ip* field is a non-global IPv6 address, this field ## If the *ip* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``. ## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &default=""; zone_id: string &default="";
## The port to which this local node can connect when ## The port that this node will listen on for peer connections.
## establishing communication.
p: port; p: port;
## Identifier for the interface a worker is sniffing. ## Identifier for the interface a worker is sniffing.
interface: string &optional; interface: string &optional;
## Name of the logger node this node uses. For manager, proxies and workers.
logger: string &optional;
## Name of the manager node this node uses. For workers and proxies. ## Name of the manager node this node uses. For workers and proxies.
manager: string &optional; manager: string &optional;
## Name of the proxy node this node uses. For workers and managers.
proxy: string &optional;
## Names of worker nodes that this node connects with.
## For managers and proxies.
workers: set[string] &optional;
## Name of a time machine node with which this node connects. ## Name of a time machine node with which this node connects.
time_machine: string &optional; time_machine: string &optional;
## A unique identifier assigned to the node by the broker framework.
## This field is only set while a node is connected.
id: string &optional;
}; };
## This function can be called at any time to determine if the cluster ## This function can be called at any time to determine if the cluster
@ -134,6 +195,8 @@ export {
## named cluster-layout.bro somewhere in the BROPATH. It will be ## named cluster-layout.bro somewhere in the BROPATH. It will be
## automatically loaded if the CLUSTER_NODE environment variable is set. ## automatically loaded if the CLUSTER_NODE environment variable is set.
## Note that BroControl handles all of this automatically. ## Note that BroControl handles all of this automatically.
## The table is typically indexed by node names/labels (e.g. "manager"
## or "worker-1").
const nodes: table[string] of Node = {} &redef; const nodes: table[string] of Node = {} &redef;
## Indicates whether or not the manager will act as the logger and receive ## Indicates whether or not the manager will act as the logger and receive
@ -148,6 +211,60 @@ export {
## Interval for retrying failed connections between cluster nodes. ## Interval for retrying failed connections between cluster nodes.
const retry_interval = 1min &redef; const retry_interval = 1min &redef;
## When using broker-enabled cluster framework, nodes broadcast this event
## to exchange their user-defined name along with a string that uniquely
## identifies it for the duration of its lifetime. This string may change
## if the node dies and has to reconnect later.
global hello: event(name: string, id: string);
## When using broker-enabled cluster framework, this event will be emitted
## locally whenever a cluster node connects or reconnects.
global node_up: event(name: string, id: string);
## When using broker-enabled cluster framework, this event will be emitted
## locally whenever a connected cluster node becomes disconnected.
global node_down: event(name: string, id: string);
## Write a message to the cluster logging stream.
global log: function(msg: string);
## Retrieve the topic associated with a specific node in the cluster.
##
## name: the name of the cluster node (e.g. "manager").
##
## Returns: a topic string that may used to send a message exclusively to
## a given cluster node.
global node_topic: function(name: string): string;
}
## Convenience pairing of a node's configured name with its full
## configuration record, used when iterating the cluster layout in a
## deterministic order (see nodes_with_type).
type NamedNode: record {
	## The node's label in :bro:see:`Cluster::nodes` (e.g. "worker-1").
	name: string;
	## The node's configuration record from :bro:see:`Cluster::nodes`.
	node: Node;
};
## Retrieve all configured cluster nodes of a given type, sorted by node
## name so the result is deterministic across runs.
##
## node_type: the type of node to filter for (e.g. Cluster::WORKER).
##
## Returns: matching entries of :bro:see:`Cluster::nodes`, paired with
##          their names, in lexicographic name order.
function nodes_with_type(node_type: NodeType): vector of NamedNode
	{
	local rval: vector of NamedNode = vector();
	local names: vector of string = vector();

	for ( name in Cluster::nodes )
		names[|names|] = name;

	# Table iteration order is not deterministic, so sort by name: pool
	# membership assignment relies on a stable ordering between runs.
	names = sort(names, strcmp);

	for ( i in names )
		{
		name = names[i];
		local n = Cluster::nodes[name];

		if ( n$node_type != node_type )
			next;

		rval[|rval|] = NamedNode($name=name, $node=n);
		}

	return rval;
	}
function is_enabled(): bool function is_enabled(): bool
## Retrieve the type of the node this script is executing on.
##
## Returns: the local node's :bro:see:`Cluster::NodeType`, or NONE when
##          clusterization is not enabled.
function local_node_type(): NodeType
	{
	return is_enabled() ? nodes[node]$node_type : NONE;
	}
## Build the topic string that addresses exactly one cluster node.
##
## name: the name of the cluster node (e.g. "manager").
##
## Returns: a topic to which only the given node subscribes.
function node_topic(name: string): string
	{
	return node_topic_prefix + name;
	}
## Handle a peer announcing itself: validate it against the configured
## cluster layout, record its Broker endpoint ID, and raise
## :bro:see:`Cluster::node_up` the first time the node is seen.
event Cluster::hello(name: string, id: string) &priority=10
	{
	if ( name !in nodes )
		{
		Reporter::error(fmt("Got Cluster::hello msg from unexpected node: %s", name));
		return;
		}

	local n = nodes[name];

	if ( n?$id )
		{
		# We already track an endpoint ID for this name; a different ID
		# means two distinct nodes claim the same name.
		if ( n$id != id )
			Reporter::error(fmt("Got Cluster::hello msg from duplicate node:%s",
			                    name));
		}
	else
		event Cluster::node_up(name, id);

	n$id = id;
	Cluster::log(fmt("got hello from %s (%s)", name, id));

	if ( n$node_type == WORKER )
		++worker_count;
	}
## Upon establishing a new Broker peering, broadcast our own name and
## Broker endpoint ID so peers can map the connection to a cluster node.
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10
	{
	if ( ! Cluster::is_enabled() )
		return;

	local e = Broker::make_event(Cluster::hello, node, Broker::node_id());
	Broker::publish(Cluster::broadcast_topic, e);
	}
## Upon losing a Broker peering, find the cluster node that owned the
## endpoint, mark it down, and raise :bro:see:`Cluster::node_down`.
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) &priority=10
	{
	for ( node_name in nodes )
		{
		local n = nodes[node_name];

		if ( n?$id && n$id == endpoint$id )
			{
			Cluster::log(fmt("node down: %s", node_name));
			# Clearing the id marks the node as disconnected.
			delete n$id;

			if ( n$node_type == WORKER )
				--worker_count;

			event Cluster::node_down(node_name, endpoint$id);
			break;
			}
		}
	}
event bro_init() &priority=5 event bro_init() &priority=5
@ -183,3 +348,90 @@ event bro_init() &priority=5
Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster"]); Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster"]);
} }
## Set up a cluster-enabled data store. Uses any options predefined in
## the "stores" table for this name; otherwise starts from defaults.
function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo
	{
	# &default on "stores" yields a fresh StoreInfo for unknown names.
	local info = stores[name];
	info$name = name;
	# Point file-backed backends at default_store_dir unless the user
	# already overrode the backend's default path.
	if ( Cluster::default_store_dir != "" )
		{
		local default_options = Broker::BackendOptions();
		local path = Cluster::default_store_dir + "/" + name;
		if ( info$options$sqlite$path == default_options$sqlite$path )
			info$options$sqlite$path = path + ".sqlite";
		if ( info$options$rocksdb$path == default_options$rocksdb$path )
			info$options$rocksdb$path = path + ".rocksdb";
		}
	# Upgrade a memory backend to the default persistent one on request.
	if ( persistent )
		{
		switch ( info$backend ) {
		case Broker::MEMORY:
			info$backend = Cluster::default_persistent_backend;
			break;
		case Broker::SQLITE:
			fallthrough;
		case Broker::ROCKSDB:
			# no-op: user already asked for a specific persistent backend.
			break;
		default:
			Reporter::error(fmt("unhandled data store type: %s", info$backend));
			break;
		}
		}
	# Standalone operation: every store is simply a local master.
	if ( ! Cluster::is_enabled() )
		{
		if ( info?$store )
			{
			Reporter::warning(fmt("duplicate cluster store creation for %s", name));
			return info;
			}
		info$store = Broker::create_master(name, info$backend, info$options);
		info$master = T;
		stores[name] = info;
		return info;
		}
	# Default the master node to the (single) manager when unset.
	if ( info$master_node == "" )
		{
		local mgr_nodes = nodes_with_type(Cluster::MANAGER);
		if ( |mgr_nodes| == 0 )
			Reporter::fatal(fmt("empty master node name for cluster store " +
			                    "'%s', but there's no manager node to default",
			                    name));
		info$master_node = mgr_nodes[0]$name;
		}
	else if ( info$master_node !in Cluster::nodes )
		Reporter::fatal(fmt("master node '%s' for cluster store '%s' does not exist",
		                    info$master_node, name));
	# This node hosts the master copy.
	if ( Cluster::node == info$master_node )
		{
		info$store = Broker::create_master(name, info$backend, info$options);
		info$master = T;
		stores[name] = info;
		Cluster::log(fmt("created master store: %s", name));
		return info;
		}
	# Otherwise attach a clone that syncs from the master node.
	info$master = F;
	stores[name] = info;
	info$store = Broker::create_clone(info$name,
	                                  info$clone_resync_interval,
	                                  info$clone_stale_interval,
	                                  info$clone_mutation_buffer_interval);
	Cluster::log(fmt("created clone store: %s", info$name));
	return info;
	}
## Write one entry to the cluster log, stamped with the current network
## time and this node's name.
function log(msg: string)
	{
	local entry = Info($ts = network_time(), $node = node, $message = msg);
	Log::write(Cluster::LOG, entry);
	}

View file

@ -0,0 +1,440 @@
##! Defines an interface for managing pools of cluster nodes. Pools are
##! a useful way to distribute work or data among nodes within a cluster.
@load ./main
@load base/utils/hash_hrw
module Cluster;
export {
	## Store state of a cluster within the context of a work pool.
type PoolNode: record {
## The node name (e.g. "manager").
name: string;
## An alias of *name* used to prevent hashing collisions when creating
## *site_id*.
alias: string;
## A 32-bit unique identifier for the pool node, derived from name/alias.
site_id: count;
## Whether the node is currently alive and can receive work.
alive: bool &default=F;
};
## A pool specification.
type PoolSpec: record {
## A topic string that can be used to reach all nodes within a pool.
topic: string &default = "";
## The type of nodes that are contained within the pool.
node_type: Cluster::NodeType &default = Cluster::PROXY;
## The maximum number of nodes that may belong to the pool.
## If not set, then all available nodes will be added to the pool,
## else the cluster framework will automatically limit the pool
		## membership according to the threshold.
max_nodes: count &optional;
## Whether the pool requires exclusive access to nodes. If true,
## then *max_nodes* nodes will not be assigned to any other pool.
## When using this flag, *max_nodes* must also be set.
exclusive: bool &default = F;
};
type PoolNodeTable: table[string] of PoolNode;
type RoundRobinTable: table[string] of int;
## A pool used for distributing data/work among a set of cluster nodes.
type Pool: record {
## The specification of the pool that was used when registering it.
spec: PoolSpec &default = PoolSpec();
## Nodes in the pool, indexed by their name (e.g. "manager").
nodes: PoolNodeTable &default = PoolNodeTable();
## A list of nodes in the pool in a deterministic order.
node_list: vector of PoolNode &default = vector();
## The Rendezvous hashing structure.
hrw_pool: HashHRW::Pool &default = HashHRW::Pool();
## Round-Robin table indexed by arbitrary key and storing the next
## index of *node_list* that will be eligible to receive work (if it's
## alive at the time of next request).
rr_key_seq: RoundRobinTable &default = RoundRobinTable();
};
## The specification for :bro:see:`Cluster::proxy_pool`.
global proxy_pool_spec: PoolSpec =
PoolSpec($topic = "bro/cluster/pool/proxy",
$node_type = Cluster::PROXY) &redef;
## The specification for :bro:see:`Cluster::worker_pool`.
global worker_pool_spec: PoolSpec =
PoolSpec($topic = "bro/cluster/pool/worker",
$node_type = Cluster::WORKER) &redef;
## The specification for :bro:see:`Cluster::logger_pool`.
global logger_pool_spec: PoolSpec =
PoolSpec($topic = "bro/cluster/pool/logger",
$node_type = Cluster::LOGGER) &redef;
## A pool containing all the proxy nodes of a cluster.
## The pool's node membership/availability is automatically
## maintained by the cluster framework.
global proxy_pool: Pool;
## A pool containing all the worker nodes of a cluster.
## The pool's node membership/availability is automatically
## maintained by the cluster framework.
global worker_pool: Pool;
## A pool containing all the logger nodes of a cluster.
## The pool's node membership/availability is automatically
## maintained by the cluster framework.
global logger_pool: Pool;
## Registers and initializes a pool.
global register_pool: function(spec: PoolSpec): Pool;
## Retrieve the topic associated with the node mapped via Rendezvous hash
## of an arbitrary key.
##
## pool: the pool of nodes to consider.
##
## key: data used for input to the hashing function that will uniformly
## distribute keys among available nodes.
##
## Returns: a topic string associated with a cluster node that is alive
## or an empty string if nothing is alive.
global hrw_topic: function(pool: Pool, key: any): string;
## Retrieve the topic associated with the node in a round-robin fashion.
##
## pool: the pool of nodes to consider.
##
## key: an arbitrary string to identify the purpose for which you're
## requesting the topic. e.g. consider using namespacing of your script
## like "Intel::cluster_rr_key".
##
## Returns: a topic string associated with a cluster node that is alive,
## or an empty string if nothing is alive.
global rr_topic: function(pool: Pool, key: string): string;
## Distributes log message topics among logger nodes via round-robin.
## This will be automatically assigned to :bro:see:`Broker::log_topic`
## if :bro:see:`Cluster::enable_round_robin_logging` is enabled.
## If no logger nodes are active, then this will return the value
## of :bro:see:`Broker::default_log_topic`.
global rr_log_topic: function(id: Log::ID, path: string): string;
}
## Initialize a node as a member of a pool.
##
## pool: the pool to which the node will belong.
##
## name: the name of the node (e.g. "manager").
##
## Returns: F if a node of the same name already exists in the pool, else T.
global init_pool_node: function(pool: Pool, name: string): bool;
## Mark a pool node as alive/online/available. :bro:see:`Cluster::hrw_topic`
## will distribute keys to nodes marked as alive.
##
## pool: the pool to which the node belongs.
##
## name: the name of the node to mark.
##
## Returns: F if the node does not exist in the pool, else T.
global mark_pool_node_alive: function(pool: Pool, name: string): bool;
## Mark a pool node as dead/offline/unavailable. :bro:see:`Cluster::hrw_topic`
## will not distribute keys to nodes marked as dead.
##
## pool: the pool to which the node belongs.
##
## name: the name of the node to mark.
##
## Returns: F if the node does not exist in the pool, else T.
global mark_pool_node_dead: function(pool: Pool, name: string): bool;
global registered_pools: vector of Pool = vector();
## Create a pool from the given specification and remember it so the
## startup logic can assign node membership later.
function register_pool(spec: PoolSpec): Pool
	{
	local new_pool = Pool($spec = spec);
	registered_pools[|registered_pools|] = new_pool;
	return new_pool;
	}
## Map an arbitrary key to the topic of a live pool node using
## Rendezvous (HRW) hashing.
function hrw_topic(pool: Pool, key: any): string
	{
	# Nothing alive in the pool means there is nothing to address.
	if ( |pool$hrw_pool$sites| == 0 )
		return "";

	local chosen: PoolNode = HashHRW::get_site(pool$hrw_pool, key)$user_data;
	return node_topic_prefix + chosen$name;
	}
## Pick the next live node for the given key in round-robin order and
## return its topic; returns "" when no node in the pool is alive.
function rr_topic(pool: Pool, key: string): string
	{
	# Each distinct key maintains its own independent rotation cursor.
	if ( key !in pool$rr_key_seq )
		pool$rr_key_seq[key] = 0;
	local next_idx = pool$rr_key_seq[key];
	local start = next_idx;
	local rval = "";
	# Guards against an empty node list (cursor can never be valid).
	if ( next_idx >= |pool$node_list| )
		return rval;
	# Walk the list at most once, wrapping around, until a live node is
	# found or we return to the starting position.
	while ( T )
		{
		local pn = pool$node_list[next_idx];
		++next_idx;
		if ( next_idx == |pool$node_list| )
			next_idx = 0;
		if ( pn$alive )
			{
			rval = node_topic_prefix + pn$name;
			break;
			}
		if ( next_idx == start )
			# no nodes alive
			break;
		}
	# Persist the advanced cursor for the next request with this key.
	pool$rr_key_seq[key] = next_idx;
	return rval;
	}
## Choose a log topic by round-robin over the logger pool, falling back
## to the default Broker log topic when no logger node is alive.
function rr_log_topic(id: Log::ID, path: string): string
	{
	local topic = rr_topic(logger_pool, "Cluster::rr_log_topic");

	if ( topic == "" )
		return Broker::default_log_topic(id, path);

	return topic;
	}
## When a cluster node comes up, mark it alive in every registered pool
## that counts it as a member.
event Cluster::node_up(name: string, id: string) &priority=10
	{
	for ( idx in registered_pools )
		{
		local p = registered_pools[idx];

		if ( name in p$nodes )
			mark_pool_node_alive(p, name);
		}
	}
## When a cluster node goes away, mark it dead in every registered pool
## that counts it as a member.
event Cluster::node_down(name: string, id: string) &priority=10
	{
	for ( idx in registered_pools )
		{
		local p = registered_pools[idx];

		if ( name in p$nodes )
			mark_pool_node_dead(p, name);
		}
	}
## Check whether some node already in the pool uses the given 32-bit
## site identifier.
function site_id_in_pool(pool: Pool, site_id: count): bool
	{
	for ( node_name in pool$nodes )
		{
		if ( pool$nodes[node_name]$site_id == site_id )
			return T;
		}

	return F;
	}
## Add a node to a pool, deriving a unique 32-bit site ID (for use with
## Rendezvous hashing) from the node's name, with an alias-based retry
## scheme to resolve hash collisions.
function init_pool_node(pool: Pool, name: string): bool
	{
	if ( name in pool$nodes )
		return F;
	local loop = T;
	local c = 0;
	while ( loop )
		{
		# site id collisions are unlikely, but using aliases handles it...
		# alternatively could terminate and ask user to pick a new node name
		# if it ends up colliding.
		local alias = name + fmt(".%s", c);
		local site_id = fnv1a32(alias);
		if ( site_id_in_pool(pool, site_id) )
			++c;
		else
			{
			# Only the local node starts out alive; remote nodes are
			# marked alive later via Cluster::node_up.
			local pn = PoolNode($name=name, $alias=alias, $site_id=site_id,
			                    $alive=Cluster::node == name);
			pool$nodes[name] = pn;
			pool$node_list[|pool$node_list|] = pn;
			loop = F;
			}
		}
	return T;
	}
## Flag a pool node as available and insert its site into the pool's
## Rendezvous hashing structure so it can receive keys again.
function mark_pool_node_alive(pool: Pool, name: string): bool
	{
	if ( name !in pool$nodes )
		return F;

	local pnode = pool$nodes[name];
	pnode$alive = T;
	HashHRW::add_site(pool$hrw_pool,
	                  HashHRW::Site($id=pnode$site_id, $user_data=pnode));
	return T;
	}
## Flag a pool node as unavailable and remove its site from the pool's
## Rendezvous hashing structure so it no longer receives keys.
function mark_pool_node_dead(pool: Pool, name: string): bool
	{
	if ( name !in pool$nodes )
		return F;

	local pnode = pool$nodes[name];
	pnode$alive = F;
	HashHRW::rem_site(pool$hrw_pool,
	                  HashHRW::Site($id=pnode$site_id, $user_data=pnode));
	return T;
	}
# Register the standard pools at startup. Registration only records the
# specs; node membership is assigned later by the priority=-5 bro_init.
event bro_init()
	{
	worker_pool = register_pool(worker_pool_spec);
	proxy_pool = register_pool(proxy_pool_spec);
	logger_pool = register_pool(logger_pool_spec);
	}
## Bookkeeping used while distributing cluster nodes among registered
## pools at startup.
type PoolEligibilityTracking: record {
	## Nodes of the tracked type still available for pool membership.
	eligible_nodes: vector of NamedNode &default = vector();
	## Next index into *eligible_nodes* for round-robin assignment.
	next_idx: count &default = 0;
	## Number of nodes claimed by exclusive pools of this type.
	excluded: count &default = 0;
};
# Per-node-type tracking state, filled in by the priority=-5 bro_init.
global pool_eligibility: table[Cluster::NodeType] of PoolEligibilityTracking = table();
## Comparison function ordering pools by their topic string, used to
## make pool processing order deterministic across runs.
function pool_sorter(a: Pool, b: Pool): int
	{
	return strcmp(a$spec$topic, b$spec$topic);
	}
# Needs to execute before the bro_init in setup-connections
# Distributes cluster nodes among the registered pools in three passes:
# (1) tally exclusivity claims, (2) hand exclusive pools their nodes,
# (3) fill the remaining pools round-robin.
event bro_init() &priority=-5
	{
	if ( ! Cluster::is_enabled() )
		return;
	# Sorting now ensures the node distribution process is stable even if
	# there's a change in the order of time-of-registration between Bro runs.
	sort(registered_pools, pool_sorter);
	# Gather the nodes eligible for pool membership, per node type.
	pool_eligibility[Cluster::WORKER] =
		PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::WORKER));
	pool_eligibility[Cluster::PROXY] =
		PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::PROXY));
	pool_eligibility[Cluster::LOGGER] =
		PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::LOGGER));
	# A manager doubling as logger is also eligible for logger pools.
	if ( manager_is_logger )
		{
		local mgr = nodes_with_type(Cluster::MANAGER);
		if ( |mgr| > 0 )
			{
			local eln = pool_eligibility[Cluster::LOGGER]$eligible_nodes;
			eln[|eln|] = mgr[0];
			}
		}
	local pool: Pool;
	local pet: PoolEligibilityTracking;
	local en: vector of NamedNode;
	# Pass 1: count how many nodes the exclusive pools will claim.
	for ( i in registered_pools )
		{
		pool = registered_pools[i];
		if ( pool$spec$node_type !in pool_eligibility )
			Reporter::fatal(fmt("invalid pool node type: %s", pool$spec$node_type));
		if ( ! pool$spec$exclusive )
			next;
		if ( ! pool$spec?$max_nodes )
			Reporter::fatal("Cluster::PoolSpec 'max_nodes' field must be set when using the 'exclusive' flag");
		pet = pool_eligibility[pool$spec$node_type];
		pet$excluded += pool$spec$max_nodes;
		}
	# Verify there are enough nodes to satisfy all exclusivity claims.
	for ( nt in pool_eligibility )
		{
		pet = pool_eligibility[nt];
		if ( pet$excluded > |pet$eligible_nodes| )
			Reporter::fatal(fmt("not enough %s nodes to satisfy pool exclusivity requirements: need %d nodes", nt, pet$excluded));
		}
	# Pass 2: hand exclusive pools their nodes and remove those nodes
	# from the eligibility list so no other pool can claim them.
	for ( i in registered_pools )
		{
		pool = registered_pools[i];
		if ( ! pool$spec$exclusive )
			next;
		pet = pool_eligibility[pool$spec$node_type];
		local e = 0;
		while ( e < pool$spec$max_nodes )
			{
			init_pool_node(pool, pet$eligible_nodes[e]$name);
			++e;
			}
		# Rebuild the eligibility list without the first e (claimed) nodes.
		local nen: vector of NamedNode = vector();
		for ( j in pet$eligible_nodes )
			{
			if ( j < e )
				next;
			nen[|nen|] = pet$eligible_nodes[j];
			}
		pet$eligible_nodes = nen;
		}
	# Pass 3: fill non-exclusive pools round-robin from the remaining
	# eligible nodes, honoring each pool's optional max_nodes cap.
	for ( i in registered_pools )
		{
		pool = registered_pools[i];
		if ( pool$spec$exclusive )
			next;
		pet = pool_eligibility[pool$spec$node_type];
		local nodes_to_init = |pet$eligible_nodes|;
		if ( pool$spec?$max_nodes &&
		     pool$spec$max_nodes < |pet$eligible_nodes| )
			nodes_to_init = pool$spec$max_nodes;
		local nodes_inited = 0;
		while ( nodes_inited < nodes_to_init )
			{
			init_pool_node(pool, pet$eligible_nodes[pet$next_idx]$name);
			++nodes_inited;
			++pet$next_idx;
			if ( pet$next_idx == |pet$eligible_nodes| )
				pet$next_idx = 0;
			}
		}
	}

View file

@ -2,142 +2,122 @@
##! as defined by :bro:id:`Cluster::nodes`. ##! as defined by :bro:id:`Cluster::nodes`.
@load ./main @load ./main
@load base/frameworks/communication @load ./pools
@load base/frameworks/broker
@if ( Cluster::node in Cluster::nodes )
module Cluster; module Cluster;
## Initiate a Broker peering with one specific node of the given type,
## as configured in the cluster layout.
##
## node_type: the type of node to connect to (e.g. Cluster::MANAGER).
##
## node_name: the name of the node to connect to (e.g. "manager").
function connect_peer(node_type: NodeType, node_name: string)
	{
	local nn = nodes_with_type(node_type);

	for ( i in nn )
		{
		local n = nn[i];

		if ( n$name != node_name )
			next;

		local status = Broker::peer(cat(n$node$ip), n$node$p,
		                            Cluster::retry_interval);
		Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s",
		                 n$node$ip, n$node$p, Cluster::retry_interval,
		                 status));
		}
	}
## Initiate Broker peerings with every configured node of the given
## type.
##
## node_type: the type of nodes to connect to (e.g. Cluster::LOGGER).
function connect_peers_with_type(node_type: NodeType)
	{
	local nn = nodes_with_type(node_type);

	for ( i in nn )
		{
		local n = nn[i];
		local status = Broker::peer(cat(n$node$ip), n$node$p,
		                            Cluster::retry_interval);
		Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s",
		                 n$node$ip, n$node$p, Cluster::retry_interval,
		                 status));
		}
	}
# Subscribe to the topics relevant for this node's role, start listening
# for peers, and initiate outbound peerings per the cluster layout.
event bro_init() &priority=-10
	{
	local self = nodes[node];

	# Subscribe to the topic of every pool this node is a member of.
	for ( i in registered_pools )
		{
		local pool = registered_pools[i];

		if ( node in pool$nodes )
			Broker::subscribe(pool$spec$topic);
		}

	switch ( self$node_type ) {
	case NONE:
		return;
	case CONTROL:
		break;
	case LOGGER:
		Broker::subscribe(Cluster::logger_topic);
		Broker::subscribe(Broker::default_log_topic_prefix);
		break;
	case MANAGER:
		Broker::subscribe(Cluster::manager_topic);

		if ( Cluster::manager_is_logger )
			Broker::subscribe(Broker::default_log_topic_prefix);

		break;
	case PROXY:
		Broker::subscribe(Cluster::proxy_topic);
		break;
	case WORKER:
		Broker::subscribe(Cluster::worker_topic);
		break;
	case TIME_MACHINE:
		Broker::subscribe(Cluster::time_machine_topic);
		break;
	default:
		Reporter::error(fmt("Unhandled cluster node type: %s", self$node_type));
		return;
	}

	# Topics every cluster node listens on.
	Broker::subscribe(Cluster::broadcast_topic);
	Broker::subscribe(node_topic(node));

	Broker::listen(Broker::default_listen_address,
	               self$p,
	               Broker::default_listen_retry);

	Cluster::log(fmt("listening on %s:%s", Broker::default_listen_address, self$p));

	# Initiate the outbound connections appropriate for this node's role.
	switch ( self$node_type ) {
	case MANAGER:
		connect_peers_with_type(LOGGER);

		if ( self?$time_machine )
			connect_peer(TIME_MACHINE, self$time_machine);

		break;
	case PROXY:
		connect_peers_with_type(LOGGER);

		if ( self?$manager )
			connect_peer(MANAGER, self$manager);

		break;
	case WORKER:
		connect_peers_with_type(LOGGER);
		connect_peers_with_type(PROXY);

		if ( self?$manager )
			connect_peer(MANAGER, self$manager);

		if ( self?$time_machine )
			connect_peer(TIME_MACHINE, self$time_machine);

		break;
	}
	}

View file

@ -1,2 +0,0 @@
The communication framework facilitates connecting to remote Bro or
Broccoli instances to share state and transfer events.

View file

@ -1 +0,0 @@
@load ./main

View file

@ -1,354 +0,0 @@
##! Facilitates connecting to remote Bro or Broccoli instances to share state
##! and/or transfer events.
@load base/frameworks/packet-filter
@load base/utils/addrs
module Communication;
export {
## The communication logging stream identifier.
redef enum Log::ID += { LOG };
## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]``
## are wildcards.
const listen_interface = 0.0.0.0 &redef;
## Which port to listen on. Note that BroControl sets this
## automatically.
const listen_port = 47757/tcp &redef;
## This defines if a listening socket should use SSL.
const listen_ssl = F &redef;
## Defines if a listening socket can bind to IPv6 addresses.
##
## Note that this is overridden by the BroControl IPv6Comm option.
const listen_ipv6 = F &redef;
## If :bro:id:`Communication::listen_interface` is a non-global
## IPv6 address and requires a specific :rfc:`4007` ``zone_id``,
## it can be specified here.
const listen_ipv6_zone_id = "" &redef;
## Defines the interval at which to retry binding to
## :bro:id:`Communication::listen_interface` on
## :bro:id:`Communication::listen_port` if it's already in use.
const listen_retry = 30 secs &redef;
## Default compression level. Compression level is 0-9, with 0 = no
## compression.
global compression_level = 0 &redef;
## A record type containing the column fields of the communication log.
type Info: record {
## The network time at which a communication event occurred.
ts: time &log;
## The peer name (if any) with which a communication event is
## concerned.
peer: string &log &optional;
## Where the communication event message originated from, that
## is, either from the scripting layer or inside the Bro process.
src_name: string &log &optional;
## .. todo:: currently unused.
connected_peer_desc: string &log &optional;
## .. todo:: currently unused.
connected_peer_addr: addr &log &optional;
## .. todo:: currently unused.
connected_peer_port: port &log &optional;
## The severity of the communication event message.
level: string &log &optional;
## A message describing the communication event between Bro or
## Broccoli instances.
message: string &log;
};
## A remote peer to which we would like to talk.
## If there's no entry for a peer, it may still connect
## and request state, but not send us any.
type Node: record {
## Remote address.
host: addr;
## If the *host* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &optional;
## Port of the remote Bro communication endpoint if we are
## initiating the connection (based on the *connect* field).
p: port &optional;
## When accepting a connection, the configuration only
## applies if the class matches the one transmitted by
## the peer.
##
## When initiating a connection, the class is sent to
## the other side.
class: string &optional;
## Events requested from remote side.
events: pattern &optional;
## Whether we are going to connect (rather than waiting
## for the other side to connect to us).
connect: bool &default = F;
## If disconnected, reconnect after this many seconds.
retry: interval &default = 0 secs;
## Whether to accept remote events.
accept_input: bool &default = T;
## Whether to perform state synchronization with peer.
sync: bool &default = F;
## Whether to request logs from the peer.
request_logs: bool &default = F;
## When performing state synchronization, whether we consider
## our state to be authoritative (only one side can be
## authoritative). If so, we will send the peer our current
## set when the connection is set up.
auth: bool &default = F;
## If not set, no capture filter is sent.
## If set to an empty string, then the default capture filter
## is sent.
capture_filter: string &optional;
## Whether to use SSL-based communication.
ssl: bool &default = F;
## Compression level is 0-9, with 0 = no compression.
compression: count &default = compression_level;
## The remote peer.
peer: event_peer &optional;
## Indicates the status of the node.
connected: bool &default = F;
};
## The table of Bro or Broccoli nodes that Bro will initiate connections
## to or respond to connections from. Note that BroControl sets this
## automatically.
global nodes: table[string] of Node &redef;
## A table of peer nodes for which this node issued a
## :bro:id:`Communication::connect_peer` call but with which a connection
## has not yet been established or with which a connection has been
## closed and is currently in the process of retrying to establish.
## When a connection is successfully established, the peer is removed
## from the table.
global pending_peers: table[peer_id] of Node;
## A table of peer nodes for which this node has an established connection.
## Peers are automatically removed if their connection is closed and
## automatically added back if a connection is re-established later.
global connected_peers: table[peer_id] of Node;
## Connect to a node in :bro:id:`Communication::nodes` independent
## of its "connect" flag.
##
## peer: the string used to index a particular node within the
## :bro:id:`Communication::nodes` table.
global connect_peer: function(peer: string);
}
# Maps the core's remote-source constants to the textual names written
# into the communication log's "src_name" field (see do_script_log_common).
const src_names = {
[REMOTE_SRC_CHILD] = "child",
[REMOTE_SRC_PARENT] = "parent",
[REMOTE_SRC_SCRIPT] = "script",
};
# Create the communication framework's log stream early (priority 5),
# so that later bro_init handlers and connection events can write to it.
event bro_init() &priority=5
{
Log::create_stream(Communication::LOG, [$columns=Info, $path="communication"]);
}
# Central logging helper: writes one entry to the communication log,
# stamping it with the current network time, the textual log level,
# the symbolic source name and the current event peer's description.
function do_script_log_common(level: count, src: count, msg: string)
	{
	# Anything other than REMOTE_LOG_INFO is reported as an error.
	local level_str = level == REMOTE_LOG_INFO ? "info" : "error";

	Log::write(Communication::LOG,
	           [$ts = network_time(),
	            $level = level_str,
	            $src_name = src_names[src],
	            $peer = get_event_peer()$descr,
	            $message = msg]);
	}
# This is a core generated event.
event remote_log(level: count, src: count, msg: string)
{
# Core-generated log message without peer context; forward it to the
# common logging helper unchanged.
do_script_log_common(level, src, msg);
}
# This is a core generated event.
# Like remote_log, but carries the originating peer; the message is
# prefixed with the peer's id, host and port before logging.
event remote_log_peer(p: event_peer, level: count, src: count, msg: string)
	{
	do_script_log_common(level, src,
	                     fmt("[#%d/%s:%d] %s", p$id, addr_to_uri(p$host), p$p, msg));
	}
# Script-level convenience wrapper: logs msg at info level with source
# "script".
# NOTE(review): the peer argument p is not used by this wrapper; the
# peer recorded in the log entry is whatever get_event_peer() returns
# inside do_script_log_common.
function do_script_log(p: event_peer, msg: string)
{
do_script_log_common(REMOTE_LOG_INFO, REMOTE_SRC_SCRIPT, msg);
}
# Initiate a connection to the named node from the Communication::nodes
# table, regardless of the node's "connect" flag.
#
# peer: the index of the node within Communication::nodes.
#
# On success the node is tracked in pending_peers until the handshake
# completes (see remote_connection_established).
function connect_peer(peer: string)
	{
	local node = nodes[peer];

	# Use the node's configured port if present, otherwise fall back to
	# the local listen port.
	local p = listen_port;
	if ( node?$p )
		p = node$p;

	local class = node?$class ? node$class : "";
	local zone_id = node?$zone_id ? node$zone_id : "";
	local id = connect(node$host, zone_id, p, class, node$retry, node$ssl);

	if ( id == PEER_ID_NONE )
		{
		# The connect could not even be triggered. Log the failure and
		# bail out; previously the node was still registered in
		# pending_peers under the PEER_ID_NONE sentinel, which can never
		# match a real peer id.
		Log::write(Communication::LOG,
		           [$ts = network_time(),
		            $peer = get_event_peer()$descr,
		            $message = "can't trigger connect"]);
		return;
		}

	pending_peers[id] = node;
	}
# Configure a freshly connected peer according to its Node record:
# request events and logs, forward the capture filter, set up state
# acceptance/synchronization and compression, and record the peer in
# connected_peers.
function setup_peer(p: event_peer, node: Node)
	{
	# Ask the remote side for the configured event pattern, if any.
	if ( node?$events )
		{
		do_script_log(p, fmt("requesting events matching %s", node$events));
		request_remote_events(p, node$events);
		}

	# A non-empty capture filter is forwarded; an unset field means no
	# filter is sent at all (an empty string would send the default).
	if ( node?$capture_filter && node$capture_filter != "" )
		{
		local cf = node$capture_filter;
		do_script_log(p, fmt("sending capture_filter: %s", cf));
		send_capture_filter(p, cf);
		}

	if ( node$accept_input )
		{
		do_script_log(p, "accepting state");
		set_accept_state(p, T);
		}

	set_compression_level(p, node$compression);

	if ( node$sync )
		{
		do_script_log(p, "requesting synchronized state");
		request_remote_sync(p, node$auth);
		}

	if ( node$request_logs )
		{
		do_script_log(p, "requesting logs");
		request_remote_logs(p);
		}

	# Book-keeping: remember the peer handle and mark the node as connected.
	node$peer = p;
	node$connected = T;
	connected_peers[p$id] = node;
	}
# Handles a newly established connection, locally generated only
# (remote copies of this event are ignored). Distinguishes outgoing
# connections (peer is in pending_peers) from incoming ones, which are
# matched against the nodes table by host and class.
event remote_connection_established(p: event_peer)
{
if ( is_remote_event() )
return;
do_script_log(p, "connection established");
if ( p$id in pending_peers )
{
# We issued the connect.
local node = pending_peers[p$id];
setup_peer(p, node);
delete pending_peers[p$id];
}
else
{ # The other side connected to us.
local found = F;
for ( i in nodes )
{
node = nodes[i];
if ( node$host == p$host )
{
local c = 0;
# See if classes match = either both have
# the same class, or neither of them has
# a class.
# c counts how many sides carry a non-empty class:
# c == 1 means only one side has a class (mismatch);
# c == 2 requires the class strings to be equal.
if ( p?$class && p$class != "" )
++c;
if ( node?$class && node$class != "" )
++c;
if ( c == 1 ||
(c == 2 && p$class != node$class) )
next;
found = T;
setup_peer(p, node);
break;
}
}
# Unknown peer: apply only the default compression level, no
# per-node configuration.
if ( ! found )
set_compression_level(p, compression_level);
}
complete_handshake(p);
}
# Handles a closed connection (local events only): the node is marked
# disconnected and, if it has a retry interval, moved back to
# pending_peers while the core retries on its own.
event remote_connection_closed(p: event_peer)
	{
	if ( is_remote_event() )
		return;

	do_script_log(p, "connection closed");

	if ( p$id !in connected_peers )
		return;

	local node = connected_peers[p$id];
	node$connected = F;
	delete connected_peers[p$id];

	if ( node$retry != 0secs )
		# The core will retry.
		pending_peers[p$id] = node;
	}
# Logs a state-synchronization inconsistency reported by the core:
# operation found id holding real_old where expected_old was expected.
# Remote copies of this event are ignored.
event remote_state_inconsistency(operation: string, id: string,
                                 expected_old: string, real_old: string)
	{
	if ( is_remote_event() )
		return;

	Log::write(Communication::LOG,
	           [$ts = network_time(),
	            $peer = get_event_peer()$descr,
	            $message = fmt("state inconsistency: %s should be %s but is %s before %s",
	                           id, expected_old, real_old, operation)]);
	}
# Actually initiate the connections that need to be established.
# Runs at low priority (-10) so other bro_init handlers can still
# modify the nodes table first.
event bro_init() &priority = -10 # let others modify nodes
	{
	if ( |nodes| > 0 )
		enable_communication();

	for ( tag in nodes )
		{
		# Only nodes explicitly flagged for outgoing connections are dialed.
		if ( nodes[tag]$connect )
			connect_peer(tag);
		}
	}

View file

@ -5,6 +5,13 @@
module Control; module Control;
export { export {
## The topic prefix used for exchanging control messages via Broker.
const topic_prefix = "bro/control";
## Whether the controllee should call :bro:see:`Broker::listen`.
## In a cluster, this isn't needed since the setup process calls it.
const controllee_listen = T &redef;
## The address of the host that will be controlled. ## The address of the host that will be controlled.
const host = 0.0.0.0 &redef; const host = 0.0.0.0 &redef;
@ -22,12 +29,6 @@ export {
## This can be used by commands that take an argument. ## This can be used by commands that take an argument.
const arg = "" &redef; const arg = "" &redef;
## Events that need to be handled by controllers.
const controller_events = /Control::.*_request/ &redef;
## Events that need to be handled by controllees.
const controllee_events = /Control::.*_response/ &redef;
## The commands that can currently be given on the command line for ## The commands that can currently be given on the command line for
## remote control. ## remote control.
const commands: set[string] = { const commands: set[string] = {
@ -73,8 +74,7 @@ export {
global shutdown_response: event(); global shutdown_response: event();
} }
event terminate_event() event terminate_event()
{ {
terminate_communication(); terminate();
} }

View file

@ -6,69 +6,96 @@
module Intel; module Intel;
redef record Item += { export {
## This field is used internally for cluster transparency to avoid ## Broker topic for management of intel items. Sending insert_item and
## re-dispatching intelligence items over and over from workers. ## remove_item events, peers can manage intelligence data.
first_dispatch: bool &default=T; const item_topic = "bro/intel/items" &redef;
};
## Broker topic for management of intel indicators as stored on workers
## for matching. Sending insert_indicator and remove_indicator events,
## the back-end manages indicators.
const indicator_topic = "bro/intel/indicators" &redef;
## Broker topic for matching events, generated by workers and sent to
## the back-end for metadata enrichment and logging.
const match_topic = "bro/intel/match" &redef;
}
# Internal events for cluster data distribution.
global insert_item: event(item: Item);
global insert_indicator: event(item: Item);
# If this process is not a manager process, we don't want the full metadata. # If this process is not a manager process, we don't want the full metadata.
@if ( Cluster::local_node_type() != Cluster::MANAGER ) @if ( Cluster::local_node_type() != Cluster::MANAGER )
redef have_full_data = F; redef have_full_data = F;
@endif @endif
# Internal event for cluster data distribution.
global cluster_new_item: event(item: Item);
# Primary intelligence management is done by the manager.
# The manager informs the workers about new items and item removal.
redef Cluster::manager2worker_events += /^Intel::(cluster_new_item|purge_item)$/;
# A worker queries the manager to insert, remove or indicate the match of an item.
redef Cluster::worker2manager_events += /^Intel::(cluster_new_item|remove_item|match_no_items)$/;
@if ( Cluster::local_node_type() == Cluster::MANAGER ) @if ( Cluster::local_node_type() == Cluster::MANAGER )
event bro_init()
{
Broker::subscribe(item_topic);
Broker::subscribe(match_topic);
Broker::auto_publish(indicator_topic, remove_indicator);
}
# Handling of new worker nodes. # Handling of new worker nodes.
event remote_connection_handshake_done(p: event_peer) event Cluster::node_up(name: string, id: string)
{ {
# When a worker connects, send it the complete minimal data store. # When a worker connects, send it the complete minimal data store.
# It will be kept up to date after this by the cluster_new_item event. # It will be kept up to date after this by the insert_indicator event.
if ( p$descr in Cluster::nodes && Cluster::nodes[p$descr]$node_type == Cluster::WORKER ) if ( name in Cluster::nodes && Cluster::nodes[name]$node_type == Cluster::WORKER )
{ {
send_id(p, "Intel::min_data_store"); Broker::publish_id(Cluster::node_topic(name), "Intel::min_data_store");
} }
} }
# Handling of matches triggered by worker nodes. # On the manager, the new_item event indicates a new indicator that
event Intel::match_no_items(s: Seen) &priority=5 # has to be distributed.
event Intel::new_item(item: Item) &priority=5
{
Broker::publish(indicator_topic, Intel::insert_indicator, item);
}
# Handling of item insertion triggered by remote node.
event Intel::insert_item(item: Intel::Item) &priority=5
{
Intel::_insert(item, T);
}
# Handling of item removal triggered by remote node.
event Intel::remove_item(item: Item, purge_indicator: bool) &priority=5
{
remove(item, purge_indicator);
}
# Handling of match triggered by remote node.
event Intel::match_remote(s: Seen) &priority=5
{ {
if ( Intel::find(s) ) if ( Intel::find(s) )
event Intel::match(s, Intel::get_items(s)); event Intel::match(s, Intel::get_items(s));
} }
# Handling of item removal triggered by worker nodes.
event Intel::remove_item(item: Item, purge_indicator: bool)
{
remove(item, purge_indicator);
}
@endif @endif
# Handling of item insertion. @if ( Cluster::local_node_type() == Cluster::WORKER )
event Intel::new_item(item: Intel::Item) &priority=5 event bro_init()
{ {
# The cluster manager always rebroadcasts intelligence. Broker::subscribe(indicator_topic);
# Workers redistribute it if it was locally generated.
if ( Cluster::local_node_type() == Cluster::MANAGER || Broker::auto_publish(match_topic, match_remote);
item$first_dispatch ) Broker::auto_publish(item_topic, remove_item);
{
item$first_dispatch=F;
event Intel::cluster_new_item(item);
}
} }
# Handling of item insertion by remote node. # On a worker, the new_item event requires to trigger the insertion
event Intel::cluster_new_item(item: Intel::Item) &priority=5 # on the manager to update the back-end data store.
event Intel::new_item(item: Intel::Item) &priority=5
{ {
# Ignore locally generated events to avoid event storms. Broker::publish(item_topic, Intel::insert_item, item);
if ( is_remote_event() )
Intel::insert(item);
} }
# Handling of new indicators published by the manager.
event Intel::insert_indicator(item: Intel::Item) &priority=5
{
Intel::_insert(item, F);
}
@endif

View file

@ -177,12 +177,12 @@ export {
} }
# Internal handler for matches with no metadata available. # Internal handler for matches with no metadata available.
global match_no_items: event(s: Seen); global match_remote: event(s: Seen);
# Internal events for cluster data distribution. # Internal events for (cluster) data distribution.
global new_item: event(item: Item); global new_item: event(item: Item);
global remove_item: event(item: Item, purge_indicator: bool); global remove_item: event(item: Item, purge_indicator: bool);
global purge_item: event(item: Item); global remove_indicator: event(item: Item);
# Optionally store metadata. This is used internally depending on # Optionally store metadata. This is used internally depending on
# if this is a cluster deployment or not. # if this is a cluster deployment or not.
@ -357,7 +357,7 @@ function Intel::seen(s: Seen)
} }
else else
{ {
event Intel::match_no_items(s); event Intel::match_remote(s);
} }
} }
} }
@ -389,9 +389,11 @@ hook extend_match(info: Info, s: Seen, items: set[Item]) &priority=5
} }
} }
function insert(item: Item) # Function to insert metadata of an item. The function returns T
# if the given indicator is new.
function insert_meta_data(item: Item): bool
{ {
# Create and fill out the metadata item. # Prepare the metadata entry.
local meta = item$meta; local meta = item$meta;
local meta_tbl: table [string] of MetaData; local meta_tbl: table [string] of MetaData;
local is_new: bool = T; local is_new: bool = T;
@ -399,11 +401,11 @@ function insert(item: Item)
# All intelligence is case insensitive at the moment. # All intelligence is case insensitive at the moment.
local lower_indicator = to_lower(item$indicator); local lower_indicator = to_lower(item$indicator);
if ( item$indicator_type == ADDR ) switch ( item$indicator_type )
{ {
case ADDR:
local host = to_addr(item$indicator); local host = to_addr(item$indicator);
if ( have_full_data )
{
if ( host !in data_store$host_data ) if ( host !in data_store$host_data )
data_store$host_data[host] = table(); data_store$host_data[host] = table();
else else
@ -414,15 +416,10 @@ function insert(item: Item)
} }
meta_tbl = data_store$host_data[host]; meta_tbl = data_store$host_data[host];
} break;
case SUBNET:
add min_data_store$host_data[host];
}
else if ( item$indicator_type == SUBNET )
{
local net = to_subnet(item$indicator); local net = to_subnet(item$indicator);
if ( have_full_data )
{
if ( !check_subnet(net, data_store$subnet_data) ) if ( !check_subnet(net, data_store$subnet_data) )
data_store$subnet_data[net] = table(); data_store$subnet_data[net] = table();
else else
@ -433,14 +430,8 @@ function insert(item: Item)
} }
meta_tbl = data_store$subnet_data[net]; meta_tbl = data_store$subnet_data[net];
} break;
default:
add min_data_store$subnet_data[net];
}
else
{
if ( have_full_data )
{
if ( [lower_indicator, item$indicator_type] !in data_store$string_data ) if ( [lower_indicator, item$indicator_type] !in data_store$string_data )
data_store$string_data[lower_indicator, item$indicator_type] = table(); data_store$string_data[lower_indicator, item$indicator_type] = table();
else else
@ -452,23 +443,59 @@ function insert(item: Item)
} }
meta_tbl = data_store$string_data[lower_indicator, item$indicator_type]; meta_tbl = data_store$string_data[lower_indicator, item$indicator_type];
break;
} }
# Insert new metadata or update if already present.
meta_tbl[meta$source] = meta;
return is_new;
}
# Function to encapsulate insertion logic. The first_dispatch parameter
# indicates whether the item might be new for other nodes.
function _insert(item: Item, first_dispatch: bool &default = T)
{
# Assume that the item is new by default.
local is_new: bool = T;
# All intelligence is case insensitive at the moment.
local lower_indicator = to_lower(item$indicator);
# Insert indicator into MinDataStore (might exist already).
switch ( item$indicator_type )
{
case ADDR:
local host = to_addr(item$indicator);
add min_data_store$host_data[host];
break;
case SUBNET:
local net = to_subnet(item$indicator);
add min_data_store$subnet_data[net];
break;
default:
add min_data_store$string_data[lower_indicator, item$indicator_type]; add min_data_store$string_data[lower_indicator, item$indicator_type];
break;
} }
if ( have_full_data ) if ( have_full_data )
{ {
# Insert new metadata or update if already present # Insert new metadata or update if already present.
meta_tbl[meta$source] = meta; is_new = insert_meta_data(item);
} }
if ( is_new ) if ( first_dispatch && is_new )
# Trigger insert for cluster in case the item is new # Announce a (possibly) new item if this is the first dispatch and
# or insert was called on a worker # we know it is new or have to assume that on a worker.
event Intel::new_item(item); event Intel::new_item(item);
} }
function insert(item: Item)
{
# Insert possibly new item.
_insert(item, T);
}
# Function to check whether an item is present. # Function to check whether an item is present.
function item_exists(item: Item): bool function item_exists(item: Item): bool
{ {
@ -549,12 +576,12 @@ function remove(item: Item, purge_indicator: bool)
break; break;
} }
# Trigger deletion in minimal data stores # Trigger deletion in minimal data stores
event Intel::purge_item(item); event Intel::remove_indicator(item);
} }
} }
# Handling of indicator removal in minimal data stores. # Handling of indicator removal in minimal data stores.
event purge_item(item: Item) event remove_indicator(item: Item)
{ {
switch ( item$indicator_type ) switch ( item$indicator_type )
{ {
@ -571,4 +598,3 @@ event purge_item(item: Item)
break; break;
} }
} }

View file

@ -225,9 +225,22 @@ global blocks: table[addr] of BlockInfo = {}
@if ( Cluster::is_enabled() ) @if ( Cluster::is_enabled() )
@load base/frameworks/cluster
redef Cluster::manager2worker_events += /NetControl::catch_release_block_(new|delete)/; @if ( Cluster::local_node_type() == Cluster::MANAGER )
redef Cluster::worker2manager_events += /NetControl::catch_release_(add|delete|encountered)/; event bro_init()
{
Broker::auto_publish(Cluster::worker_topic, NetControl::catch_release_block_new);
Broker::auto_publish(Cluster::worker_topic, NetControl::catch_release_block_delete);
}
@else
event bro_init()
{
Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_add);
Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_delete);
Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_encountered);
}
@endif
@endif @endif
function cr_check_rule(r: Rule): bool function cr_check_rule(r: Rule): bool

View file

@ -16,10 +16,25 @@ export {
global cluster_netcontrol_delete_rule: event(id: string, reason: string); global cluster_netcontrol_delete_rule: event(id: string, reason: string);
} }
## Workers need ability to forward commands to manager. @if ( Cluster::local_node_type() == Cluster::MANAGER )
redef Cluster::worker2manager_events += /NetControl::cluster_netcontrol_(add|remove|delete)_rule/; event bro_init()
## Workers need to see the result events from the manager. {
redef Cluster::manager2worker_events += /NetControl::rule_(added|removed|timeout|error|exists|new|destroyed)/; Broker::auto_publish(Cluster::worker_topic, NetControl::rule_added);
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_removed);
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_timeout);
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_error);
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_exists);
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_new);
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_destroyed);
}
@else
event bro_init()
{
Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_add_rule);
Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_remove_rule);
Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_delete_rule);
}
@endif
function activate(p: PluginState, priority: int) function activate(p: PluginState, priority: int)
{ {

View file

@ -6,8 +6,6 @@ module NetControl;
@load ../plugin @load ../plugin
@load base/frameworks/broker @load base/frameworks/broker
@ifdef ( Broker::__enable )
export { export {
type AclRule : record { type AclRule : record {
command: string; command: string;
@ -243,7 +241,7 @@ function acld_add_rule_fun(p: PluginState, r: Rule) : bool
if ( ar$command == "" ) if ( ar$command == "" )
return F; return F;
Broker::send_event(p$acld_config$acld_topic, Broker::event_args(acld_add_rule, p$acld_id, r, ar)); Broker::publish(p$acld_config$acld_topic, acld_add_rule, p$acld_id, r, ar);
return T; return T;
} }
@ -266,19 +264,20 @@ function acld_remove_rule_fun(p: PluginState, r: Rule, reason: string) : bool
ar$comment = reason; ar$comment = reason;
} }
Broker::send_event(p$acld_config$acld_topic, Broker::event_args(acld_remove_rule, p$acld_id, r, ar)); Broker::publish(p$acld_config$acld_topic, acld_remove_rule, p$acld_id, r, ar);
return T; return T;
} }
function acld_init(p: PluginState) function acld_init(p: PluginState)
{ {
Broker::enable(); Broker::peer(cat(p$acld_config$acld_host), p$acld_config$acld_port);
Broker::connect(cat(p$acld_config$acld_host), p$acld_config$acld_port, 1sec); Broker::subscribe(p$acld_config$acld_topic);
Broker::subscribe_to_events(p$acld_config$acld_topic);
} }
event Broker::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string) event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
{ {
local peer_address = cat(endpoint$network$address);
local peer_port = endpoint$network$bound_port;
if ( [peer_port, peer_address] !in netcontrol_acld_peers ) if ( [peer_port, peer_address] !in netcontrol_acld_peers )
# ok, this one was none of ours... # ok, this one was none of ours...
return; return;
@ -315,5 +314,3 @@ function create_acld(config: AcldConfig) : PluginState
return p; return p;
} }
@endif

View file

@ -8,8 +8,6 @@ module NetControl;
@load ../plugin @load ../plugin
@load base/frameworks/broker @load base/frameworks/broker
@ifdef ( Broker::__enable )
export { export {
## This record specifies the configuration that is passed to :bro:see:`NetControl::create_broker`. ## This record specifies the configuration that is passed to :bro:see:`NetControl::create_broker`.
type BrokerConfig: record { type BrokerConfig: record {
@ -151,7 +149,7 @@ function broker_add_rule_fun(p: PluginState, r: Rule) : bool
if ( ! broker_check_rule(p, r) ) if ( ! broker_check_rule(p, r) )
return F; return F;
Broker::send_event(p$broker_config$topic, Broker::event_args(broker_add_rule, p$broker_id, r)); Broker::publish(p$broker_config$topic, Broker::make_event(broker_add_rule, p$broker_id, r));
return T; return T;
} }
@ -160,19 +158,20 @@ function broker_remove_rule_fun(p: PluginState, r: Rule, reason: string) : bool
if ( ! broker_check_rule(p, r) ) if ( ! broker_check_rule(p, r) )
return F; return F;
Broker::send_event(p$broker_config$topic, Broker::event_args(broker_remove_rule, p$broker_id, r, reason)); Broker::publish(p$broker_config$topic, Broker::make_event(broker_remove_rule, p$broker_id, r, reason));
return T; return T;
} }
function broker_init(p: PluginState) function broker_init(p: PluginState)
{ {
Broker::enable(); Broker::peer(cat(p$broker_config$host), p$broker_config$bport);
Broker::connect(cat(p$broker_config$host), p$broker_config$bport, 1sec); Broker::subscribe(p$broker_config$topic);
Broker::subscribe_to_events(p$broker_config$topic);
} }
event Broker::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string) event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
{ {
local peer_address = cat(endpoint$network$address);
local peer_port = endpoint$network$bound_port;
if ( [peer_port, peer_address] !in netcontrol_broker_peers ) if ( [peer_port, peer_address] !in netcontrol_broker_peers )
return; return;
@ -219,5 +218,3 @@ function create_broker(config: BrokerConfig, can_expire: bool) : PluginState
return p; return p;
} }
@endif

View file

@ -8,14 +8,5 @@
@load ./actions/page @load ./actions/page
@load ./actions/add-geodata @load ./actions/add-geodata
# The cluster framework must be loaded first.
@load base/frameworks/cluster
@if ( Cluster::is_enabled() )
@load ./cluster
@else
@load ./non-cluster
@endif
# Load here so that it can check whether clustering is enabled. # Load here so that it can check whether clustering is enabled.
@load ./actions/pp-alarms @load ./actions/pp-alarms

View file

@ -156,8 +156,10 @@ function pretty_print_alarm(out: file, n: Info)
@if ( Cluster::is_enabled() ) @if ( Cluster::is_enabled() )
pdescr = "local"; pdescr = "local";
if ( n?$src_peer ) if ( n?$peer_descr )
pdescr = n$src_peer?$descr ? n$src_peer$descr : fmt("%s", n$src_peer$host); pdescr = n$peer_descr;
else if ( n?$peer_name )
pdescr = n$peer_name;
pdescr = fmt("<%s> ", pdescr); pdescr = fmt("<%s> ", pdescr);
@endif @endif

View file

@ -1,53 +0,0 @@
##! Implements notice functionality across clusters. Worker nodes
##! will disable notice/alarm logging streams and forward notice
##! events to the manager node for logging/processing.
@load ./main
@load base/frameworks/cluster
module Notice;
export {
## This is the event used to transport notices on the cluster.
##
## n: The notice information to be sent to the cluster manager for
## further processing.
global cluster_notice: event(n: Notice::Info);
}
## Manager can communicate notice suppression to workers.
redef Cluster::manager2worker_events += /Notice::begin_suppression/;
## Workers need ability to forward notices to manager.
redef Cluster::worker2manager_events += /Notice::cluster_notice/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
event Notice::begin_suppression(ts: time, suppress_for: interval, note: Type, identifier: string)
{
local suppress_until = ts + suppress_for;
suppressing[note, identifier] = suppress_until;
}
@endif
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event Notice::cluster_notice(n: Notice::Info)
{
# Raise remotely received notices on the manager
NOTICE(n);
}
@endif
module GLOBAL;
## This is the entry point in the global namespace for the notice framework.
function NOTICE(n: Notice::Info)
{
# Suppress this notice if necessary.
if ( Notice::is_being_suppressed(n) )
return;
if ( Cluster::local_node_type() == Cluster::MANAGER )
Notice::internal_NOTICE(n);
else
# For non-managers, send the notice on to the manager.
event Notice::cluster_notice(n);
}

View file

@ -4,6 +4,8 @@
##! what is bad activity for sites. More extensive documentation about using ##! what is bad activity for sites. More extensive documentation about using
##! the notice framework can be found in :doc:`/frameworks/notice`. ##! the notice framework can be found in :doc:`/frameworks/notice`.
@load base/frameworks/cluster
module Notice; module Notice;
export { export {
@ -117,9 +119,10 @@ export {
## Associated count, or perhaps a status code. ## Associated count, or perhaps a status code.
n: count &log &optional; n: count &log &optional;
## Peer that raised this notice. ## Name of remote peer that raised this notice.
src_peer: event_peer &optional; peer_name: string &optional;
## Textual description for the peer that raised this notice. ## Textual description for the peer that raised this notice,
## including name, host address and port.
peer_descr: string &log &optional; peer_descr: string &log &optional;
## The actions which have been applied to this notice. ## The actions which have been applied to this notice.
@ -316,8 +319,36 @@ export {
## ##
## n: The record of notice data. ## n: The record of notice data.
global internal_NOTICE: function(n: Notice::Info); global internal_NOTICE: function(n: Notice::Info);
## This is the event used to transport notices on the cluster.
##
## n: The notice information to be sent to the cluster manager for
## further processing.
global cluster_notice: event(n: Notice::Info);
} }
module GLOBAL;
function NOTICE(n: Notice::Info)
{
if ( Notice::is_being_suppressed(n) )
return;
@if ( Cluster::is_enabled() )
if ( Cluster::local_node_type() == Cluster::MANAGER )
Notice::internal_NOTICE(n);
else
{
n$peer_name = n$peer_descr = Cluster::node;
Broker::publish(Cluster::manager_topic, Notice::cluster_notice, n);
}
@else
Notice::internal_NOTICE(n);
@endif
}
module Notice;
# This is used as a hack to implement per-item expiration intervals. # This is used as a hack to implement per-item expiration intervals.
function per_notice_suppression_interval(t: table[Notice::Type, string] of time, idx: any): interval function per_notice_suppression_interval(t: table[Notice::Type, string] of time, idx: any): interval
{ {
@ -368,24 +399,6 @@ event bro_init() &priority=5
$interv=24hrs, $postprocessor=log_mailing_postprocessor]); $interv=24hrs, $postprocessor=log_mailing_postprocessor]);
} }
# TODO: fix this.
#function notice_tags(n: Notice::Info) : table[string] of string
# {
# local tgs: table[string] of string = table();
# if ( is_remote_event() )
# {
# if ( n$src_peer$descr != "" )
# tgs["es"] = n$src_peer$descr;
# else
# tgs["es"] = fmt("%s/%s", n$src_peer$host, n$src_peer$p);
# }
# else
# {
# tgs["es"] = peer_description;
# }
# return tgs;
# }
function email_headers(subject_desc: string, dest: string): string function email_headers(subject_desc: string, dest: string): string
{ {
local header_text = string_cat( local header_text = string_cat(
@ -507,12 +520,26 @@ hook Notice::notice(n: Notice::Info) &priority=-5
[n$note, n$identifier] !in suppressing && [n$note, n$identifier] !in suppressing &&
n$suppress_for != 0secs ) n$suppress_for != 0secs )
{ {
local suppress_until = n$ts + n$suppress_for;
suppressing[n$note, n$identifier] = suppress_until;
event Notice::begin_suppression(n$ts, n$suppress_for, n$note, n$identifier); event Notice::begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
} }
} }
event Notice::begin_suppression(ts: time, suppress_for: interval, note: Type,
identifier: string)
{
local suppress_until = ts + suppress_for;
suppressing[note, identifier] = suppress_until;
}
event bro_init()
{
if ( ! Cluster::is_enabled() )
return;
Broker::auto_publish(Cluster::worker_topic, Notice::begin_suppression);
Broker::auto_publish(Cluster::proxy_topic, Notice::begin_suppression);
}
function is_being_suppressed(n: Notice::Info): bool function is_being_suppressed(n: Notice::Info): bool
{ {
if ( n?$identifier && [n$note, n$identifier] in suppressing ) if ( n?$identifier && [n$note, n$identifier] in suppressing )
@ -612,12 +639,6 @@ function apply_policy(n: Notice::Info)
n$dst = n$iconn$resp_h; n$dst = n$iconn$resp_h;
} }
if ( ! n?$src_peer )
n$src_peer = get_event_peer();
if ( ! n?$peer_descr )
n$peer_descr = n$src_peer?$descr ?
n$src_peer$descr : fmt("%s", n$src_peer$host);
if ( ! n?$email_body_sections ) if ( ! n?$email_body_sections )
n$email_body_sections = vector(); n$email_body_sections = vector();
if ( ! n?$email_delay_tokens ) if ( ! n?$email_delay_tokens )
@ -652,6 +673,7 @@ function internal_NOTICE(n: Notice::Info)
hook Notice::notice(n); hook Notice::notice(n);
} }
module GLOBAL; event Notice::cluster_notice(n: Notice::Info)
{
global NOTICE: function(n: Notice::Info); NOTICE(n);
}

View file

@ -1,14 +0,0 @@
@load ./main
module GLOBAL;
## This is the entry point in the global namespace for the notice framework.
function NOTICE(n: Notice::Info)
{
# Suppress this notice if necessary.
if ( Notice::is_being_suppressed(n) )
return;
Notice::internal_NOTICE(n);
}

View file

@ -13,8 +13,14 @@ export {
global cluster_flow_clear: event(name: string); global cluster_flow_clear: event(name: string);
} }
## Workers need ability to forward commands to manager. @if ( Cluster::local_node_type() != Cluster::MANAGER )
redef Cluster::worker2manager_events += /OpenFlow::cluster_flow_(mod|clear)/; # Workers need ability to forward commands to manager.
event bro_init()
{
Broker::auto_publish(Cluster::manager_topic, OpenFlow::cluster_flow_mod);
Broker::auto_publish(Cluster::manager_topic, OpenFlow::cluster_flow_clear);
}
@endif
# the flow_mod function wrapper # the flow_mod function wrapper
function flow_mod(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool function flow_mod(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool

View file

@ -5,8 +5,6 @@
module OpenFlow; module OpenFlow;
@ifdef ( Broker::__enable )
export { export {
redef enum Plugin += { redef enum Plugin += {
BROKER, BROKER,
@ -49,27 +47,28 @@ function broker_describe(state: ControllerState): string
function broker_flow_mod_fun(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool function broker_flow_mod_fun(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
{ {
Broker::send_event(state$broker_topic, Broker::event_args(broker_flow_mod, state$_name, state$broker_dpid, match, flow_mod)); Broker::publish(state$broker_topic, Broker::make_event(broker_flow_mod, state$_name, state$broker_dpid, match, flow_mod));
return T; return T;
} }
function broker_flow_clear_fun(state: OpenFlow::ControllerState): bool function broker_flow_clear_fun(state: OpenFlow::ControllerState): bool
{ {
Broker::send_event(state$broker_topic, Broker::event_args(broker_flow_clear, state$_name, state$broker_dpid)); Broker::publish(state$broker_topic, Broker::make_event(broker_flow_clear, state$_name, state$broker_dpid));
return T; return T;
} }
function broker_init(state: OpenFlow::ControllerState) function broker_init(state: OpenFlow::ControllerState)
{ {
Broker::enable(); Broker::peer(cat(state$broker_host), state$broker_port);
Broker::connect(cat(state$broker_host), state$broker_port, 1sec); Broker::subscribe(state$broker_topic); # openflow success and failure events are directly sent back via the other plugin via broker.
Broker::subscribe_to_events(state$broker_topic); # openflow success and failure events are directly sent back via the other plugin via broker.
} }
event Broker::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string) event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
{ {
local peer_address = cat(endpoint$network$address);
local peer_port = endpoint$network$bound_port;
if ( [peer_port, peer_address] !in broker_peers ) if ( [peer_port, peer_address] !in broker_peers )
# ok, this one was none of ours... # ok, this one was none of ours...
return; return;
@ -94,5 +93,3 @@ function broker_new(name: string, host: addr, host_port: port, topic: string, dp
return c; return c;
} }
@endif

View file

@ -6,6 +6,7 @@
@load base/utils/directions-and-hosts @load base/utils/directions-and-hosts
@load base/utils/numbers @load base/utils/numbers
@load base/frameworks/cluster
module Software; module Software;
@ -70,6 +71,7 @@ export {
const asset_tracking = LOCAL_HOSTS &redef; const asset_tracking = LOCAL_HOSTS &redef;
## Other scripts should call this function when they detect software. ## Other scripts should call this function when they detect software.
##
## id: The connection id where the software was discovered. ## id: The connection id where the software was discovered.
## ##
## info: A record representing the software discovered. ## info: A record representing the software discovered.
@ -102,15 +104,21 @@ export {
## The set of software associated with an address. Data expires from ## The set of software associated with an address. Data expires from
## this table after one day by default so that a detected piece of ## this table after one day by default so that a detected piece of
## software will be logged once each day. ## software will be logged once each day. In a cluster, this table is
global tracked: table[addr] of SoftwareSet ## uniformly distributed among proxy nodes.
&create_expire=1day global tracked: table[addr] of SoftwareSet &create_expire=1day;
&synchronized
&redef;
## This event can be handled to access the :bro:type:`Software::Info` ## This event can be handled to access the :bro:type:`Software::Info`
## record as it is sent on to the logging framework. ## record as it is sent on to the logging framework.
global log_software: event(rec: Info); global log_software: event(rec: Info);
## This event can be handled to access software information whenever it's
## version is found to have changed.
global version_change: event(old: Info, new: Info);
## This event is raised when software is about to be registered for
## tracking in :bro:see:`Software::tracked`.
global register: event(info: Info);
} }
event bro_init() &priority=5 event bro_init() &priority=5
@ -437,40 +445,43 @@ function software_fmt(i: Info): string
return fmt("%s %s", i$name, software_fmt_version(i$version)); return fmt("%s %s", i$name, software_fmt_version(i$version));
} }
# Insert a mapping into the table event Software::register(info: Info)
# Overides old entries for the same software and generates events if needed.
event register(id: conn_id, info: Info)
{ {
# Host already known? local ts: SoftwareSet;
if ( info$host !in tracked )
tracked[info$host] = table(); if ( info$host in tracked )
ts = tracked[info$host];
else
ts = tracked[info$host] = SoftwareSet();
local ts = tracked[info$host];
# Software already registered for this host? We don't want to endlessly # Software already registered for this host? We don't want to endlessly
# log the same thing. # log the same thing.
if ( info$name in ts ) if ( info$name in ts )
{ {
local old = ts[info$name]; local old = ts[info$name];
local changed = cmp_versions(old$version, info$version) != 0;
if ( changed )
event Software::version_change(old, info);
else if ( ! info$force_log )
# If the version hasn't changed, then we're just redetecting the # If the version hasn't changed, then we're just redetecting the
# same thing, then we don't care. This results in no extra logging. # same thing, then we don't care.
# But if the $force_log value is set then we'll continue.
if ( ! info$force_log && cmp_versions(old$version, info$version) == 0 )
return; return;
} }
ts[info$name] = info;
ts[info$name] = info;
Log::write(Software::LOG, info); Log::write(Software::LOG, info);
} }
function found(id: conn_id, info: Info): bool function found(id: conn_id, info: Info): bool
{ {
if ( info$force_log || addr_matches_host(info$host, asset_tracking) ) if ( ! info$force_log && ! addr_matches_host(info$host, asset_tracking) )
{ return F;
if ( ! info?$ts ) if ( ! info?$ts )
info$ts = network_time(); info$ts = network_time();
if ( info?$version ) # we have a version number and don't have to parse. check if the name is also set... if ( info?$version )
{ {
if ( ! info?$name ) if ( ! info?$name )
{ {
@ -478,22 +489,26 @@ function found(id: conn_id, info: Info): bool
return F; return F;
} }
} }
else # no version present, we have to parse... else if ( ! info?$unparsed_version )
{
if ( !info?$unparsed_version )
{ {
Reporter::error("No unparsed version string present in Info record with version in Software::found"); Reporter::error("No unparsed version string present in Info record with version in Software::found");
return F; return F;
} }
if ( ! info?$version )
{
local sw = parse(info$unparsed_version); local sw = parse(info$unparsed_version);
info$unparsed_version = sw$unparsed_version; info$unparsed_version = sw$unparsed_version;
info$name = sw$name; info$name = sw$name;
info$version = sw$version; info$version = sw$version;
} }
event register(id, info); @if ( Cluster::is_enabled() )
Cluster::publish_hrw(Cluster::proxy_pool, info$host, Software::register,
info);
@else
event Software::register(info);
@endif
return T; return T;
} }
else
return F;
}

View file

@ -55,18 +55,20 @@ export {
global cluster_threshold_crossed: event(ss_name: string, key: SumStats::Key, thold_index: count); global cluster_threshold_crossed: event(ss_name: string, key: SumStats::Key, thold_index: count);
} }
# Add events to the cluster framework to make this work.
redef Cluster::manager2worker_events += /SumStats::cluster_(ss_request|get_result|threshold_crossed)/;
redef Cluster::manager2worker_events += /SumStats::(get_a_key)/;
redef Cluster::worker2manager_events += /SumStats::cluster_(send_result|key_intermediate_response)/;
redef Cluster::worker2manager_events += /SumStats::(send_a_key|send_no_key)/;
# This variable is maintained to know what keys have recently sent or received # This variable is maintained to know what keys have recently sent or received
# intermediate updates so they don't overwhelm the manager. # intermediate updates so they don't overwhelm the manager.
global recent_global_view_keys: set[string, Key] &create_expire=1min; global recent_global_view_keys: set[string, Key] &create_expire=1min;
@if ( Cluster::local_node_type() != Cluster::MANAGER ) @if ( Cluster::local_node_type() != Cluster::MANAGER )
event bro_init() &priority=100
{
Broker::auto_publish(Cluster::manager_topic, SumStats::cluster_send_result);
Broker::auto_publish(Cluster::manager_topic, SumStats::cluster_key_intermediate_response);
Broker::auto_publish(Cluster::manager_topic, SumStats::send_a_key);
Broker::auto_publish(Cluster::manager_topic, SumStats::send_no_key);
}
# Result tables indexed on a uid that are currently being sent to the # Result tables indexed on a uid that are currently being sent to the
# manager. # manager.
global sending_results: table[string] of ResultTable = table() &read_expire=1min; global sending_results: table[string] of ResultTable = table() &read_expire=1min;
@ -207,6 +209,14 @@ function request_key(ss_name: string, key: Key): Result
@if ( Cluster::local_node_type() == Cluster::MANAGER ) @if ( Cluster::local_node_type() == Cluster::MANAGER )
event bro_init() &priority=100
{
Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_ss_request);
Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_get_result);
Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_threshold_crossed);
Broker::auto_publish(Cluster::worker_topic, SumStats::get_a_key);
}
# This variable is maintained by manager nodes as they collect and aggregate # This variable is maintained by manager nodes as they collect and aggregate
# results. # results.
# Index on a uid. # Index on a uid.

View file

@ -603,6 +603,29 @@ type ThreadStats: record {
num_threads: count; num_threads: count;
}; };
## Statistics about Broker communication.
##
## .. bro:see:: get_broker_stats
type BrokerStats: record {
num_peers: count;
## Number of active data stores.
num_stores: count;
## Number of pending data store queries.
num_pending_queries: count;
## Number of total log messages received.
num_events_incoming: count;
## Number of total log messages sent.
num_events_outgoing: count;
## Number of total log records received.
num_logs_incoming: count;
## Number of total log records sent.
num_logs_outgoing: count;
## Number of total identifiers received.
num_ids_incoming: count;
## Number of total identifiers sent.
num_ids_outgoing: count;
};
## Deprecated. ## Deprecated.
## ##
## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere ## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere
@ -737,7 +760,7 @@ type IPAddrAnonymizationClass: enum {
## A locally unique ID identifying a communication peer. The ID is returned by ## A locally unique ID identifying a communication peer. The ID is returned by
## :bro:id:`connect`. ## :bro:id:`connect`.
## ##
## .. bro:see:: connect Communication ## .. bro:see:: connect
type peer_id: count; type peer_id: count;
## A communication peer. ## A communication peer.
@ -760,7 +783,7 @@ type event_peer: record {
p: port; p: port;
is_local: bool; ##< True if this record describes the local process. is_local: bool; ##< True if this record describes the local process.
descr: string; ##< The peer's :bro:see:`peer_description`. descr: string; ##< The peer's :bro:see:`peer_description`.
class: string &optional; ##< The self-assigned *class* of the peer. See :bro:see:`Communication::Node`. class: string &optional; ##< The self-assigned *class* of the peer.
}; };
## Deprecated. ## Deprecated.
@ -4783,6 +4806,11 @@ export {
const max_frag_data = 30000 &redef; const max_frag_data = 30000 &redef;
} }
module Cluster;
export {
type Cluster::Pool: record {};
}
module GLOBAL; module GLOBAL;
## Seed for hashes computed internally for probabilistic data structures. Using ## Seed for hashes computed internally for probabilistic data structures. Using
@ -4797,8 +4825,8 @@ const bits_per_uid: count = 96 &redef;
# Load these frameworks here because they use fairly deep integration with # Load these frameworks here because they use fairly deep integration with
# BiFs and script-land defined types. # BiFs and script-land defined types.
@load base/frameworks/broker
@load base/frameworks/logging @load base/frameworks/logging
@load base/frameworks/broker
@load base/frameworks/input @load base/frameworks/input
@load base/frameworks/analyzer @load base/frameworks/analyzer
@load base/frameworks/files @load base/frameworks/files

View file

@ -14,6 +14,7 @@
@load base/utils/exec @load base/utils/exec
@load base/utils/files @load base/utils/files
@load base/utils/geoip-distance @load base/utils/geoip-distance
@load base/utils/hash_hrw
@load base/utils/numbers @load base/utils/numbers
@load base/utils/paths @load base/utils/paths
@load base/utils/patterns @load base/utils/patterns
@ -32,7 +33,6 @@
@load base/frameworks/signatures @load base/frameworks/signatures
@load base/frameworks/packet-filter @load base/frameworks/packet-filter
@load base/frameworks/software @load base/frameworks/software
@load base/frameworks/communication
@load base/frameworks/control @load base/frameworks/control
@load base/frameworks/cluster @load base/frameworks/cluster
@load base/frameworks/intel @load base/frameworks/intel

View file

@ -116,8 +116,12 @@ event bro_init() &priority=5
Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports); Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports);
} }
# Setup the clusterized config that is needed to tie messages together on a cluster. @if ( Cluster::is_enabled() )
redef Cluster::worker2manager_events += /DHCP::aggregate_msgs/; event bro_init()
{
Broker::auto_publish(Cluster::manager_topic, DHCP::aggregate_msgs);
}
@endif
function join_data_expiration(t: table[count] of Info, idx: count): interval function join_data_expiration(t: table[count] of Info, idx: count): interval
{ {

View file

@ -11,6 +11,7 @@
@load ./main @load ./main
@load base/utils/files @load base/utils/files
@load base/frameworks/cluster
module IRC; module IRC;
@ -23,9 +24,33 @@ export {
## Sniffed mime type of the file. ## Sniffed mime type of the file.
dcc_mime_type: string &log &optional; dcc_mime_type: string &log &optional;
}; };
## The broker topic name to which expected DCC transfer updates are
## relayed.
const dcc_transfer_update_topic = "bro/irc/dcc_transfer_update" &redef;
} }
global dcc_expected_transfers: table[addr, port] of Info &synchronized &read_expire=5mins; global dcc_expected_transfers: table[addr, port] of Info &read_expire=5mins;
event dcc_transfer_add(host: addr, p: port, info: Info)
{
dcc_expected_transfers[host, p] = info;
Analyzer::schedule_analyzer(0.0.0.0, host, p,
Analyzer::ANALYZER_IRC_DATA, 5 min);
}
event dcc_transfer_remove(host: addr, p: port)
{
delete dcc_expected_transfers[host, p];
}
event bro_init()
{
local lnt = Cluster::local_node_type();
if ( lnt == Cluster::WORKER )
Broker::subscribe(dcc_transfer_update_topic);
}
function log_dcc(f: fa_file) function log_dcc(f: fa_file)
{ {
@ -51,6 +76,9 @@ function log_dcc(f: fa_file)
delete irc$dcc_mime_type; delete irc$dcc_mime_type;
delete dcc_expected_transfers[cid$resp_h, cid$resp_p]; delete dcc_expected_transfers[cid$resp_h, cid$resp_p];
Cluster::relay_rr(Cluster::proxy_pool, dcc_transfer_update_topic,
dcc_transfer_update_topic, dcc_transfer_remove,
cid$resp_h, cid$resp_p);
return; return;
} }
} }
@ -74,6 +102,9 @@ event irc_dcc_message(c: connection, is_orig: bool,
local p = count_to_port(dest_port, tcp); local p = count_to_port(dest_port, tcp);
Analyzer::schedule_analyzer(0.0.0.0, address, p, Analyzer::ANALYZER_IRC_DATA, 5 min); Analyzer::schedule_analyzer(0.0.0.0, address, p, Analyzer::ANALYZER_IRC_DATA, 5 min);
dcc_expected_transfers[address, p] = c$irc; dcc_expected_transfers[address, p] = c$irc;
Cluster::relay_rr(Cluster::proxy_pool, dcc_transfer_update_topic,
dcc_transfer_update_topic, dcc_transfer_add,
address, p, c$irc);
} }
event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) &priority=10 event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) &priority=10
@ -86,5 +117,10 @@ event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) &priority=10
event connection_state_remove(c: connection) &priority=-5 event connection_state_remove(c: connection) &priority=-5
{ {
if ( [c$id$resp_h, c$id$resp_p] in dcc_expected_transfers ) if ( [c$id$resp_h, c$id$resp_p] in dcc_expected_transfers )
{
delete dcc_expected_transfers[c$id$resp_h, c$id$resp_p]; delete dcc_expected_transfers[c$id$resp_h, c$id$resp_p];
Cluster::relay_rr(Cluster::proxy_pool, dcc_transfer_update_topic,
dcc_transfer_update_topic, dcc_transfer_remove,
c$id$resp_h, c$id$resp_p);
}
} }

View file

@ -0,0 +1,76 @@
##! An implementation of highest random weight (HRW) hashing, also called
##! rendezvous hashing. See
##! `<https://en.wikipedia.org/wiki/Rendezvous_hashing>`_.
module HashHRW;
export {
## A site/node is a unique location to which you want a subset of keys
## to be distributed.
type Site: record {
## A unique identifier for the site, should not exceed what
## can be contained in a 32-bit integer.
id: count;
## Other data to associate with the site.
user_data: any &optional;
};
## A table of sites, indexed by their id.
type SiteTable: table[count] of Site;
## A collection of sites to distribute keys across.
type Pool: record {
sites: SiteTable &default=SiteTable();
};
## Add a site to a pool.
##
## Returns: F is the site is already in the pool, else T.
global add_site: function(pool: Pool, site: Site): bool;
## Remove a site from a pool.
##
## Returns: F if the site is not in the pool, else T.
global rem_site: function(pool: Pool, site: Site): bool;
## Returns: the site to which the key maps.
global get_site: function(pool: Pool, key: any): Site;
}
function add_site(pool: Pool, site: Site): bool
{
if ( site$id in pool$sites )
return F;
pool$sites[site$id] = site;
return T;
}
function rem_site(pool: Pool, site: Site): bool
{
if ( site$id !in pool$sites )
return F;
delete pool$sites[site$id];
return T;
}
function get_site(pool: Pool, key: any): Site
{
local best_site_id = 0;
local best_weight = -1;
local d = fnv1a32(key);
for ( site_id in pool$sites )
{
local w = hrw_weight(d, site_id);
if ( w > best_weight || (w == best_weight && site_id > best_site_id) )
{
best_weight = w;
best_site_id = site_id;
}
}
return pool$sites[best_site_id];
}

View file

@ -2,7 +2,6 @@
# Scripts which are commented out in test-all-policy.bro. # Scripts which are commented out in test-all-policy.bro.
@load protocols/ssl/notary.bro @load protocols/ssl/notary.bro
@load frameworks/communication/listen.bro
@load frameworks/control/controllee.bro @load frameworks/control/controllee.bro
@load frameworks/control/controller.bro @load frameworks/control/controller.bro
@load frameworks/files/extract-all-files.bro @load frameworks/files/extract-all-files.bro

View file

@ -1,13 +0,0 @@
##! Loading this script will make the Bro instance listen for remote
##! Bro instances to connect.
@load base/frameworks/communication
module Communication;
event bro_init() &priority=-10
{
enable_communication();
listen(listen_interface, listen_port, listen_ssl, listen_ipv6,
listen_ipv6_zone_id, listen_retry);
}

View file

@ -8,12 +8,28 @@
##! bro <scripts> frameworks/control/controllee ##! bro <scripts> frameworks/control/controllee
@load base/frameworks/control @load base/frameworks/control
# If an instance is a controllee, it implicitly needs to listen for remote @load base/frameworks/broker
# connections.
@load frameworks/communication/listen
module Control; module Control;
event bro_init() &priority=-10
{
Broker::subscribe(Control::topic_prefix);
Broker::auto_publish(Control::topic_prefix + "/id_value_response",
Control::id_value_response);
Broker::auto_publish(Control::topic_prefix + "/peer_status_response",
Control::peer_status_response);
Broker::auto_publish(Control::topic_prefix + "/net_stats_response",
Control::net_stats_response);
Broker::auto_publish(Control::topic_prefix + "/configuration_update_response",
Control::configuration_update_response);
Broker::auto_publish(Control::topic_prefix + "/shutdown_response",
Control::shutdown_response);
if ( Control::controllee_listen )
Broker::listen();
}
event Control::id_value_request(id: string) event Control::id_value_request(id: string)
{ {
local val = lookup_ID(id); local val = lookup_ID(id);
@ -23,14 +39,18 @@ event Control::id_value_request(id: string)
event Control::peer_status_request() event Control::peer_status_request()
{ {
local status = ""; local status = "";
for ( p in Communication::nodes )
{
local peer = Communication::nodes[p];
if ( ! peer$connected )
next;
status += fmt("%.6f peer=%s host=%s\n", # @todo: need to expose broker::endpoint::peers and broker::peer_status
network_time(), peer$peer$descr, peer$host); local peers = Broker::peers();
for ( i in peers )
{
local bpeer = peers[i];
status += fmt("%.6f peer=%s host=%s status=%s\n",
network_time(),
bpeer$peer$id,
bpeer$peer$network$address,
bpeer$status);
} }
event Control::peer_status_response(status); event Control::peer_status_response(status);
@ -61,5 +81,5 @@ event Control::shutdown_request()
# Send the acknowledgement event. # Send the acknowledgement event.
event Control::shutdown_response(); event Control::shutdown_response();
# Schedule the shutdown to let the current event queue flush itself first. # Schedule the shutdown to let the current event queue flush itself first.
event terminate_event(); schedule 1sec { terminate_event() };
} }

View file

@ -7,7 +7,7 @@
##! bro <scripts> frameworks/control/controller Control::host=<host_addr> Control::host_port=<host_port> Control::cmd=<command> [Control::arg=<arg>] ##! bro <scripts> frameworks/control/controller Control::host=<host_addr> Control::host_port=<host_port> Control::cmd=<command> [Control::arg=<arg>]
@load base/frameworks/control @load base/frameworks/control
@load base/frameworks/communication @load base/frameworks/broker
module Control; module Control;
@ -19,19 +19,24 @@ event bro_init() &priority=5
# shutdown. # shutdown.
if ( cmd !in commands ) if ( cmd !in commands )
{ {
# TODO: do an actual error here. Maybe through the reporter events? Reporter::error(fmt("The '%s' control command is unknown.", cmd));
print fmt("The '%s' control command is unknown.", cmd);
terminate(); terminate();
} }
# Establish the communication configuration and only request response Broker::auto_publish(Control::topic_prefix + "/id_value_request",
# messages. Control::id_value_request);
Communication::nodes["control"] = [$host=host, $zone_id=zone_id, Broker::auto_publish(Control::topic_prefix + "/peer_status_request",
$p=host_port, $sync=F, $connect=T, Control::peer_status_request);
$class="control", $events=Control::controllee_events]; Broker::auto_publish(Control::topic_prefix + "/net_stats_request",
Control::net_stats_request);
Broker::auto_publish(Control::topic_prefix + "/configuration_update_request",
Control::configuration_update_request);
Broker::auto_publish(Control::topic_prefix + "/shutdown_request",
Control::shutdown_request);
Broker::subscribe(Control::topic_prefix);
Broker::peer(cat(host), host_port);
} }
event Control::id_value_response(id: string, val: string) &priority=-10 event Control::id_value_response(id: string, val: string) &priority=-10
{ {
event terminate_event(); event terminate_event();
@ -57,11 +62,11 @@ event Control::shutdown_response() &priority=-10
event terminate_event(); event terminate_event();
} }
function configuration_update_func(p: event_peer) function configurable_ids(): id_table
{ {
# Send all &redef'able consts to the peer. local rval: id_table = table();
local globals = global_ids(); local globals = global_ids();
local cnt = 0;
for ( id in globals ) for ( id in globals )
{ {
if ( id in ignore_ids ) if ( id in ignore_ids )
@ -77,39 +82,59 @@ function configuration_update_func(p: event_peer)
# NOTE: functions are currently not fully supported for serialization and hence # NOTE: functions are currently not fully supported for serialization and hence
# aren't sent. # aren't sent.
if ( t$constant && t$redefinable && t$type_name != "func" ) if ( t$constant && t$redefinable && t$type_name != "func" )
{ rval[id] = t;
send_id(p, id);
++cnt;
}
} }
print fmt("sent %d IDs", cnt); return rval;
event terminate_event();
} }
event remote_connection_handshake_done(p: event_peer) &priority=-10 function send_control_request()
{ {
if ( cmd == "id_value" ) switch ( cmd ) {
{ case "id_value":
if ( arg != "" ) if ( arg == "" )
Reporter::fatal("The Control::id_value command requires that Control::arg also has some value.");
event Control::id_value_request(arg); event Control::id_value_request(arg);
else break;
{
# TODO: do an actual error here. Maybe through the reporter events? case "peer_status":
print "The id_value command requires that Control::arg have some value.";
terminate();
}
}
else if ( cmd == "peer_status" )
event Control::peer_status_request(); event Control::peer_status_request();
else if ( cmd == "net_stats" ) break;
case "net_stats":
event Control::net_stats_request(); event Control::net_stats_request();
else if ( cmd == "shutdown" ) break;
case "shutdown":
event Control::shutdown_request(); event Control::shutdown_request();
else if ( cmd == "configuration_update" ) break;
{
configuration_update_func(p); case "configuration_update":
# Signal configuration update to peer.
event Control::configuration_update_request(); event Control::configuration_update_request();
break;
default:
Reporter::fatal(fmt("unhandled Control::cmd, %s", cmd));
break;
} }
} }
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=-10
{
if ( cmd == "configuration_update" )
{
# Send all &redef'able consts to the peer.
local ids = configurable_ids();
for ( id in ids )
{
local topic = fmt("%s/id/%s", Control::topic_prefix, id);
Broker::publish_id(topic, id);
}
Reporter::info(fmt("Control framework sent %d IDs", |ids|));
}
send_control_request();
}

View file

@ -22,23 +22,16 @@ export {
const interesting_version_changes: set[string] = { } &redef; const interesting_version_changes: set[string] = { } &redef;
} }
event log_software(rec: Info) event Software::version_change(old: Software::Info, new: Software::Info)
{ {
local ts = tracked[rec$host]; if ( old$name !in interesting_version_changes )
return;
if ( rec$name in ts ) local msg = fmt("%.6f %s '%s' version changed from %s to %s",
{ network_time(), old$software_type, old$name,
local old = ts[rec$name];
# Is it a potentially interesting version change?
if ( rec$name in interesting_version_changes )
{
local msg = fmt("%.6f %s switched from %s to %s (%s)",
network_time(), rec$software_type,
software_fmt_version(old$version), software_fmt_version(old$version),
software_fmt(rec), rec$software_type); software_fmt_version(new$version));
NOTICE([$note=Software_Version_Change, $src=rec$host,
$msg=msg, $sub=software_fmt(rec)]); NOTICE([$note=Software_Version_Change, $src=new$host,
} $msg=msg, $sub=software_fmt(new)]);
}
} }

View file

@ -12,22 +12,11 @@ export {
## Apply BPF filters to each worker in a way that causes them to ## Apply BPF filters to each worker in a way that causes them to
## automatically flow balance traffic between them. ## automatically flow balance traffic between them.
AUTO_BPF, AUTO_BPF,
# Load balance traffic across the workers by making each one apply
# a restrict filter to only listen to a single MAC address. This
# is a somewhat common deployment option for sites doing network
# based load balancing with MAC address rewriting and passing the
# traffic to a single interface. Multiple MAC addresses will show
# up on the same interface and need filtered to a single address.
#MAC_ADDR_BPF,
}; };
## Defines the method of load balancing to use. ## Defines the method of load balancing to use.
const method = AUTO_BPF &redef; const method = AUTO_BPF &redef;
# Configure the cluster framework to enable the load balancing filter configuration.
#global send_filter: event(for_node: string, filter: string);
#global confirm_filter_installation: event(success: bool);
redef record Cluster::Node += { redef record Cluster::Node += {
## A BPF filter for load balancing traffic sniffed on a single ## A BPF filter for load balancing traffic sniffed on a single
## interface across a number of processes. In normal uses, this ## interface across a number of processes. In normal uses, this
@ -37,97 +26,86 @@ export {
}; };
} }
#redef Cluster::manager2worker_events += /LoadBalancing::send_filter/;
#redef Cluster::worker2manager_events += /LoadBalancing::confirm_filter_installation/;
@if ( Cluster::is_enabled() ) @if ( Cluster::is_enabled() )
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event bro_init() &priority=5 event bro_init() &priority=5
{ {
if ( method != AUTO_BPF ) if ( method != AUTO_BPF )
return; return;
local worker_ip_interface: table[addr, string] of count = table(); local worker_ip_interface: table[addr, string] of count = table();
for ( n in Cluster::nodes ) local sorted_node_names: vector of string = vector();
{ local node: Cluster::Node;
local this_node = Cluster::nodes[n]; local name: string;
# Only workers! # Sort nodes list so that every node iterates over it in same order.
if ( this_node$node_type != Cluster::WORKER || for ( name in Cluster::nodes )
! this_node?$interface ) sorted_node_names[|sorted_node_names|] = name;
sort(sorted_node_names, strcmp);
for ( idx in sorted_node_names )
{
name = sorted_node_names[idx];
node = Cluster::nodes[name];
if ( node$node_type != Cluster::WORKER )
next; next;
if ( [this_node$ip, this_node$interface] !in worker_ip_interface ) if ( ! node?$interface )
worker_ip_interface[this_node$ip, this_node$interface] = 0; next;
++worker_ip_interface[this_node$ip, this_node$interface];
if ( [node$ip, node$interface] !in worker_ip_interface )
worker_ip_interface[node$ip, node$interface] = 0;
++worker_ip_interface[node$ip, node$interface];
} }
# Now that we've counted up how many processes are running on an interface # Now that we've counted up how many processes are running per
# let's create the filters for each worker. # interface, let's create the filters for each worker.
local lb_proc_track: table[addr, string] of count = table(); local lb_proc_track: table[addr, string] of count = table();
for ( no in Cluster::nodes )
{
local that_node = Cluster::nodes[no];
if ( that_node$node_type == Cluster::WORKER &&
that_node?$interface && [that_node$ip, that_node$interface] in worker_ip_interface )
{
if ( [that_node$ip, that_node$interface] !in lb_proc_track )
lb_proc_track[that_node$ip, that_node$interface] = 0;
local this_lb_proc = lb_proc_track[that_node$ip, that_node$interface]; for ( idx in sorted_node_names )
local total_lb_procs = worker_ip_interface[that_node$ip, that_node$interface]; {
name = sorted_node_names[idx];
node = Cluster::nodes[name];
if ( node$node_type != Cluster::WORKER )
next;
if ( ! node?$interface )
next;
if ( [node$ip, node$interface] !in worker_ip_interface )
next;
if ( [node$ip, node$interface] !in lb_proc_track )
lb_proc_track[node$ip, node$interface] = 0;
local this_lb_proc = lb_proc_track[node$ip, node$interface];
local total_lb_procs = worker_ip_interface[node$ip, node$interface];
++lb_proc_track[node$ip, node$interface];
++lb_proc_track[that_node$ip, that_node$interface];
if ( total_lb_procs > 1 ) if ( total_lb_procs > 1 )
node$lb_filter = PacketFilter::sampling_filter(total_lb_procs,
this_lb_proc);
}
# Finally, install filter for the current node if it needs one.
for ( idx in sorted_node_names )
{ {
that_node$lb_filter = PacketFilter::sampling_filter(total_lb_procs, this_lb_proc); name = sorted_node_names[idx];
Communication::nodes[no]$capture_filter = that_node$lb_filter; node = Cluster::nodes[name];
}
}
}
}
#event remote_connection_established(p: event_peer) &priority=-5 if ( name != Cluster::node )
# { next;
# if ( is_remote_event() )
# return;
#
# local for_node = p$descr;
# # Send the filter to the peer.
# if ( for_node in Cluster::nodes &&
# Cluster::nodes[for_node]?$lb_filter )
# {
# local filter = Cluster::nodes[for_node]$lb_filter;
# event LoadBalancing::send_filter(for_node, filter);
# }
# }
#event LoadBalancing::confirm_filter_installation(success: bool) if ( ! node?$lb_filter )
# { next;
# # This doesn't really matter yet since we aren't getting back a meaningful success response.
# }
@endif restrict_filters["lb_filter"] = node$lb_filter;
@if ( Cluster::local_node_type() == Cluster::WORKER )
#event LoadBalancing::send_filter(for_node: string, filter: string)
event remote_capture_filter(p: event_peer, filter: string)
{
#if ( for_node !in Cluster::nodes )
# return;
#
#if ( Cluster::node == for_node )
# {
restrict_filters["lb_filter"] = filter;
PacketFilter::install(); PacketFilter::install();
#event LoadBalancing::confirm_filter_installation(T); }
# }
} }
@endif @endif
@endif

View file

@ -4,6 +4,7 @@
##! use on a network per day. ##! use on a network per day.
@load base/utils/directions-and-hosts @load base/utils/directions-and-hosts
@load base/frameworks/cluster
module Known; module Known;
@ -20,22 +21,127 @@ export {
host: addr &log; host: addr &log;
}; };
## Toggles between different implementations of this script.
## When true, use a Broker data store, else use a regular Bro set
## with keys uniformly distributed over proxy nodes in cluster
## operation.
const use_host_store = T &redef;
## The hosts whose existence should be logged and tracked. ## The hosts whose existence should be logged and tracked.
## See :bro:type:`Host` for possible choices. ## See :bro:type:`Host` for possible choices.
const host_tracking = LOCAL_HOSTS &redef; const host_tracking = LOCAL_HOSTS &redef;
## Holds the set of all known hosts. Keys in the store are addresses
## and their associated value will always be the "true" boolean.
global host_store: Cluster::StoreInfo;
## The Broker topic name to use for :bro:see:`Known::host_store`.
const host_store_name = "bro/known/hosts" &redef;
## The expiry interval of new entries in :bro:see:`Known::host_store`.
## This also changes the interval at which hosts get logged.
const host_store_expiry = 1day &redef;
## The timeout interval to use for operations against
## :bro:see:`Known::host_store`.
const host_store_timeout = 15sec &redef;
## The set of all known addresses to store for preventing duplicate ## The set of all known addresses to store for preventing duplicate
## logging of addresses. It can also be used from other scripts to ## logging of addresses. It can also be used from other scripts to
## inspect if an address has been seen in use. ## inspect if an address has been seen in use.
## Maintain the list of known hosts for 24 hours so that the existence ## Maintain the list of known hosts for 24 hours so that the existence
## of each individual address is logged each day. ## of each individual address is logged each day.
global known_hosts: set[addr] &create_expire=1day &synchronized &redef; ##
## In cluster operation, this set is distributed uniformly across
## proxy nodes.
global hosts: set[addr] &create_expire=1day &redef;
## An event that can be handled to access the :bro:type:`Known::HostsInfo` ## An event that can be handled to access the :bro:type:`Known::HostsInfo`
## record as it is sent on to the logging framework. ## record as it is sent on to the logging framework.
global log_known_hosts: event(rec: HostsInfo); global log_known_hosts: event(rec: HostsInfo);
} }
event bro_init()
{
if ( ! Known::use_host_store )
return;
Known::host_store = Cluster::create_store(Known::host_store_name);
}
event Known::host_found(info: HostsInfo)
{
if ( ! Known::use_host_store )
return;
when ( local r = Broker::put_unique(Known::host_store$store, info$host,
T, Known::host_store_expiry) )
{
if ( r$status == Broker::SUCCESS )
{
if ( r$result as bool )
Log::write(Known::HOSTS_LOG, info);
}
else
Reporter::error(fmt("%s: data store put_unique failure",
Known::host_store_name));
}
timeout Known::host_store_timeout
{
# Can't really tell if master store ended up inserting a key.
Log::write(Known::HOSTS_LOG, info);
}
}
event known_host_add(info: HostsInfo)
{
if ( use_host_store )
return;
if ( info$host in Known::hosts )
return;
add Known::hosts[info$host];
@if ( ! Cluster::is_enabled() ||
Cluster::local_node_type() == Cluster::PROXY )
Log::write(Known::HOSTS_LOG, info);
@endif
}
event Cluster::node_up(name: string, id: string)
{
if ( use_host_store )
return;
if ( Cluster::local_node_type() != Cluster::WORKER )
return;
# Drop local suppression cache on workers to force HRW key repartitioning.
Known::hosts = set();
}
event Cluster::node_down(name: string, id: string)
{
if ( use_host_store )
return;
if ( Cluster::local_node_type() != Cluster::WORKER )
return;
# Drop local suppression cache on workers to force HRW key repartitioning.
Known::hosts = set();
}
event Known::host_found(info: HostsInfo)
{
if ( use_host_store )
return;
Cluster::publish_hrw(Cluster::proxy_pool, info$host, known_host_add, info);
event known_host_add(info);
}
event bro_init() event bro_init()
{ {
Log::create_stream(Known::HOSTS_LOG, [$columns=HostsInfo, $ev=log_known_hosts, $path="known_hosts"]); Log::create_stream(Known::HOSTS_LOG, [$columns=HostsInfo, $ev=log_known_hosts, $path="known_hosts"]);
@ -43,17 +149,15 @@ event bro_init()
event connection_established(c: connection) &priority=5 event connection_established(c: connection) &priority=5
{ {
if ( c$orig$state != TCP_ESTABLISHED )
return;
if ( c$resp$state != TCP_ESTABLISHED )
return;
local id = c$id; local id = c$id;
for ( host in set(id$orig_h, id$resp_h) ) for ( host in set(id$orig_h, id$resp_h) )
{ if ( addr_matches_host(host, host_tracking) )
if ( host !in known_hosts && event Known::host_found([$ts = network_time(), $host = host]);
c$orig$state == TCP_ESTABLISHED &&
c$resp$state == TCP_ESTABLISHED &&
addr_matches_host(host, host_tracking) )
{
add known_hosts[host];
Log::write(Known::HOSTS_LOG, [$ts=network_time(), $host=host]);
}
}
} }

View file

@ -4,6 +4,7 @@
##! during the session, the protocol will also be logged. ##! during the session, the protocol will also be logged.
@load base/utils/directions-and-hosts @load base/utils/directions-and-hosts
@load base/frameworks/cluster
module Known; module Known;
@ -26,14 +27,46 @@ export {
service: set[string] &log; service: set[string] &log;
}; };
## Toggles between different implementations of this script.
## When true, use a Broker data store, else use a regular Bro set
## with keys uniformly distributed over proxy nodes in cluster
## operation.
const use_service_store = T &redef;
## The hosts whose services should be tracked and logged. ## The hosts whose services should be tracked and logged.
## See :bro:type:`Host` for possible choices. ## See :bro:type:`Host` for possible choices.
const service_tracking = LOCAL_HOSTS &redef; const service_tracking = LOCAL_HOSTS &redef;
type AddrPortPair: record {
host: addr;
p: port;
};
## Holds the set of all known services. Keys in the store are
## :bro:type:`Known::AddrPortPair` and their associated value is
## always the boolean value of "true".
global service_store: Cluster::StoreInfo;
## The Broker topic name to use for :bro:see:`Known::service_store`.
const service_store_name = "bro/known/services" &redef;
## The expiry interval of new entries in :bro:see:`Known::service_store`.
## This also changes the interval at which services get logged.
const service_store_expiry = 1day &redef;
## The timeout interval to use for operations against
## :bro:see:`Known::service_store`.
const service_store_timeout = 15sec &redef;
## Tracks the set of daily-detected services for preventing the logging ## Tracks the set of daily-detected services for preventing the logging
## of duplicates, but can also be inspected by other scripts for ## of duplicates, but can also be inspected by other scripts for
## different purposes. ## different purposes.
global known_services: set[addr, port] &create_expire=1day &synchronized; ##
## In cluster operation, this set is uniformly distributed across
## proxy nodes.
##
## This set is automatically populated and shouldn't be directly modified.
global services: set[addr, port] &create_expire=1day;
## Event that can be handled to access the :bro:type:`Known::ServicesInfo` ## Event that can be handled to access the :bro:type:`Known::ServicesInfo`
## record as it is sent on to the logging framework. ## record as it is sent on to the logging framework.
@ -46,27 +79,89 @@ redef record connection += {
known_services_done: bool &default=F; known_services_done: bool &default=F;
}; };
event bro_init() &priority=5
event bro_init()
{ {
Log::create_stream(Known::SERVICES_LOG, [$columns=ServicesInfo, if ( ! Known::use_service_store )
$ev=log_known_services, return;
$path="known_services"]);
Known::service_store = Cluster::create_store(Known::service_store_name);
} }
event log_it(ts: time, a: addr, p: port, services: set[string]) event service_info_commit(info: ServicesInfo)
{
if ( [a, p] !in known_services )
{
add known_services[a, p];
local i: ServicesInfo; {
i$ts=ts; if ( ! Known::use_service_store )
i$host=a; return;
i$port_num=p;
i$port_proto=get_port_transport_proto(p); local key = AddrPortPair($host = info$host, $p = info$port_num);
i$service=services;
Log::write(Known::SERVICES_LOG, i); when ( local r = Broker::put_unique(Known::service_store$store, key,
T, Known::service_store_expiry) )
{
if ( r$status == Broker::SUCCESS )
{
if ( r$result as bool )
Log::write(Known::SERVICES_LOG, info);
} }
else
Reporter::error(fmt("%s: data store put_unique failure",
Known::service_store_name));
}
timeout Known::service_store_timeout
{
Log::write(Known::SERVICES_LOG, info);
}
}
event known_service_add(info: ServicesInfo)
{
if ( Known::use_service_store )
return;
if ( [info$host, info$port_num] in Known::services )
return;
add Known::services[info$host, info$port_num];
@if ( ! Cluster::is_enabled() ||
Cluster::local_node_type() == Cluster::PROXY )
Log::write(Known::SERVICES_LOG, info);
@endif
}
event Cluster::node_up(name: string, id: string)
{
if ( Known::use_service_store )
return;
if ( Cluster::local_node_type() != Cluster::WORKER )
return;
# Drop local suppression cache on workers to force HRW key repartitioning.
Known::services = set();
}
event Cluster::node_down(name: string, id: string)
{
if ( Known::use_service_store )
return;
if ( Cluster::local_node_type() != Cluster::WORKER )
return;
# Drop local suppression cache on workers to force HRW key repartitioning.
Known::services = set();
}
event service_info_commit(info: ServicesInfo)
{
if ( Known::use_service_store )
return;
local key = cat(info$host, info$port_num);
Cluster::publish_hrw(Cluster::proxy_pool, key, known_service_add, info);
event known_service_add(info);
} }
function known_services_done(c: connection) function known_services_done(c: connection)
@ -74,18 +169,31 @@ function known_services_done(c: connection)
local id = c$id; local id = c$id;
c$known_services_done = T; c$known_services_done = T;
if ( ! addr_matches_host(id$resp_h, service_tracking) || if ( ! addr_matches_host(id$resp_h, service_tracking) )
"ftp-data" in c$service || # don't include ftp data sessions
("DNS" in c$service && c$resp$size == 0) ) # for dns, require that the server talks.
return; return;
# If no protocol was detected, wait a short if ( |c$service| == 1 )
# time before attempting to log in case a protocol is detected {
# on another connection. if ( "ftp-data" in c$service )
# Don't include ftp data sessions.
return;
if ( "DNS" in c$service && c$resp$size == 0 )
# For dns, require that the server talks.
return;
}
local info = ServicesInfo($ts = network_time(), $host = id$resp_h,
$port_num = id$resp_p,
$port_proto = get_port_transport_proto(id$resp_p),
$service = c$service);
# If no protocol was detected, wait a short time before attempting to log
# in case a protocol is detected on another connection.
if ( |c$service| == 0 ) if ( |c$service| == 0 )
schedule 5min { log_it(network_time(), id$resp_h, id$resp_p, c$service) }; schedule 5min { service_info_commit(info) };
else else
event log_it(network_time(), id$resp_h, id$resp_p, c$service); event service_info_commit(info);
} }
event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=-5 event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=-5
@ -96,6 +204,19 @@ event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &pr
# Handle the connection ending in case no protocol was ever detected. # Handle the connection ending in case no protocol was ever detected.
event connection_state_remove(c: connection) &priority=-5 event connection_state_remove(c: connection) &priority=-5
{ {
if ( ! c$known_services_done && c$resp$state == TCP_ESTABLISHED ) if ( c$known_services_done )
return;
if ( c$resp$state != TCP_ESTABLISHED )
return;
known_services_done(c); known_services_done(c);
} }
event bro_init() &priority=5
{
Log::create_stream(Known::SERVICES_LOG, [$columns=ServicesInfo,
$ev=log_known_services,
$path="known_services"]);
}

View file

@ -31,25 +31,40 @@ event signature_match(state: signature_state, msg: string, data: string) &priori
local si: Software::Info; local si: Software::Info;
si = [$name=msg, $unparsed_version=msg, $host=c$id$resp_h, $host_p=c$id$resp_p, $software_type=WEB_APPLICATION]; si = [$name=msg, $unparsed_version=msg, $host=c$id$resp_h, $host_p=c$id$resp_p, $software_type=WEB_APPLICATION];
si$url = build_url_http(c$http); si$url = build_url_http(c$http);
if ( c$id$resp_h in Software::tracked && Software::found(c$id, si);
si$name in Software::tracked[c$id$resp_h] ) }
event Software::register(info: Software::Info) &priority=5
{ {
if ( info$host !in Software::tracked )
return;
local ss = Software::tracked[info$host];
if ( info$name !in ss )
return;
local old_info = ss[info$name];
if ( ! old_info?$url )
return;
if ( ! info?$url )
return;
# If the new url is a substring of an existing, known url then let's # If the new url is a substring of an existing, known url then let's
# use that as the new url for the software. # use that as the new url for the software.
# PROBLEM: different version of the same software on the same server with a shared root path # PROBLEM: different version of the same software on the same server with a shared root path
local is_substring = 0; local is_substring = 0;
if ( Software::tracked[c$id$resp_h][si$name]?$url &&
|si$url| <= |Software::tracked[c$id$resp_h][si$name]$url| )
is_substring = strstr(Software::tracked[c$id$resp_h][si$name]$url, si$url);
if ( is_substring == 1 ) if ( |info$url| <= |old_info$url| )
{ is_substring = strstr(old_info$url, info$url);
Software::tracked[c$id$resp_h][si$name]$url = si$url;
if ( is_substring != 1 )
return;
old_info$url = info$url;
# Force the software to be logged because it indicates a URL # Force the software to be logged because it indicates a URL
# closer to the root of the site. # closer to the root of the site.
si$force_log = T; info$force_log = T;
}
}
Software::found(c$id, si);
} }

View file

@ -4,6 +4,7 @@
@load base/utils/directions-and-hosts @load base/utils/directions-and-hosts
@load base/protocols/ssl @load base/protocols/ssl
@load base/files/x509 @load base/files/x509
@load base/frameworks/cluster
module Known; module Known;
@ -30,26 +31,138 @@ export {
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS. ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS.
const cert_tracking = LOCAL_HOSTS &redef; const cert_tracking = LOCAL_HOSTS &redef;
## Toggles between different implementations of this script.
## When true, use a Broker data store, else use a regular Bro set
## with keys uniformly distributed over proxy nodes in cluster
## operation.
const use_cert_store = T &redef;
type AddrCertHashPair: record {
host: addr;
hash: string;
};
## Holds the set of all known certificates. Keys in the store are of
## type :bro:type:`Known::AddrCertHashPair` and their associated value is
## always the boolean value of "true".
global cert_store: Cluster::StoreInfo;
## The Broker topic name to use for :bro:see:`Known::cert_store`.
const cert_store_name = "bro/known/certs" &redef;
## The expiry interval of new entries in :bro:see:`Known::cert_store`.
## This also changes the interval at which certs get logged.
const cert_store_expiry = 1day &redef;
## The timeout interval to use for operations against
## :bro:see:`Known::cert_store`.
const cert_store_timeout = 15sec &redef;
## The set of all known certificates to store for preventing duplicate ## The set of all known certificates to store for preventing duplicate
## logging. It can also be used from other scripts to ## logging. It can also be used from other scripts to
## inspect if a certificate has been seen in use. The string value ## inspect if a certificate has been seen in use. The string value
## in the set is for storing the DER formatted certificate' SHA1 hash. ## in the set is for storing the DER formatted certificate' SHA1 hash.
global certs: set[addr, string] &create_expire=1day &synchronized &redef; ##
## In cluster operation, this set is uniformly distributed across
## proxy nodes.
global certs: set[addr, string] &create_expire=1day &redef;
## Event that can be handled to access the loggable record as it is sent ## Event that can be handled to access the loggable record as it is sent
## on to the logging framework. ## on to the logging framework.
global log_known_certs: event(rec: CertsInfo); global log_known_certs: event(rec: CertsInfo);
} }
event bro_init() &priority=5 event bro_init()
{ {
Log::create_stream(Known::CERTS_LOG, [$columns=CertsInfo, $ev=log_known_certs, $path="known_certs"]); if ( ! Known::use_cert_store )
return;
Known::cert_store = Cluster::create_store(Known::cert_store_name);
}
event Known::cert_found(info: CertsInfo, hash: string)
{
if ( ! Known::use_cert_store )
return;
local key = AddrCertHashPair($host = info$host, $hash = hash);
when ( local r = Broker::put_unique(Known::cert_store$store, key,
T, Known::cert_store_expiry) )
{
if ( r$status == Broker::SUCCESS )
{
if ( r$result as bool )
Log::write(Known::CERTS_LOG, info);
}
else
Reporter::error(fmt("%s: data store put_unique failure",
Known::cert_store_name));
}
timeout Known::cert_store_timeout
{
# Can't really tell if master store ended up inserting a key.
Log::write(Known::CERTS_LOG, info);
}
}
event known_cert_add(info: CertsInfo, hash: string)
{
if ( Known::use_cert_store )
return;
if ( [info$host, hash] in Known::certs )
return;
add Known::certs[info$host, hash];
@if ( ! Cluster::is_enabled() ||
Cluster::local_node_type() == Cluster::PROXY )
Log::write(Known::CERTS_LOG, info);
@endif
}
event Known::cert_found(info: CertsInfo, hash: string)
{
if ( Known::use_cert_store )
return;
local key = cat(info$host, hash);
Cluster::publish_hrw(Cluster::proxy_pool, key, known_cert_add, info, hash);
event known_cert_add(info, hash);
}
event Cluster::node_up(name: string, id: string)
{
if ( Known::use_cert_store )
return;
if ( Cluster::local_node_type() != Cluster::WORKER )
return;
Known::certs = table();
}
event Cluster::node_down(name: string, id: string)
{
if ( Known::use_cert_store )
return;
if ( Cluster::local_node_type() != Cluster::WORKER )
return;
Known::certs = table();
} }
event ssl_established(c: connection) &priority=3 event ssl_established(c: connection) &priority=3
{ {
if ( ! c$ssl?$cert_chain || |c$ssl$cert_chain| < 1 || if ( ! c$ssl?$cert_chain )
! c$ssl$cert_chain[0]?$x509 ) return;
if ( |c$ssl$cert_chain| < 1 )
return;
if ( ! c$ssl$cert_chain[0]?$x509 )
return; return;
local fuid = c$ssl$cert_chain_fuids[0]; local fuid = c$ssl$cert_chain_fuids[0];
@ -61,16 +174,21 @@ event ssl_established(c: connection) &priority=3
return; return;
} }
local host = c$id$resp_h;
if ( ! addr_matches_host(host, cert_tracking) )
return;
local hash = c$ssl$cert_chain[0]$sha1; local hash = c$ssl$cert_chain[0]$sha1;
local cert = c$ssl$cert_chain[0]$x509$certificate; local cert = c$ssl$cert_chain[0]$x509$certificate;
local info = CertsInfo($ts = network_time(), $host = host,
local host = c$id$resp_h;
if ( [host, hash] !in certs && addr_matches_host(host, cert_tracking) )
{
add certs[host, hash];
Log::write(Known::CERTS_LOG, [$ts=network_time(), $host=host,
$port_num = c$id$resp_p, $subject = cert$subject, $port_num = c$id$resp_p, $subject = cert$subject,
$issuer_subject = cert$issuer, $issuer_subject = cert$issuer,
$serial=cert$serial]); $serial = cert$serial);
event Known::cert_found(info, hash);
} }
event bro_init() &priority=5
{
Log::create_stream(Known::CERTS_LOG, [$columns=CertsInfo, $ev=log_known_certs, $path="known_certs"]);
} }

View file

@ -3,6 +3,7 @@
# Also caches all intermediate certificates encountered so far and use them # Also caches all intermediate certificates encountered so far and use them
# for future validations. # for future validations.
@load base/frameworks/cluster
@load base/frameworks/notice @load base/frameworks/notice
@load base/protocols/ssl @load base/protocols/ssl
@ -61,12 +62,13 @@ export {
global intermediate_cache: table[string] of vector of opaque of x509; global intermediate_cache: table[string] of vector of opaque of x509;
@if ( Cluster::is_enabled() ) @if ( Cluster::is_enabled() )
@load base/frameworks/cluster event bro_init()
redef Cluster::manager2worker_events += /SSL::intermediate_add/; {
redef Cluster::worker2manager_events += /SSL::new_intermediate/; Broker::auto_publish(Cluster::worker_topic, SSL::intermediate_add);
Broker::auto_publish(Cluster::manager_topic, SSL::new_intermediate);
}
@endif @endif
function add_to_cache(key: string, value: vector of opaque of x509) function add_to_cache(key: string, value: vector of opaque of x509)
{ {
intermediate_cache[key] = value; intermediate_cache[key] = value;

View file

@ -9,7 +9,6 @@
# The base/ scripts are all loaded by default and not included here. # The base/ scripts are all loaded by default and not included here.
# @load frameworks/communication/listen.bro
# @load frameworks/control/controllee.bro # @load frameworks/control/controllee.bro
# @load frameworks/control/controller.bro # @load frameworks/control/controller.bro
@load frameworks/dpd/detect-protocols.bro @load frameworks/dpd/detect-protocols.bro

View file

@ -156,6 +156,7 @@ set(bro_SUBDIR_LIBS CACHE INTERNAL "subdir libraries" FORCE)
set(bro_PLUGIN_LIBS CACHE INTERNAL "plugin libraries" FORCE) set(bro_PLUGIN_LIBS CACHE INTERNAL "plugin libraries" FORCE)
add_subdirectory(analyzer) add_subdirectory(analyzer)
add_subdirectory(broker)
add_subdirectory(broxygen) add_subdirectory(broxygen)
add_subdirectory(file_analysis) add_subdirectory(file_analysis)
add_subdirectory(input) add_subdirectory(input)
@ -163,14 +164,6 @@ add_subdirectory(iosource)
add_subdirectory(logging) add_subdirectory(logging)
add_subdirectory(probabilistic) add_subdirectory(probabilistic)
if ( ENABLE_BROKER )
add_subdirectory(broker)
else ()
# Just to satisfy coverage unit tests until new Broker-based
# communication is enabled by default.
add_subdirectory(broker-dummy)
endif ()
set(bro_SUBDIRS set(bro_SUBDIRS
# Order is important here. # Order is important here.
${bro_PLUGIN_LIBS} ${bro_PLUGIN_LIBS}

View file

@ -54,6 +54,38 @@ void Event::Describe(ODesc* d) const
d->Add("("); d->Add("(");
} }
void Event::Dispatch(bool no_remote)
{
if ( src == SOURCE_BROKER )
no_remote = true;
if ( event_serializer )
{
SerialInfo info(event_serializer);
event_serializer->Serialize(&info, handler->Name(), args);
}
if ( handler->ErrorHandler() )
reporter->BeginErrorHandler();
try
{
handler->Call(args, no_remote);
}
catch ( InterpreterException& e )
{
// Already reported.
}
if ( obj )
// obj->EventDone();
Unref(obj);
if ( handler->ErrorHandler() )
reporter->EndErrorHandler();
}
EventMgr::EventMgr() EventMgr::EventMgr()
{ {
head = tail = 0; head = tail = 0;

View file

@ -34,34 +34,7 @@ protected:
// This method is protected to make sure that everybody goes through // This method is protected to make sure that everybody goes through
// EventMgr::Dispatch(). // EventMgr::Dispatch().
void Dispatch(bool no_remote = false) void Dispatch(bool no_remote = false);
{
if ( event_serializer )
{
SerialInfo info(event_serializer);
event_serializer->Serialize(&info, handler->Name(), args);
}
if ( handler->ErrorHandler() )
reporter->BeginErrorHandler();
try
{
handler->Call(args, no_remote);
}
catch ( InterpreterException& e )
{
// Already reported.
}
if ( obj )
// obj->EventDone();
Unref(obj);
if ( handler->ErrorHandler() )
reporter->EndErrorHandler();
}
EventHandlerPtr handler; EventHandlerPtr handler;
val_list* args; val_list* args;

View file

@ -5,10 +5,8 @@
#include "RemoteSerializer.h" #include "RemoteSerializer.h"
#include "NetVar.h" #include "NetVar.h"
#ifdef ENABLE_BROKER
#include "broker/Manager.h" #include "broker/Manager.h"
#include "broker/Data.h" #include "broker/Data.h"
#endif
EventHandler::EventHandler(const char* arg_name) EventHandler::EventHandler(const char* arg_name)
{ {
@ -32,19 +30,16 @@ EventHandler::operator bool() const
return enabled && ((local && local->HasBodies()) return enabled && ((local && local->HasBodies())
|| receivers.length() || receivers.length()
|| generate_always || generate_always
#ifdef ENABLE_BROKER || ! auto_publish.empty());
|| ! auto_remote_send.empty()
// TODO: and require a subscriber interested in a topic or unsolicited flags?
#endif
);
} }
FuncType* EventHandler::FType() FuncType* EventHandler::FType(bool check_export)
{ {
if ( type ) if ( type )
return type; return type;
ID* id = lookup_ID(name, current_module.c_str()); ID* id = lookup_ID(name, current_module.c_str(), false, false,
check_export);
if ( ! id ) if ( ! id )
return 0; return 0;
@ -84,14 +79,11 @@ void EventHandler::Call(val_list* vl, bool no_remote)
remote_serializer->SendCall(&info, receivers[i], name, vl); remote_serializer->SendCall(&info, receivers[i], name, vl);
} }
#ifdef ENABLE_BROKER if ( ! auto_publish.empty() )
if ( ! auto_remote_send.empty() )
{ {
// TODO: also short-circuit based on interested subscribers/flags? // Send event in form [name, xs...] where xs represent the arguments.
broker::message msg; broker::vector xs;
msg.reserve(vl->length() + 1); xs.reserve(vl->length());
msg.emplace_back(Name());
bool valid_args = true; bool valid_args = true;
for ( auto i = 0; i < vl->length(); ++i ) for ( auto i = 0; i < vl->length(); ++i )
@ -99,30 +91,33 @@ void EventHandler::Call(val_list* vl, bool no_remote)
auto opt_data = bro_broker::val_to_data((*vl)[i]); auto opt_data = bro_broker::val_to_data((*vl)[i]);
if ( opt_data ) if ( opt_data )
msg.emplace_back(move(*opt_data)); xs.emplace_back(move(*opt_data));
else else
{ {
valid_args = false; valid_args = false;
auto_remote_send.clear(); auto_publish.clear();
reporter->Error("failed auto-remote event '%s', disabled", reporter->Error("failed auto-remote event '%s', disabled", Name());
Name());
break; break;
} }
} }
if ( valid_args ) if ( valid_args )
{ {
for ( auto it = auto_remote_send.begin(); for ( auto it = auto_publish.begin(); ; )
it != auto_remote_send.end(); ++it )
{ {
if ( std::next(it) == auto_remote_send.end() ) const auto& topic = *it;
broker_mgr->Event(it->first, move(msg), it->second); ++it;
if ( it != auto_publish.end() )
broker_mgr->PublishEvent(topic, Name(), xs);
else else
broker_mgr->Event(it->first, msg, it->second); {
broker_mgr->PublishEvent(topic, Name(), std::move(xs));
break;
}
} }
} }
} }
#endif
} }
if ( local ) if ( local )

View file

@ -4,7 +4,7 @@
#define EVENTHANDLER #define EVENTHANDLER
#include <assert.h> #include <assert.h>
#include <map> #include <unordered_set>
#include <string> #include <string>
#include "List.h" #include "List.h"
#include "BroList.h" #include "BroList.h"
@ -22,24 +22,22 @@ public:
const char* Name() { return name; } const char* Name() { return name; }
Func* LocalHandler() { return local; } Func* LocalHandler() { return local; }
FuncType* FType(); FuncType* FType(bool check_export = true);
void SetLocalHandler(Func* f); void SetLocalHandler(Func* f);
void AddRemoteHandler(SourceID peer); void AddRemoteHandler(SourceID peer);
void RemoveRemoteHandler(SourceID peer); void RemoveRemoteHandler(SourceID peer);
#ifdef ENABLE_BROKER void AutoPublish(std::string topic)
void AutoRemote(std::string topic, int flags)
{ {
auto_remote_send[std::move(topic)] = flags; auto_publish.insert(std::move(topic));
} }
void AutoRemoteStop(const std::string& topic) void AutoUnpublish(const std::string& topic)
{ {
auto_remote_send.erase(topic); auto_publish.erase(topic);
} }
#endif
void Call(val_list* vl, bool no_remote = false); void Call(val_list* vl, bool no_remote = false);
@ -81,9 +79,7 @@ private:
typedef List(SourceID) receiver_list; typedef List(SourceID) receiver_list;
receiver_list receivers; receiver_list receivers;
#ifdef ENABLE_BROKER std::unordered_set<std::string> auto_publish;
std::map<std::string, int> auto_remote_send; // topic -> flags
#endif
}; };
// Encapsulates a ptr to an event handler to overload the boolean operator. // Encapsulates a ptr to an event handler to overload the boolean operator.

View file

@ -18,8 +18,6 @@
const char* expr_name(BroExprTag t) const char* expr_name(BroExprTag t)
{ {
static char errbuf[512];
static const char* expr_names[int(NUM_EXPRS)] = { static const char* expr_names[int(NUM_EXPRS)] = {
"name", "const", "name", "const",
"(*)", "(*)",
@ -31,7 +29,7 @@ const char* expr_name(BroExprTag t)
"$=", "in", "<<>>", "$=", "in", "<<>>",
"()", "event", "schedule", "()", "event", "schedule",
"coerce", "record_coerce", "table_coerce", "coerce", "record_coerce", "table_coerce",
"sizeof", "flatten" "sizeof", "flatten", "cast", "is"
}; };
if ( int(t) >= NUM_EXPRS ) if ( int(t) >= NUM_EXPRS )
@ -4537,13 +4535,21 @@ Val* CallExpr::Eval(Frame* f) const
if ( func_val && v ) if ( func_val && v )
{ {
const ::Func* func = func_val->AsFunc(); const ::Func* func = func_val->AsFunc();
calling_expr = this;
const CallExpr* current_call = f ? f->GetCall() : 0; const CallExpr* current_call = f ? f->GetCall() : 0;
call_stack.emplace_back(CallInfo{this, func});
if ( f ) if ( f )
f->SetCall(this); f->SetCall(this);
ret = func->Call(v, f); // No try/catch here; we pass exceptions upstream. try
{
ret = func->Call(v, f);
}
catch ( ... )
{
call_stack.pop_back();
throw;
}
if ( f ) if ( f )
f->SetCall(current_call); f->SetCall(current_call);
@ -4551,7 +4557,7 @@ Val* CallExpr::Eval(Frame* f) const
// Don't Unref() the arguments, as Func::Call already did that. // Don't Unref() the arguments, as Func::Call already did that.
delete v; delete v;
calling_expr = 0; call_stack.pop_back();
} }
else else
delete_vals(v); delete_vals(v);
@ -5203,6 +5209,112 @@ bool RecordAssignExpr::DoUnserialize(UnserialInfo* info)
return true; return true;
} }
CastExpr::CastExpr(Expr* arg_op, BroType* t) : UnaryExpr(EXPR_CAST, arg_op)
{
auto stype = Op()->Type();
::Ref(t);
SetType(t);
if ( ! can_cast_value_to_type(stype, t) )
ExprError("cast not supported");
}
Val* CastExpr::Eval(Frame* f) const
{
if ( IsError() )
return 0;
Val* v = op->Eval(f);
if ( ! v )
return 0;
Val* nv = cast_value_to_type(v, Type());
if ( nv )
{
Unref(v);
return nv;
}
ODesc d;
d.Add("cannot cast value of type '");
v->Type()->Describe(&d);
d.Add("' to type '");
Type()->Describe(&d);
d.Add("'");
Unref(v);
reporter->ExprRuntimeError(this, "%s", d.Description());
return 0; // not reached.
}
void CastExpr::ExprDescribe(ODesc* d) const
{
Op()->Describe(d);
d->Add(" as ");
Type()->Describe(d);
}
IMPLEMENT_SERIAL(CastExpr, SER_CAST_EXPR);
bool CastExpr::DoSerialize(SerialInfo* info) const
{
DO_SERIALIZE(SER_CAST_EXPR, UnaryExpr);
return true;
}
bool CastExpr::DoUnserialize(UnserialInfo* info)
{
DO_UNSERIALIZE(UnaryExpr);
return true;
}
IsExpr::IsExpr(Expr* arg_op, BroType* arg_t) : UnaryExpr(EXPR_IS, arg_op)
{
t = arg_t;
::Ref(t);
SetType(base_type(TYPE_BOOL));
}
IsExpr::~IsExpr()
{
Unref(t);
}
Val* IsExpr::Fold(Val* v) const
{
if ( IsError() )
return 0;
if ( can_cast_value_to_type(v, t) )
return new Val(1, TYPE_BOOL);
else
return new Val(0, TYPE_BOOL);
}
void IsExpr::ExprDescribe(ODesc* d) const
{
Op()->Describe(d);
d->Add(" is ");
t->Describe(d);
}
IMPLEMENT_SERIAL(IsExpr, SER_IS_EXPR_ /* sic */);
bool IsExpr::DoSerialize(SerialInfo* info) const
{
DO_SERIALIZE(SER_IS_EXPR_, UnaryExpr);
return true;
}
bool IsExpr::DoUnserialize(UnserialInfo* info)
{
DO_UNSERIALIZE(UnaryExpr);
return true;
}
Expr* get_assign_expr(Expr* op1, Expr* op2, int is_init) Expr* get_assign_expr(Expr* op1, Expr* op2, int is_init)
{ {
if ( op1->Type()->Tag() == TYPE_RECORD && if ( op1->Type()->Tag() == TYPE_RECORD &&
@ -5212,7 +5324,6 @@ Expr* get_assign_expr(Expr* op1, Expr* op2, int is_init)
return new AssignExpr(op1, op2, is_init); return new AssignExpr(op1, op2, is_init);
} }
int check_and_promote_expr(Expr*& e, BroType* t) int check_and_promote_expr(Expr*& e, BroType* t)
{ {
BroType* et = e->Type(); BroType* et = e->Type();

View file

@ -44,7 +44,9 @@ typedef enum {
EXPR_VECTOR_COERCE, EXPR_VECTOR_COERCE,
EXPR_SIZE, EXPR_SIZE,
EXPR_FLATTEN, EXPR_FLATTEN,
#define NUM_EXPRS (int(EXPR_FLATTEN) + 1) EXPR_CAST,
EXPR_IS,
#define NUM_EXPRS (int(EXPR_IS) + 1)
} BroExprTag; } BroExprTag;
extern const char* expr_name(BroExprTag t); extern const char* expr_name(BroExprTag t);
@ -1044,6 +1046,37 @@ protected:
DECLARE_SERIAL(RecordAssignExpr); DECLARE_SERIAL(RecordAssignExpr);
}; };
class CastExpr : public UnaryExpr {
public:
CastExpr(Expr* op, BroType* t);
protected:
friend class Expr;
CastExpr() { }
Val* Eval(Frame* f) const override;
void ExprDescribe(ODesc* d) const override;
DECLARE_SERIAL(CastExpr);
};
class IsExpr : public UnaryExpr {
public:
IsExpr(Expr* op, BroType* t);
virtual ~IsExpr();
protected:
friend class Expr;
IsExpr() { }
Val* Fold(Val* v) const override;
void ExprDescribe(ODesc* d) const override;
DECLARE_SERIAL(IsExpr);
private:
BroType* t;
};
inline Val* Expr::ExprVal() const inline Val* Expr::ExprVal() const
{ {
if ( ! IsConst() ) if ( ! IsConst() )

View file

@ -50,7 +50,7 @@
extern RETSIGTYPE sig_handler(int signo); extern RETSIGTYPE sig_handler(int signo);
const Expr* calling_expr = 0; vector<CallInfo> call_stack;
bool did_builtin_init = false; bool did_builtin_init = false;
vector<Func*> Func::unique_ids; vector<Func*> Func::unique_ids;
@ -637,10 +637,60 @@ bool BuiltinFunc::DoUnserialize(UnserialInfo* info)
void builtin_error(const char* msg, BroObj* arg) void builtin_error(const char* msg, BroObj* arg)
{ {
if ( calling_expr ) if ( call_stack.empty() )
calling_expr->Error(msg, arg); {
else
reporter->Error(msg, arg); reporter->Error(msg, arg);
return;
}
auto last_call = call_stack.back();
if ( call_stack.size() < 2 )
{
// Don't need to check for wrapper function like "<module>::__<func>"
last_call.call->Error(msg, arg);
return;
}
auto starts_with_double_underscore = [](const std::string& name) -> bool
{ return name.size() > 2 && name[0] == '_' && name[1] == '_'; };
auto last_loc = last_call.call->GetLocationInfo();
std::string last_func = last_call.func->Name();
auto pos = last_func.find_first_of("::");
std::string wrapper_func;
if ( pos == std::string::npos )
{
if ( ! starts_with_double_underscore(last_func) )
{
last_call.call->Error(msg, arg);
return;
}
wrapper_func = last_func.substr(2);
}
else
{
auto module_name = last_func.substr(0, pos);
auto func_name = last_func.substr(pos + 2);
if ( ! starts_with_double_underscore(func_name) )
{
last_call.call->Error(msg, arg);
return;
}
wrapper_func = module_name + "::" + func_name.substr(2);
}
auto parent_call = call_stack[call_stack.size() - 2];
auto parent_func = parent_call.func->Name();
if ( wrapper_func == parent_func )
parent_call.call->Error(msg, arg);
else
last_call.call->Error(msg, arg);
} }
#include "bro.bif.func_h" #include "bro.bif.func_h"
@ -671,6 +721,7 @@ void init_builtin_funcs()
TimerStats = internal_type("TimerStats")->AsRecordType(); TimerStats = internal_type("TimerStats")->AsRecordType();
FileAnalysisStats = internal_type("FileAnalysisStats")->AsRecordType(); FileAnalysisStats = internal_type("FileAnalysisStats")->AsRecordType();
ThreadStats = internal_type("ThreadStats")->AsRecordType(); ThreadStats = internal_type("ThreadStats")->AsRecordType();
BrokerStats = internal_type("BrokerStats")->AsRecordType();
var_sizes = internal_type("var_sizes")->AsTableType(); var_sizes = internal_type("var_sizes")->AsTableType();

View file

@ -140,10 +140,12 @@ extern void init_builtin_funcs_subdirs();
extern bool check_built_in_call(BuiltinFunc* f, CallExpr* call); extern bool check_built_in_call(BuiltinFunc* f, CallExpr* call);
// This global is set prior to the interpreter making a function call. struct CallInfo {
// It's there so that built-in functions can access the location information const CallExpr* call;
// associated with a call when reporting error messages. const Func* func;
extern const Expr* calling_expr; };
extern vector<CallInfo> call_stack;
// This is set to true after the built-in functions have been initialized. // This is set to true after the built-in functions have been initialized.
extern bool did_builtin_init; extern bool did_builtin_init;

View file

@ -33,10 +33,7 @@
#include "iosource/PktSrc.h" #include "iosource/PktSrc.h"
#include "iosource/PktDumper.h" #include "iosource/PktDumper.h"
#include "plugin/Manager.h" #include "plugin/Manager.h"
#ifdef ENABLE_BROKER
#include "broker/Manager.h" #include "broker/Manager.h"
#endif
extern "C" { extern "C" {
#include "setsignal.h" #include "setsignal.h"
@ -312,11 +309,7 @@ void net_run()
} }
#endif #endif
current_iosrc = src; current_iosrc = src;
bool communication_enabled = using_communication; auto communication_enabled = using_communication || broker_mgr->Active();
#ifdef ENABLE_BROKER
communication_enabled |= broker_mgr->Enabled();
#endif
if ( src ) if ( src )
src->Process(); // which will call net_packet_dispatch() src->Process(); // which will call net_packet_dispatch()
@ -334,7 +327,8 @@ void net_run()
} }
} }
else if ( (have_pending_timers || communication_enabled) && else if ( (have_pending_timers || communication_enabled ||
BifConst::exit_only_after_terminate) &&
! pseudo_realtime ) ! pseudo_realtime )
{ {
// Take advantage of the lull to get up to // Take advantage of the lull to get up to
@ -387,6 +381,24 @@ void net_run()
// Check whether we have timers scheduled for // Check whether we have timers scheduled for
// the future on which we need to wait. // the future on which we need to wait.
have_pending_timers = timer_mgr->Size() > 0; have_pending_timers = timer_mgr->Size() > 0;
if ( pseudo_realtime && communication_enabled )
{
auto have_active_packet_source = false;
for ( auto& ps : iosource_mgr->GetPktSrcs() )
{
if ( ps->IsOpen() )
{
have_active_packet_source = true;
break;
}
}
if ( ! have_active_packet_source )
// Can turn off pseudo realtime now
pseudo_realtime = 0;
}
} }
// Get the final statistics now, and not when net_finish() is // Get the final statistics now, and not when net_finish() is

View file

@ -408,6 +408,14 @@ RE_Matcher::RE_Matcher(const char* pat)
AddPat(pat); AddPat(pat);
} }
RE_Matcher::RE_Matcher(const char* exact_pat, const char* anywhere_pat)
{
re_anywhere = new Specific_RE_Matcher(MATCH_ANYWHERE);
re_anywhere->SetPat(anywhere_pat);
re_exact = new Specific_RE_Matcher(MATCH_EXACTLY);
re_exact->SetPat(exact_pat);
}
RE_Matcher::~RE_Matcher() RE_Matcher::~RE_Matcher()
{ {
delete re_anywhere; delete re_anywhere;

View file

@ -173,7 +173,8 @@ class RE_Matcher : SerialObj {
public: public:
RE_Matcher(); RE_Matcher();
explicit RE_Matcher(const char* pat); explicit RE_Matcher(const char* pat);
~RE_Matcher() override; RE_Matcher(const char* exact_pat, const char* anywhere_pat);
virtual ~RE_Matcher() override;
void AddPat(const char* pat); void AddPat(const char* pat);

View file

@ -113,13 +113,13 @@ TraversalCode Scope::Traverse(TraversalCallback* cb) const
ID* lookup_ID(const char* name, const char* curr_module, bool no_global, ID* lookup_ID(const char* name, const char* curr_module, bool no_global,
bool same_module_only) bool same_module_only, bool check_export)
{ {
string fullname = make_full_var_name(curr_module, name); string fullname = make_full_var_name(curr_module, name);
string ID_module = extract_module_name(fullname.c_str()); string ID_module = extract_module_name(fullname.c_str());
bool need_export = ID_module != GLOBAL_MODULE_NAME && bool need_export = check_export && (ID_module != GLOBAL_MODULE_NAME &&
ID_module != curr_module; ID_module != curr_module);
for ( int i = scopes.length() - 1; i >= 0; --i ) for ( int i = scopes.length() - 1; i >= 0; --i )
{ {

View file

@ -64,7 +64,8 @@ extern bool in_debug;
// If no_global is true, don't search in the default "global" namespace. // If no_global is true, don't search in the default "global" namespace.
// This passed ownership of a ref'ed ID to the caller. // This passed ownership of a ref'ed ID to the caller.
extern ID* lookup_ID(const char* name, const char* module, extern ID* lookup_ID(const char* name, const char* module,
bool no_global = false, bool same_module_only=false); bool no_global = false, bool same_module_only = false,
bool check_export = true);
extern ID* install_ID(const char* name, const char* module_name, extern ID* install_ID(const char* name, const char* module_name,
bool is_global, bool is_export); bool is_global, bool is_export);

View file

@ -162,6 +162,8 @@ SERIAL_EXPR(SET_CONSTRUCTOR_EXPR, 41)
SERIAL_EXPR(VECTOR_CONSTRUCTOR_EXPR, 42) SERIAL_EXPR(VECTOR_CONSTRUCTOR_EXPR, 42)
SERIAL_EXPR(TABLE_COERCE_EXPR, 43) SERIAL_EXPR(TABLE_COERCE_EXPR, 43)
SERIAL_EXPR(VECTOR_COERCE_EXPR, 44) SERIAL_EXPR(VECTOR_COERCE_EXPR, 44)
SERIAL_EXPR(CAST_EXPR, 45)
SERIAL_EXPR(IS_EXPR_, 46) // Name conflict with internal SER_IS_EXPR constant.
#define SERIAL_STMT(name, val) SERIAL_CONST(name, val, STMT) #define SERIAL_STMT(name, val) SERIAL_CONST(name, val, STMT)
SERIAL_STMT(STMT, 1) SERIAL_STMT(STMT, 1)

View file

@ -18,7 +18,7 @@ SerializationFormat::~SerializationFormat()
free(output); free(output);
} }
void SerializationFormat::StartRead(char* data, uint32 arg_len) void SerializationFormat::StartRead(const char* data, uint32 arg_len)
{ {
input = data; input = data;
input_len = arg_len; input_len = arg_len;

View file

@ -19,7 +19,7 @@ public:
virtual ~SerializationFormat(); virtual ~SerializationFormat();
// Unserialization. // Unserialization.
virtual void StartRead(char* data, uint32 len); virtual void StartRead(const char* data, uint32 len);
virtual void EndRead(); virtual void EndRead();
virtual bool Read(int* v, const char* tag) = 0; virtual bool Read(int* v, const char* tag) = 0;
@ -87,7 +87,7 @@ protected:
uint32 output_size; uint32 output_size;
uint32 output_pos; uint32 output_pos;
char* input; const char* input;
uint32 input_len; uint32 input_len;
uint32 input_pos; uint32 input_pos;

View file

@ -9,10 +9,7 @@
#include "DNS_Mgr.h" #include "DNS_Mgr.h"
#include "Trigger.h" #include "Trigger.h"
#include "threading/Manager.h" #include "threading/Manager.h"
#ifdef ENABLE_BROKER
#include "broker/Manager.h" #include "broker/Manager.h"
#endif
uint64 killed_by_inactivity = 0; uint64 killed_by_inactivity = 0;
@ -226,25 +223,19 @@ void ProfileLogger::Log()
)); ));
} }
#ifdef ENABLE_BROKER auto cs = broker_mgr->GetStatistics();
auto cs = broker_mgr->ConsumeStatistics();
file->Write(fmt("%0.6f Comm: peers=%zu stores=%zu " file->Write(fmt("%0.6f Comm: peers=%zu stores=%zu "
"store_queries=%zu store_responses=%zu " "pending_queries=%zu "
"outgoing_conn_status=%zu incoming_conn_status=%zu " "events_in=%zu events_out=%zu "
"reports=%zu\n", "logs_in=%zu logs_out=%zu "
network_time, cs.outgoing_peer_count, cs.data_store_count, "ids_in=%zu ids_out=%zu ",
cs.pending_query_count, cs.response_count, network_time, cs.num_peers, cs.num_stores,
cs.outgoing_conn_status_count, cs.incoming_conn_status_count, cs.num_pending_queries,
cs.report_count)); cs.num_events_incoming, cs.num_events_outgoing,
cs.num_logs_incoming, cs.num_logs_outgoing,
for ( const auto& s : cs.print_count ) cs.num_ids_incoming, cs.num_ids_outgoing
file->Write(fmt(" %-25s prints dequeued=%zu\n", s.first.data(), s.second)); ));
for ( const auto& s : cs.event_count )
file->Write(fmt(" %-25s events dequeued=%zu\n", s.first.data(), s.second));
for ( const auto& s : cs.log_count )
file->Write(fmt(" %-25s logs dequeued=%zu\n", s.first.data(), s.second));
#endif
// Script-level state. // Script-level state.
unsigned int size, mem = 0; unsigned int size, mem = 0;

View file

@ -546,8 +546,8 @@ static BroStmtTag get_last_stmt_tag(const Stmt* stmt)
return get_last_stmt_tag(stmts->Stmts()[len - 1]); return get_last_stmt_tag(stmts->Stmts()[len - 1]);
} }
Case::Case(ListExpr* c, Stmt* arg_s) Case::Case(ListExpr* arg_expr_cases, id_list* arg_type_cases, Stmt* arg_s)
: cases(c), s(arg_s) : expr_cases(arg_expr_cases), type_cases(arg_type_cases), s(arg_s)
{ {
BroStmtTag t = get_last_stmt_tag(Body()); BroStmtTag t = get_last_stmt_tag(Body());
@ -557,13 +557,18 @@ Case::Case(ListExpr* c, Stmt* arg_s)
Case::~Case() Case::~Case()
{ {
Unref(cases); Unref(expr_cases);
Unref(s); Unref(s);
loop_over_list((*type_cases), i)
Unref((*type_cases)[i]);
delete type_cases;
} }
void Case::Describe(ODesc* d) const void Case::Describe(ODesc* d) const
{ {
if ( ! Cases() ) if ( ! (expr_cases || type_cases) )
{ {
if ( ! d->IsBinary() ) if ( ! d->IsBinary() )
d->Add("default:"); d->Add("default:");
@ -578,20 +583,49 @@ void Case::Describe(ODesc* d) const
return; return;
} }
const expr_list& e = Cases()->Exprs();
if ( ! d->IsBinary() ) if ( ! d->IsBinary() )
d->Add("case"); d->Add("case");
if ( expr_cases )
{
const expr_list& e = expr_cases->Exprs();
d->AddCount(e.length()); d->AddCount(e.length());
loop_over_list(e, j) loop_over_list(e, i)
{ {
if ( j > 0 && ! d->IsReadable() ) if ( i > 0 && d->IsReadable() )
d->Add(","); d->Add(",");
d->SP(); d->SP();
e[j]->Describe(d); e[i]->Describe(d);
}
}
if ( type_cases )
{
const id_list& t = *type_cases;
d->AddCount(t.length());
loop_over_list(t, i)
{
if ( i > 0 && d->IsReadable() )
d->Add(",");
d->SP();
d->Add("type");
d->SP();
t[i]->Type()->Describe(d);
if ( t[i]->Name() )
{
d->SP();
d->Add("as");
d->SP();
d->Add(t[i]->Name());
}
}
} }
if ( d->IsReadable() ) if ( d->IsReadable() )
@ -607,12 +641,17 @@ TraversalCode Case::Traverse(TraversalCallback* cb) const
{ {
TraversalCode tc; TraversalCode tc;
if ( cases ) if ( expr_cases )
{ {
tc = cases->Traverse(cb); tc = expr_cases->Traverse(cb);
HANDLE_TC_STMT_PRE(tc); HANDLE_TC_STMT_PRE(tc);
} }
if ( type_cases )
{
// No traverse support for types.
}
tc = s->Traverse(cb); tc = s->Traverse(cb);
HANDLE_TC_STMT_PRE(tc); HANDLE_TC_STMT_PRE(tc);
@ -634,17 +673,48 @@ IMPLEMENT_SERIAL(Case, SER_CASE);
bool Case::DoSerialize(SerialInfo* info) const bool Case::DoSerialize(SerialInfo* info) const
{ {
DO_SERIALIZE(SER_CASE, BroObj); DO_SERIALIZE(SER_CASE, BroObj);
return cases->Serialize(info) && this->s->Serialize(info);
if ( ! expr_cases->Serialize(info) )
return false;
id_list empty;
id_list* types = (type_cases ? type_cases : &empty);
if ( ! SERIALIZE(types->length()) )
return false;
loop_over_list((*types), i)
{
if ( ! (*types)[i]->Serialize(info) )
return false;
}
return this->s->Serialize(info);
} }
bool Case::DoUnserialize(UnserialInfo* info) bool Case::DoUnserialize(UnserialInfo* info)
{ {
DO_UNSERIALIZE(BroObj); DO_UNSERIALIZE(BroObj);
cases = (ListExpr*) Expr::Unserialize(info, EXPR_LIST); expr_cases = (ListExpr*) Expr::Unserialize(info, EXPR_LIST);
if ( ! cases ) if ( ! expr_cases )
return false; return false;
int len;
if ( ! UNSERIALIZE(&len) )
return false;
type_cases = new id_list;
while ( len-- )
{
ID* id = ID::Unserialize(info);
if ( ! id )
return false;
type_cases->append(id);
}
this->s = Stmt::Unserialize(info); this->s = Stmt::Unserialize(info);
return this->s != 0; return this->s != 0;
} }
@ -661,7 +731,7 @@ void SwitchStmt::Init()
comp_hash = new CompositeHash(t); comp_hash = new CompositeHash(t);
Unref(t); Unref(t);
case_label_map.SetDeleteFunc(int_del_func); case_label_value_map.SetDeleteFunc(int_del_func);
} }
SwitchStmt::SwitchStmt(Expr* index, case_list* arg_cases) : SwitchStmt::SwitchStmt(Expr* index, case_list* arg_cases) :
@ -669,16 +739,22 @@ SwitchStmt::SwitchStmt(Expr* index, case_list* arg_cases) :
{ {
Init(); Init();
if ( ! is_atomic_type(e->Type()) ) bool have_exprs = false;
e->Error("switch expression must be of an atomic type"); bool have_types = false;
loop_over_list(*cases, i) loop_over_list(*cases, i)
{ {
Case* c = (*cases)[i]; Case* c = (*cases)[i];
ListExpr* le = c->Cases(); ListExpr* le = c->ExprCases();
id_list* tl = c->TypeCases();
if ( le ) if ( le )
{ {
have_exprs = true;
if ( ! is_atomic_type(e->Type()) )
e->Error("switch expression must be of an atomic type when cases are expressions");
if ( ! le->Type()->AsTypeList()->AllMatch(e->Type(), false) ) if ( ! le->Type()->AsTypeList()->AllMatch(e->Type(), false) )
{ {
le->Error("case expression type differs from switch type", e); le->Error("case expression type differs from switch type", e);
@ -736,12 +812,34 @@ SwitchStmt::SwitchStmt(Expr* index, case_list* arg_cases) :
exprs[j]->Error("case label expression isn't constant"); exprs[j]->Error("case label expression isn't constant");
else else
{ {
if ( ! AddCaseLabelMapping(exprs[j]->ExprVal(), i) ) if ( ! AddCaseLabelValueMapping(exprs[j]->ExprVal(), i) )
exprs[j]->Error("duplicate case label"); exprs[j]->Error("duplicate case label");
} }
} }
} }
else if ( tl )
{
have_types = true;
loop_over_list((*tl), j)
{
BroType* ct = (*tl)[j]->Type();
if ( ! can_cast_value_to_type(e->Type(), ct) )
{
c->Error("cannot cast switch expression to case type");
continue;
}
if ( ! AddCaseLabelTypeMapping((*tl)[j], i) )
{
c->Error("duplicate case label");
continue;
}
}
}
else else
{ {
if ( default_case_idx != -1 ) if ( default_case_idx != -1 )
@ -750,6 +848,10 @@ SwitchStmt::SwitchStmt(Expr* index, case_list* arg_cases) :
default_case_idx = i; default_case_idx = i;
} }
} }
if ( have_exprs && have_types )
Error("cannot mix cases with expressions and types");
} }
SwitchStmt::~SwitchStmt() SwitchStmt::~SwitchStmt()
@ -761,7 +863,7 @@ SwitchStmt::~SwitchStmt()
delete comp_hash; delete comp_hash;
} }
bool SwitchStmt::AddCaseLabelMapping(const Val* v, int idx) bool SwitchStmt::AddCaseLabelValueMapping(const Val* v, int idx)
{ {
HashKey* hk = comp_hash->ComputeHash(v, 1); HashKey* hk = comp_hash->ComputeHash(v, 1);
@ -772,7 +874,7 @@ bool SwitchStmt::AddCaseLabelMapping(const Val* v, int idx)
type_name(v->Type()->Tag()), type_name(e->Type()->Tag())); type_name(v->Type()->Tag()), type_name(e->Type()->Tag()));
} }
int* label_idx = case_label_map.Lookup(hk); int* label_idx = case_label_value_map.Lookup(hk);
if ( label_idx ) if ( label_idx )
{ {
@ -780,12 +882,32 @@ bool SwitchStmt::AddCaseLabelMapping(const Val* v, int idx)
return false; return false;
} }
case_label_map.Insert(hk, new int(idx)); case_label_value_map.Insert(hk, new int(idx));
delete hk; delete hk;
return true; return true;
} }
int SwitchStmt::FindCaseLabelMatch(const Val* v) const bool SwitchStmt::AddCaseLabelTypeMapping(ID* t, int idx)
{
for ( auto i : case_label_type_list )
{
if ( same_type(i.first->Type(), t->Type()) )
return false;
}
auto e = std::make_pair(t, idx);
case_label_type_list.push_back(e);
return true;
}
std::pair<int, ID*> SwitchStmt::FindCaseLabelMatch(const Val* v) const
{
int label_idx = -1;
ID* label_id = 0;
// Find matching expression cases.
if ( case_label_value_map.Length() )
{ {
HashKey* hk = comp_hash->ComputeHash(v, 1); HashKey* hk = comp_hash->ComputeHash(v, 1);
@ -794,24 +916,42 @@ int SwitchStmt::FindCaseLabelMatch(const Val* v) const
reporter->PushLocation(e->GetLocationInfo()); reporter->PushLocation(e->GetLocationInfo());
reporter->Error("switch expression type mismatch (%s/%s)", reporter->Error("switch expression type mismatch (%s/%s)",
type_name(v->Type()->Tag()), type_name(e->Type()->Tag())); type_name(v->Type()->Tag()), type_name(e->Type()->Tag()));
return -1; return std::make_pair(-1, nullptr);
} }
int* label_idx = case_label_map.Lookup(hk); if ( auto i = case_label_value_map.Lookup(hk) )
label_idx = *i;
delete hk; delete hk;
}
if ( ! label_idx ) // Find matching type cases.
return default_case_idx; for ( auto i : case_label_type_list )
{
auto id = i.first;
auto type = id->Type();
if ( can_cast_value_to_type(v, type) )
{
label_idx = i.second;
label_id = id;
break;
}
}
if ( label_idx < 0 )
return std::make_pair(default_case_idx, nullptr);
else else
return *label_idx; return std::make_pair(label_idx, label_id);
} }
Val* SwitchStmt::DoExec(Frame* f, Val* v, stmt_flow_type& flow) const Val* SwitchStmt::DoExec(Frame* f, Val* v, stmt_flow_type& flow) const
{ {
Val* rval = 0; Val* rval = 0;
int matching_label_idx = FindCaseLabelMatch(v); auto m = FindCaseLabelMatch(v);
int matching_label_idx = m.first;
ID* matching_id = m.second;
if ( matching_label_idx == -1 ) if ( matching_label_idx == -1 )
return 0; return 0;
@ -820,6 +960,12 @@ Val* SwitchStmt::DoExec(Frame* f, Val* v, stmt_flow_type& flow) const
{ {
const Case* c = (*cases)[i]; const Case* c = (*cases)[i];
if ( matching_id )
{
auto cv = cast_value_to_type(v, matching_id->Type());
f->SetElement(matching_id->Offset(), cv);
}
flow = FLOW_NEXT; flow = FLOW_NEXT;
rval = c->Body()->Exec(f, flow); rval = c->Body()->Exec(f, flow);
@ -841,7 +987,7 @@ int SwitchStmt::IsPure() const
loop_over_list(*cases, i) loop_over_list(*cases, i)
{ {
Case* c = (*cases)[i]; Case* c = (*cases)[i];
if ( ! c->Cases()->IsPure() || ! c->Body()->IsPure() ) if ( ! c->ExprCases()->IsPure() || ! c->Body()->IsPure() )
return 0; return 0;
} }
@ -928,7 +1074,7 @@ bool SwitchStmt::DoUnserialize(UnserialInfo* info)
loop_over_list(*cases, i) loop_over_list(*cases, i)
{ {
const ListExpr* le = (*cases)[i]->Cases(); const ListExpr* le = (*cases)[i]->ExprCases();
if ( ! le ) if ( ! le )
continue; continue;
@ -937,7 +1083,7 @@ bool SwitchStmt::DoUnserialize(UnserialInfo* info)
loop_over_list(exprs, j) loop_over_list(exprs, j)
{ {
if ( ! AddCaseLabelMapping(exprs[j]->ExprVal(), i) ) if ( ! AddCaseLabelValueMapping(exprs[j]->ExprVal(), i) )
return false; return false;
} }
} }

View file

@ -183,11 +183,14 @@ protected:
class Case : public BroObj { class Case : public BroObj {
public: public:
Case(ListExpr* c, Stmt* arg_s); Case(ListExpr* c, id_list* types, Stmt* arg_s);
~Case() override; ~Case() override;
const ListExpr* Cases() const { return cases; } const ListExpr* ExprCases() const { return expr_cases; }
ListExpr* Cases() { return cases; } ListExpr* ExprCases() { return expr_cases; }
const id_list* TypeCases() const { return type_cases; }
id_list* TypeCases() { return type_cases; }
const Stmt* Body() const { return s; } const Stmt* Body() const { return s; }
Stmt* Body() { return s; } Stmt* Body() { return s; }
@ -201,11 +204,12 @@ public:
protected: protected:
friend class Stmt; friend class Stmt;
Case() { cases = 0; s = 0; } Case() { expr_cases = 0; type_cases = 0; s = 0; }
DECLARE_SERIAL(Case); DECLARE_SERIAL(Case);
ListExpr* cases; ListExpr* expr_cases;
id_list* type_cases;
Stmt* s; Stmt* s;
}; };
@ -232,20 +236,27 @@ protected:
// Initialize composite hash and case label map. // Initialize composite hash and case label map.
void Init(); void Init();
// Adds an entry in case_label_map for the given value to associate it // Adds an entry in case_label_value_map for the given value to associate it
// with the given index in the cases list. If the entry already exists, // with the given index in the cases list. If the entry already exists,
// returns false, else returns true. // returns false, else returns true.
bool AddCaseLabelMapping(const Val* v, int idx); bool AddCaseLabelValueMapping(const Val* v, int idx);
// Returns index of a case label that's equal to the value, or // Adds an entry in case_label_type_map for the given type (w/ ID) to
// default_case_idx if no case label matches (which may be -1 if there's // associate it with the given index in the cases list. If an entry
// no default label). // for the type already exists, returns false; else returns true.
int FindCaseLabelMatch(const Val* v) const; bool AddCaseLabelTypeMapping(ID* t, int idx);
// Returns index of a case label that matches the value, or
// default_case_idx if no case label matches (which may be -1 if
// there's no default label). The second tuple element is the ID of
// the matching type-based case if it defines one.
std::pair<int, ID*> FindCaseLabelMatch(const Val* v) const;
case_list* cases; case_list* cases;
int default_case_idx; int default_case_idx;
CompositeHash* comp_hash; CompositeHash* comp_hash;
PDict(int) case_label_map; PDict(int) case_label_value_map;
std::vector<std::pair<ID*, int>> case_label_type_list;
}; };
class AddStmt : public ExprStmt { class AddStmt : public ExprStmt {

View file

@ -6,6 +6,7 @@
#include "Timer.h" #include "Timer.h"
#include "Desc.h" #include "Desc.h"
#include "Serializer.h" #include "Serializer.h"
#include "broker/Manager.h"
// Names of timers in same order than in TimerType. // Names of timers in same order than in TimerType.
const char* TimerNames[] = { const char* TimerNames[] = {
@ -103,6 +104,7 @@ int TimerMgr::Advance(double arg_t, int max_expire)
last_timestamp = 0; last_timestamp = 0;
num_expired = 0; num_expired = 0;
last_advance = timer_mgr->Time(); last_advance = timer_mgr->Time();
broker_mgr->AdvanceTime(arg_t);
return DoAdvance(t, max_expire); return DoAdvance(t, max_expire);
} }

Some files were not shown because too many files have changed in this diff Show more