mirror of
https://github.com/zeek/zeek.git
synced 2025-10-02 06:38:20 +00:00
Merge topic/actor-system throug a squashed commit.
This commit is contained in:
parent
7a6f5020f6
commit
fe7e1ee7f0
466 changed files with 12559 additions and 9655 deletions
3
.gitmodules
vendored
3
.gitmodules
vendored
|
@ -4,9 +4,6 @@
|
|||
[submodule "aux/binpac"]
|
||||
path = aux/binpac
|
||||
url = git://git.bro.org/binpac
|
||||
[submodule "aux/broccoli"]
|
||||
path = aux/broccoli
|
||||
url = git://git.bro.org/broccoli
|
||||
[submodule "aux/broctl"]
|
||||
path = aux/broctl
|
||||
url = git://git.bro.org/broctl
|
||||
|
|
|
@ -2,7 +2,7 @@ project(Bro C CXX)
|
|||
|
||||
# When changing the minimum version here, also adapt
|
||||
# aux/bro-aux/plugin-support/skeleton/CMakeLists.txt
|
||||
cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
|
||||
cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR)
|
||||
|
||||
include(cmake/CommonCMakeConfig.cmake)
|
||||
|
||||
|
@ -95,6 +95,13 @@ FindRequiredPackage(OpenSSL)
|
|||
FindRequiredPackage(BIND)
|
||||
FindRequiredPackage(ZLIB)
|
||||
|
||||
find_package(CAF COMPONENTS core io)
|
||||
if (CAF_FOUND)
|
||||
include_directories(BEFORE ${CAF_INCLUDE_DIRS})
|
||||
else ()
|
||||
list(APPEND MISSING_PREREQ_DESCS CAF)
|
||||
endif ()
|
||||
|
||||
if (NOT BinPAC_ROOT_DIR AND
|
||||
EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/aux/binpac/CMakeLists.txt)
|
||||
add_subdirectory(aux/binpac)
|
||||
|
@ -105,6 +112,12 @@ if (ENABLE_JEMALLOC)
|
|||
find_package(JeMalloc)
|
||||
endif ()
|
||||
|
||||
if ( BISON_VERSION AND BISON_VERSION VERSION_LESS 2.5 )
|
||||
set(MISSING_PREREQS true)
|
||||
list(APPEND MISSING_PREREQ_DESCS
|
||||
" Could not find prerequisite package Bison >= 2.5, found: ${BISON_VERSION}")
|
||||
endif ()
|
||||
|
||||
if (MISSING_PREREQS)
|
||||
foreach (prereq ${MISSING_PREREQ_DESCS})
|
||||
message(SEND_ERROR ${prereq})
|
||||
|
@ -219,12 +232,13 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR})
|
|||
########################################################################
|
||||
## Recurse on sub-directories
|
||||
|
||||
if ( ENABLE_BROKER )
|
||||
add_subdirectory(aux/broker)
|
||||
set(brodeps ${brodeps} broker)
|
||||
add_definitions(-DENABLE_BROKER)
|
||||
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/broker)
|
||||
endif ()
|
||||
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/broker
|
||||
${CMAKE_CURRENT_BINARY_DIR}/aux/broker)
|
||||
include_directories(BEFORE ${CAF_INCLUDE_DIR_CORE})
|
||||
include_directories(BEFORE ${CAF_INCLUDE_DIR_IO})
|
||||
include_directories(BEFORE ${CAF_INCLUDE_DIR_OPENSSL})
|
||||
|
||||
add_subdirectory(src)
|
||||
add_subdirectory(scripts)
|
||||
|
@ -235,7 +249,6 @@ include(CheckOptionalBuildSources)
|
|||
|
||||
CheckOptionalBuildSources(aux/broctl Broctl INSTALL_BROCTL)
|
||||
CheckOptionalBuildSources(aux/bro-aux Bro-Aux INSTALL_AUX_TOOLS)
|
||||
CheckOptionalBuildSources(aux/broccoli Broccoli INSTALL_BROCCOLI)
|
||||
|
||||
########################################################################
|
||||
## Packaging Setup
|
||||
|
@ -275,9 +288,6 @@ message(
|
|||
"\nCXXFLAGS: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${BuildType}}"
|
||||
"\nCPP: ${CMAKE_CXX_COMPILER}"
|
||||
"\n"
|
||||
"\nBroker: ${ENABLE_BROKER}"
|
||||
"\nBroker Python: ${BROKER_PYTHON_BINDINGS}"
|
||||
"\nBroccoli: ${INSTALL_BROCCOLI}"
|
||||
"\nBroctl: ${INSTALL_BROCTL}"
|
||||
"\nAux. Tools: ${INSTALL_AUX_TOOLS}"
|
||||
"\n"
|
||||
|
|
|
@ -1 +1 @@
|
|||
Subproject commit 6484fc12712f6ce97138fec8ca413ec8df12766c
|
||||
Subproject commit a6353cfbf937124d327d3064f09913862d3aff5c
|
46
configure
vendored
46
configure
vendored
|
@ -50,15 +50,10 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
|
|||
(automatically on when perftools is present on Linux)
|
||||
--enable-perftools-debug use Google's perftools for debugging
|
||||
--enable-jemalloc link against jemalloc
|
||||
--enable-ruby build ruby bindings for broccoli (deprecated)
|
||||
--enable-broker enable use of the Broker communication library
|
||||
(requires C++ Actor Framework)
|
||||
--disable-broccoli don't build or install the Broccoli library
|
||||
--disable-broctl don't install Broctl
|
||||
--disable-auxtools don't build or install auxiliary tools
|
||||
--disable-perftools don't try to build with Google Perftools
|
||||
--disable-python don't try to build python bindings for broccoli
|
||||
--disable-pybroker don't try to build python bindings for broker
|
||||
--disable-python don't try to build python bindings for broker
|
||||
|
||||
Required Packages in Non-Standard Locations:
|
||||
--with-openssl=PATH path to OpenSSL install root
|
||||
|
@ -71,18 +66,15 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
|
|||
--with-flex=PATH path to flex executable
|
||||
--with-bison=PATH path to bison executable
|
||||
--with-python=PATH path to Python executable
|
||||
|
||||
Optional Packages in Non-Standard Locations:
|
||||
--with-caf=PATH path to C++ Actor Framework installation
|
||||
(a required Broker dependency)
|
||||
|
||||
Optional Packages in Non-Standard Locations:
|
||||
--with-geoip=PATH path to the libGeoIP install root
|
||||
--with-perftools=PATH path to Google Perftools install root
|
||||
--with-jemalloc=PATH path to jemalloc install root
|
||||
--with-python-lib=PATH path to libpython
|
||||
--with-python-inc=PATH path to Python headers
|
||||
--with-ruby=PATH path to ruby interpreter
|
||||
--with-ruby-lib=PATH path to ruby library
|
||||
--with-ruby-inc=PATH path to ruby headers
|
||||
--with-swig=PATH path to SWIG executable
|
||||
--with-rocksdb=PATH path to RocksDB installation
|
||||
(an optional Broker dependency)
|
||||
|
@ -135,21 +127,17 @@ append_cache_entry BRO_ROOT_DIR PATH $prefix
|
|||
append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/broctl
|
||||
append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro
|
||||
append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc
|
||||
append_cache_entry BROKER_PYTHON_BINDINGS BOOL false
|
||||
append_cache_entry ENABLE_DEBUG BOOL false
|
||||
append_cache_entry ENABLE_PERFTOOLS BOOL false
|
||||
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false
|
||||
append_cache_entry ENABLE_JEMALLOC BOOL false
|
||||
append_cache_entry ENABLE_BROKER BOOL false
|
||||
append_cache_entry BinPAC_SKIP_INSTALL BOOL true
|
||||
append_cache_entry BUILD_SHARED_LIBS BOOL true
|
||||
append_cache_entry INSTALL_AUX_TOOLS BOOL true
|
||||
append_cache_entry INSTALL_BROCCOLI BOOL true
|
||||
append_cache_entry INSTALL_BROCTL BOOL true
|
||||
append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING
|
||||
append_cache_entry ENABLE_MOBILE_IPV6 BOOL false
|
||||
append_cache_entry DISABLE_PERFTOOLS BOOL false
|
||||
append_cache_entry DISABLE_RUBY_BINDINGS BOOL true
|
||||
|
||||
# parse arguments
|
||||
while [ $# -ne 0 ]; do
|
||||
|
@ -221,14 +209,6 @@ while [ $# -ne 0 ]; do
|
|||
--enable-jemalloc)
|
||||
append_cache_entry ENABLE_JEMALLOC BOOL true
|
||||
;;
|
||||
--enable-broker)
|
||||
append_cache_entry ENABLE_BROKER BOOL true
|
||||
;;
|
||||
--disable-broker)
|
||||
;;
|
||||
--disable-broccoli)
|
||||
append_cache_entry INSTALL_BROCCOLI BOOL false
|
||||
;;
|
||||
--disable-broctl)
|
||||
append_cache_entry INSTALL_BROCTL BOOL false
|
||||
;;
|
||||
|
@ -241,12 +221,6 @@ while [ $# -ne 0 ]; do
|
|||
--disable-python)
|
||||
append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
|
||||
;;
|
||||
--disable-pybroker)
|
||||
append_cache_entry DISABLE_PYBROKER BOOL true
|
||||
;;
|
||||
--enable-ruby)
|
||||
append_cache_entry DISABLE_RUBY_BINDINGS BOOL false
|
||||
;;
|
||||
--with-openssl=*)
|
||||
append_cache_entry OPENSSL_ROOT_DIR PATH $optarg
|
||||
;;
|
||||
|
@ -288,26 +262,12 @@ while [ $# -ne 0 ]; do
|
|||
append_cache_entry PYTHON_INCLUDE_DIR PATH $optarg
|
||||
append_cache_entry PYTHON_INCLUDE_PATH PATH $optarg
|
||||
;;
|
||||
--with-ruby=*)
|
||||
append_cache_entry RUBY_EXECUTABLE PATH $optarg
|
||||
;;
|
||||
--with-ruby-lib=*)
|
||||
append_cache_entry RUBY_LIBRARY PATH $optarg
|
||||
;;
|
||||
--with-ruby-inc=*)
|
||||
append_cache_entry RUBY_INCLUDE_DIRS PATH $optarg
|
||||
append_cache_entry RUBY_INCLUDE_PATH PATH $optarg
|
||||
;;
|
||||
--with-swig=*)
|
||||
append_cache_entry SWIG_EXECUTABLE PATH $optarg
|
||||
;;
|
||||
--with-caf=*)
|
||||
append_cache_entry CAF_ROOT_DIR PATH $optarg
|
||||
;;
|
||||
--with-libcaf=*)
|
||||
echo "warning: --with-libcaf deprecated, use --with-caf instead"
|
||||
append_cache_entry CAF_ROOT_DIR PATH $optarg
|
||||
;;
|
||||
--with-rocksdb=*)
|
||||
append_cache_entry ROCKSDB_ROOT_DIR PATH $optarg
|
||||
;;
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
set(BROCCOLI_DOCS_SRC ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html)
|
||||
set(BROCCOLI_DOCS_DST ${CMAKE_BINARY_DIR}/html/broccoli-api)
|
||||
set(SPHINX_INPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_input)
|
||||
set(SPHINX_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_output)
|
||||
set(BROXYGEN_SCRIPT_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/broxygen_script_output)
|
||||
set(BROXYGEN_CACHE_DIR ${CMAKE_CURRENT_BINARY_DIR}/broxygen_cache)
|
||||
set(BROKER_DOCS_SRC ${CMAKE_BINARY_DIR}/aux/broker/doc/html)
|
||||
set(BROKER_DOCS_DST ${CMAKE_BINARY_DIR}/html/broker-manual)
|
||||
|
||||
# Find out what BROPATH to use when executing bro.
|
||||
execute_process(COMMAND ${CMAKE_BINARY_DIR}/bro-path-dev
|
||||
|
@ -61,10 +61,9 @@ add_custom_target(sphinxdoc
|
|||
COMMAND "${CMAKE_COMMAND}" -E create_symlink
|
||||
${SPHINX_OUTPUT_DIR}/html
|
||||
${CMAKE_BINARY_DIR}/html
|
||||
# Copy Broccoli API reference into output dir if it exists.
|
||||
COMMAND test -d ${BROCCOLI_DOCS_SRC} &&
|
||||
( rm -rf ${BROCCOLI_DOCS_DST} &&
|
||||
cp -r ${BROCCOLI_DOCS_SRC} ${BROCCOLI_DOCS_DST} ) || true
|
||||
# Copy Broker manual into output dir.
|
||||
COMMAND rm -rf ${BROKER_DOCS_DST} &&
|
||||
cp -r ${BROKER_DOCS_SRC} ${BROKER_DOCS_DST}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
|
||||
COMMENT "[Sphinx] Generate HTML documentation in ${CMAKE_BINARY_DIR}/html")
|
||||
|
||||
|
@ -77,7 +76,10 @@ add_custom_target(sphinxdoc_clean
|
|||
COMMAND "${CMAKE_COMMAND}" -E remove_directory ${BROXYGEN_CACHE_DIR}
|
||||
VERBATIM)
|
||||
|
||||
if (NOT TARGET doc)
|
||||
add_custom_target(doc)
|
||||
endif ()
|
||||
|
||||
add_custom_target(docclean)
|
||||
add_dependencies(doc sphinxdoc)
|
||||
add_dependencies(docclean sphinxdoc_clean)
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
../../../aux/broccoli/bindings/broccoli-python/README
|
|
@ -1 +0,0 @@
|
|||
../../../aux/broccoli/bindings/broccoli-ruby/README
|
|
@ -1 +0,0 @@
|
|||
../../../aux/broccoli/README
|
|
@ -1 +0,0 @@
|
|||
../../../aux/broccoli/doc/broccoli-manual.rst
|
|
@ -1 +0,0 @@
|
|||
../../../aux/broker/broker-manual.rst
|
|
@ -13,12 +13,7 @@ current, independent component releases.
|
|||
:maxdepth: 1
|
||||
|
||||
BinPAC - A protocol parser generator <binpac/README>
|
||||
Broccoli - The Bro Client Communication Library (README) <broccoli/README>
|
||||
Broccoli - User Manual <broccoli/broccoli-manual>
|
||||
Broccoli Python Bindings <broccoli-python/README>
|
||||
Broccoli Ruby Bindings <broccoli-ruby/README>
|
||||
Broker - Bro's (New) Messaging Library (README) <broker/README>
|
||||
Broker - User Manual <broker/broker-manual.rst>
|
||||
Broker - Bro's (New) Messaging Library <broker/README>
|
||||
BroControl - Interactive Bro management shell <broctl/README>
|
||||
Bro-Aux - Small auxiliary tools for Bro <bro-aux/README>
|
||||
BTest - A unit testing framework <btest/README>
|
||||
|
@ -26,5 +21,5 @@ current, independent component releases.
|
|||
PySubnetTree - Python module for CIDR lookups<pysubnettree/README>
|
||||
trace-summary - Script for generating break-downs of network traffic <trace-summary/README>
|
||||
|
||||
The `Broccoli API Reference <../broccoli-api/index.html>`_ may also be of
|
||||
The `Broker User Manual <../broker-manual/index.html>`_ may also be of
|
||||
interest.
|
||||
|
|
|
@ -259,8 +259,13 @@ class BroDomain(Domain):
|
|||
}
|
||||
|
||||
def clear_doc(self, docname):
|
||||
to_delete = []
|
||||
|
||||
for (typ, name), doc in self.data['objects'].items():
|
||||
if doc == docname:
|
||||
to_delete.append((typ, name))
|
||||
|
||||
for (typ, name) in to_delete:
|
||||
del self.data['objects'][typ, name]
|
||||
|
||||
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
|
||||
|
|
|
@ -1,174 +1,344 @@
|
|||
.. _CAF: https://github.com/actor-framework/actor-framework
|
||||
|
||||
.. _brokercomm-framework:
|
||||
|
||||
======================================
|
||||
Broker-Enabled Communication Framework
|
||||
======================================
|
||||
==============================================
|
||||
Broker-Enabled Communication/Cluster Framework
|
||||
==============================================
|
||||
|
||||
.. rst-class:: opening
|
||||
|
||||
Bro can now use the `Broker Library
|
||||
Bro now uses the `Broker Library
|
||||
<../components/broker/README.html>`_ to exchange information with
|
||||
other Bro processes.
|
||||
other Bro processes. Broker itself uses CAF_ (C++ Actor Framework)
|
||||
internally for connecting nodes and exchanging arbitrary data over
|
||||
networks. Broker then introduces, on top of CAF, a topic-based
|
||||
publish/subscribe communication pattern using a data model that is
|
||||
compatible to Bro's. Broker itself can be utilized outside the
|
||||
context of Bro, with Bro itself making use of only a few predefined
|
||||
Broker message formats that represent Bro events, log entries, etc.
|
||||
|
||||
In summary, the Bro's Broker framework provides basic facilities for
|
||||
connecting broker-enabled peers (e.g. Bro instances) to each other
|
||||
and exchanging messages (e.g. events and logs). With this comes
|
||||
changes in how clusters operate and, since Broker significantly
|
||||
differs from the previous communication framework, there are several
|
||||
changes in the set of scripts that Bro ships with that may break
|
||||
your own customizations. This document aims to describe the changes
|
||||
that have been made, making it easier to port your own scripts. It
|
||||
also gives examples of Broker and the new cluster framework that
|
||||
show off all the new features and capabilities.
|
||||
|
||||
.. contents::
|
||||
|
||||
Porting Guide
|
||||
=============
|
||||
|
||||
Review and use the points below as a guide to port your own scripts
|
||||
to the latest version of Bro, which uses the new cluster and Broker
|
||||
communication framework.
|
||||
|
||||
General Porting Tips
|
||||
--------------------
|
||||
|
||||
- ``@load policy/frameworks/communication/listen`` and
|
||||
``@load base/frameworks/communication`` indicates use of the
|
||||
old communication framework, consider porting to
|
||||
``@load base/frameworks/broker`` and using the Broker API:
|
||||
:doc:`/scripts/base/frameworks/broker/main.bro`
|
||||
|
||||
- The ``&synchronized`` and ``&persistent`` attributes are deprecated,
|
||||
consider using `Data Stores`_ instead.
|
||||
|
||||
- Instead of using e.g. ``Cluster::manager2worker_events`` (and all
|
||||
permutations for every node type), what you'd now use is either
|
||||
:bro:see:`Broker::publish` or :bro:see:`Broker::auto_publish` with
|
||||
either the topic associated with a specific node or class of nodes,
|
||||
like :bro:see:`Cluster::node_topic` or
|
||||
:bro:see:`Cluster::worker_topic`.
|
||||
|
||||
- Instead of using the ``send_id`` BIF, use :bro:see:`Broker::publish_id`.
|
||||
|
||||
- Use :bro:see:`terminate` instead of :bro:see:`terminate_communication`.
|
||||
The later refers to the old communication system and no longer effects
|
||||
the new Broker-based system.
|
||||
|
||||
- For replacing :bro:see:`remote_connection_established` and
|
||||
:bro:see:`remote_connection_closed`, consider :bro:see:`Broker::peer_added`
|
||||
or :bro:see:`Broker::peer_lost`. There's also :bro:see:`Cluster::node_up`
|
||||
and :bro:see:`Cluster::node_down`.
|
||||
|
||||
Notable / Specific Script API Changes
|
||||
-------------------------------------
|
||||
|
||||
- :bro:see:`Software::tracked` is now partitioned among proxy nodes
|
||||
instead of synchronized in its entirety to all nodes.
|
||||
|
||||
- ``Known::known_devices`` is renamed to :bro:see:`Known::device_store`
|
||||
and implemented via the new Broker data store interface.
|
||||
Also use :bro:see:`Known::device_found` instead of updating the
|
||||
store directly directly.
|
||||
|
||||
- ``Known::known_hosts`` is renamed to :bro:see:`Known::host_store` and
|
||||
implemented via the new Broker data store interface.
|
||||
|
||||
- ``Known::known_services`` is renamed to :bro:see:`Known::service_store`
|
||||
and implemented via the new Broker data store interface.
|
||||
|
||||
- ``Known::certs`` is renamed to :bro:see:`Known::cert_store`
|
||||
and implemented via the new Broker data store interface.
|
||||
|
||||
New Cluster Layout / API
|
||||
========================
|
||||
|
||||
Layout / Topology
|
||||
-----------------
|
||||
|
||||
The cluster topology has changed.
|
||||
|
||||
- Proxy nodes no longer connect with each other.
|
||||
|
||||
- Each worker node connects to all proxies.
|
||||
|
||||
- All node types connect to all logger nodes and the manager node.
|
||||
|
||||
This looks like:
|
||||
|
||||
.. figure:: broker/cluster-layout.png
|
||||
|
||||
Some general suggestions as to the purpose/utilization of each node type:
|
||||
|
||||
- Workers: are a good first choice for doing the brunt of any work you need
|
||||
done. They should be spending a lot of time performing the actual job
|
||||
of parsing/analyzing incoming data from packets, so you might choose
|
||||
to look at them as doing a "first pass" analysis and then deciding how
|
||||
the results should be shared with other nodes in the cluster.
|
||||
|
||||
- Proxies: serve as intermediaries for data storage and work/calculation
|
||||
offloading. Good for helping offload work or data in a scalable and
|
||||
distributed way. i.e. since any given worker is connected to all
|
||||
proxies and can agree on an "arbitrary key -> proxy node" mapping
|
||||
(more on that later), you can partition work or data amongst them in a
|
||||
uniform manner. e.g. you might choose to use proxies as a method of
|
||||
sharing non-persistent state or as a "second pass" analysis for any
|
||||
work that you don't want interferring with the workers' capacity to
|
||||
keep up with capturing and parsing packets.
|
||||
|
||||
- Manager: this node will be good at performing decisions that require a
|
||||
global view of things since it is in a centralized location, connected
|
||||
to everything. However, that also makes it easy to overload, so try
|
||||
to use it sparingly and only for tasks that must be done in a
|
||||
centralized or authoritative location. Optionally, for some
|
||||
deployments, the Manager can also serve as the sole Logger.
|
||||
|
||||
- Loggers: these nodes should simply be spending their time writing out
|
||||
logs to disk and not used for much else. In the default cluster
|
||||
configuration, logs get distributed among available loggers in a
|
||||
round-robin fashion, providing failover capability should any given
|
||||
logger temporarily go offline.
|
||||
|
||||
Data Management/Sharing Strategies
|
||||
==================================
|
||||
|
||||
There's maybe no single, best approach or pattern to use when you need a
|
||||
Bro script to store or share long-term state and data. The two
|
||||
approaches that were previously used were either using ``&synchronized``
|
||||
attribute on tables/sets or by explicitly sending events to specific
|
||||
nodes on which you wanted data to be stored. The former is no longer
|
||||
possible, though there are several new possibilities that the new
|
||||
Broker/Cluster framework offer, namely distributed data store and data
|
||||
partitioning APIs.
|
||||
|
||||
Data Stores
|
||||
-----------
|
||||
|
||||
Broker provides a distributed key-value store interface with optional
|
||||
choice of using a persistent backend. For more detail, see
|
||||
:ref:`this example <data_store_example>`.
|
||||
|
||||
Some ideas/considerations/scenarios when deciding whether to use
|
||||
a data store for your use-case:
|
||||
|
||||
* If you need the full data set locally in order to achieve low-latency
|
||||
queries using data store "clones" can provide that.
|
||||
|
||||
* If you need data that persists across restarts of Bro processes, then
|
||||
data stores can also provide that.
|
||||
|
||||
* If the data you want to store is complex (tables, sets, records) or
|
||||
you expect to read, modify, and store back, then data stores may not
|
||||
be able to provide simple, race-free methods of performing the pattern
|
||||
of logic that you want.
|
||||
|
||||
* If the data set you want to store is excessively large, that's still
|
||||
problematic even for stores that use a persistent backend as they are
|
||||
implemented in a way that requires a full snapshot of the store's
|
||||
contents to fit in memory (this limitation may change in the future).
|
||||
|
||||
Data Partitioning
|
||||
-----------------
|
||||
|
||||
New data partitioning strategies are available using the API in
|
||||
:doc:`/scripts/base/frameworks/cluster/pools.bro`.
|
||||
|
||||
One example strategy is to use Highest Random Weight (HRW) hashing to
|
||||
partition data tables amongst proxy nodes. e.g. using
|
||||
:bro:see:`Cluster::publish_hrw`. This could allow clusters to
|
||||
be scaled more easily than the approach of "the entire data set gets
|
||||
synchronized to all nodes" as the solution to memory limitations becomes
|
||||
"just add another proxy node". It may also take away some of the
|
||||
messaging load that used to be required to synchronize data sets across
|
||||
all nodes.
|
||||
|
||||
The tradeoff of this approach, is that nodes that leave the pool (due to
|
||||
crashing, etc.) cause a temporary gap in the total data set until
|
||||
workers start hashing keys to a new proxy node that is still alive,
|
||||
causing data to now be located and updated there.
|
||||
|
||||
Broker Framework Examples
|
||||
=========================
|
||||
|
||||
The broker framework provides basic facilities for connecting Bro instances
|
||||
to eachother and exchanging messages, like events or logs.
|
||||
|
||||
See :doc:`/scripts/base/frameworks/broker/main.bro` for an overview
|
||||
of the main Broker API.
|
||||
|
||||
.. _broker_topic_naming:
|
||||
|
||||
Topic Naming Conventions
|
||||
------------------------
|
||||
|
||||
All Broker-based messaging involves two components: the information you
|
||||
want to send (e.g. an event w/ its arguments) along with an associated
|
||||
topic name string. The topic strings are used as a filtering mechanism:
|
||||
Broker uses a publish/subscribe communication pattern where peers
|
||||
advertise interest in topic **prefixes** and only receive messages which
|
||||
match one of their prefix subscriptions.
|
||||
|
||||
Broker itself supports arbitrary topic strings, however Bro generally
|
||||
follows certain conventions in choosing these topics to help avoid
|
||||
conflicts and generally make them easier to remember.
|
||||
|
||||
As a reminder of how topic subscriptions work, subscribers advertise
|
||||
interest in a topic **prefix** and then receive any messages publish by a
|
||||
peer to a topic name that starts with that prefix. E.g. Alice
|
||||
subscribes to the "alice/dogs" prefix, then would receive the following
|
||||
message topics published by Bob:
|
||||
|
||||
- topic "alice/dogs/corgi"
|
||||
- topic "alice/dogs"
|
||||
- topic "alice/dogsarecool/oratleastilikethem"
|
||||
|
||||
Alice would **not** receive the following message topics published by Bob:
|
||||
|
||||
- topic "alice/cats/siamese"
|
||||
- topic "alice/cats"
|
||||
- topic "alice/dog"
|
||||
- topic "alice"
|
||||
|
||||
Note that the topics aren't required to form a slash-delimited hierarchy,
|
||||
the subscription matching is purely a byte-per-byte prefix comparison.
|
||||
|
||||
However, Bro scripts generally will follow a topic naming hierarchy and
|
||||
any given script will make the topic names it uses apparent via some
|
||||
redef'able constant in its export section. Generally topics that Bro
|
||||
scripts use will be along the lines of "bro/<namespace>/<specifics>"
|
||||
with "<namespace>" being the script's module name (in all-undercase).
|
||||
For example, you might expect an imaginary "Pretend" framework to
|
||||
publish/subscribe using topic names like "bro/pretend/my_cool_event".
|
||||
|
||||
For cluster operation, see :doc:`/scripts/base/frameworks/cluster/main.bro`
|
||||
for a list of topics that are useful for steering published events to
|
||||
the various node classes. E.g. you have the ability to broadcast to all
|
||||
directly-connected nodes, only those of a given class (e.g. just workers),
|
||||
or to a specific node within a class.
|
||||
|
||||
The topic names that logs get published under are a bit nuanced. In the
|
||||
default cluster configuration, they are round-robin published to
|
||||
explicit topic names that identify a single logger. In standalone Bro
|
||||
processes, logs get published to the topic indicated by
|
||||
:bro:see:`Broker::default_log_topic_prefix`.
|
||||
|
||||
For those writing their own scripts which need new topic names, a
|
||||
suggestion would be to avoid prefixing any new topics/prefixes with
|
||||
"bro/" as any changes in scripts shipping with Bro will use that prefix
|
||||
and it's better to not risk unintended conflicts.
|
||||
|
||||
Connecting to Peers
|
||||
===================
|
||||
-------------------
|
||||
|
||||
Communication via Broker must first be turned on via
|
||||
:bro:see:`Broker::enable`.
|
||||
|
||||
Bro can accept incoming connections by calling :bro:see:`Broker::listen`
|
||||
and then monitor connection status updates via the
|
||||
:bro:see:`Broker::incoming_connection_established` and
|
||||
:bro:see:`Broker::incoming_connection_broken` events.
|
||||
Bro can accept incoming connections by calling :bro:see:`Broker::listen`.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-listener.bro
|
||||
|
||||
Bro can initiate outgoing connections by calling :bro:see:`Broker::connect`
|
||||
and then monitor connection status updates via the
|
||||
:bro:see:`Broker::outgoing_connection_established`,
|
||||
:bro:see:`Broker::outgoing_connection_broken`, and
|
||||
:bro:see:`Broker::outgoing_connection_incompatible` events.
|
||||
Bro can initiate outgoing connections by calling :bro:see:`Broker::peer`.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-connector.bro
|
||||
|
||||
Remote Printing
|
||||
===============
|
||||
|
||||
To receive remote print messages, first use the
|
||||
:bro:see:`Broker::subscribe_to_prints` function to advertise to peers a
|
||||
topic prefix of interest and then create an event handler for
|
||||
:bro:see:`Broker::print_handler` to handle any print messages that are
|
||||
received.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-listener.bro
|
||||
|
||||
To send remote print messages, just call :bro:see:`Broker::send_print`.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-connector.bro
|
||||
|
||||
Notice that the subscriber only used the prefix "bro/print/", but is
|
||||
able to receive messages with full topics of "bro/print/hi",
|
||||
"bro/print/stuff", and "bro/print/bye". The model here is that the
|
||||
publisher of a message checks for all subscribers who advertised
|
||||
interest in a prefix of that message's topic and sends it to them.
|
||||
|
||||
Message Format
|
||||
--------------
|
||||
|
||||
For other applications that want to exchange print messages with Bro,
|
||||
the Broker message format is simply:
|
||||
|
||||
.. code:: c++
|
||||
|
||||
broker::message{std::string{}};
|
||||
In either case, connection status updates are monitored via the
|
||||
:bro:see:`Broker::peer_added` and :bro:see:`Broker::peer_lost` events.
|
||||
|
||||
Remote Events
|
||||
=============
|
||||
-------------
|
||||
|
||||
Receiving remote events is similar to remote prints. Just use the
|
||||
:bro:see:`Broker::subscribe_to_events` function and possibly define any
|
||||
new events along with handlers that peers may want to send.
|
||||
To receive remote events, you need to first subscribe to a "topic" to which
|
||||
the events are being sent. A topic is just a string chosen by the sender,
|
||||
and named in a way that helps organize events into various categories.
|
||||
See the :ref:`topic naming conventions section <broker_topic_naming>` for
|
||||
more on how topics work and are chosen.
|
||||
|
||||
Use the :bro:see:`Broker::subscribe` function to subscribe to topics and
|
||||
define any event handlers for events that peers will send.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-listener.bro
|
||||
|
||||
There are two different ways to send events. The first is to call the
|
||||
:bro:see:`Broker::send_event` function directly. The second option is to call
|
||||
the :bro:see:`Broker::auto_event` function where you specify a
|
||||
particular event that will be automatically sent to peers whenever the
|
||||
event is called locally via the normal event invocation syntax.
|
||||
There are two different ways to send events.
|
||||
|
||||
The first is to call the :bro:see:`Broker::publish` function which you can
|
||||
supply directly with the event and its arguments or give it the return value of
|
||||
:bro:see:`Broker::make_event` in case you need to send the same event/args
|
||||
multiple times. When publishing events like this, local event handlers for
|
||||
the event are not called.
|
||||
|
||||
The second option is to call the :bro:see:`Broker::auto_publish` function where
|
||||
you specify a particular event that will be automatically sent to peers
|
||||
whenever the event is called locally via the normal event invocation syntax.
|
||||
When auto-publishing events, local event handler for the event are called
|
||||
in addition to sending the event to any subscribed peers.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-connector.bro
|
||||
|
||||
Again, the subscription model is prefix-based.
|
||||
|
||||
Message Format
|
||||
--------------
|
||||
|
||||
For other applications that want to exchange event messages with Bro,
|
||||
the Broker message format is:
|
||||
|
||||
.. code:: c++
|
||||
|
||||
broker::message{std::string{}, ...};
|
||||
|
||||
The first parameter is the name of the event and the remaining ``...``
|
||||
are its arguments, which are any of the supported Broker data types as
|
||||
they correspond to the Bro types for the event named in the first
|
||||
parameter of the message.
|
||||
Note that the subscription model is prefix-based, meaning that if you subscribe
|
||||
to the "bro/events" topic prefix you would receive events that are published
|
||||
to topic names "bro/events/foo" and "bro/events/bar" but not "bro/misc".
|
||||
|
||||
Remote Logging
|
||||
==============
|
||||
--------------
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/testlog.bro
|
||||
|
||||
Use the :bro:see:`Broker::subscribe_to_logs` function to advertise interest
|
||||
in logs written by peers. The topic names that Bro uses are implicitly of the
|
||||
form "bro/log/<stream-name>".
|
||||
To toggle remote logs, redef :bro:see:`Log::enable_remote_logging`.
|
||||
Use the :bro:see:`Broker::subscribe` function to advertise interest
|
||||
in logs written by peers. The topic names that Bro uses are determined by
|
||||
:bro:see:`Broker::log_topic`.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-listener.bro
|
||||
|
||||
To send remote logs either redef :bro:see:`Log::enable_remote_logging` or
|
||||
use the :bro:see:`Broker::enable_remote_logs` function. The former
|
||||
allows any log stream to be sent to peers while the latter enables remote
|
||||
logging for particular streams.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-connector.bro
|
||||
|
||||
Message Format
|
||||
--------------
|
||||
Note that logging events are only raised locally on the node that performs
|
||||
the :bro:see:`Log::write` and not automatically published to peers.
|
||||
|
||||
For other applications that want to exchange log messages with Bro,
|
||||
the Broker message format is:
|
||||
|
||||
.. code:: c++
|
||||
|
||||
broker::message{broker::enum_value{}, broker::record{}};
|
||||
|
||||
The enum value corresponds to the stream's :bro:see:`Log::ID` value, and
|
||||
the record corresponds to a single entry of that log's columns record,
|
||||
in this case a ``Test::Info`` value.
|
||||
|
||||
Tuning Access Control
|
||||
=====================
|
||||
|
||||
By default, endpoints do not restrict the message topics that they send
|
||||
to peers and do not restrict what message topics and data store
|
||||
identifiers get advertised to peers. These are the default
|
||||
:bro:see:`Broker::EndpointFlags` supplied to :bro:see:`Broker::enable`.
|
||||
|
||||
If not using the ``auto_publish`` flag, one can use the
|
||||
:bro:see:`Broker::publish_topic` and :bro:see:`Broker::unpublish_topic`
|
||||
functions to manipulate the set of message topics (must match exactly)
|
||||
that are allowed to be sent to peer endpoints. These settings take
|
||||
precedence over the per-message ``peers`` flag supplied to functions
|
||||
that take a :bro:see:`Broker::SendFlags` such as :bro:see:`Broker::send_print`,
|
||||
:bro:see:`Broker::send_event`, :bro:see:`Broker::auto_event` or
|
||||
:bro:see:`Broker::enable_remote_logs`.
|
||||
|
||||
If not using the ``auto_advertise`` flag, one can use the
|
||||
:bro:see:`Broker::advertise_topic` and
|
||||
:bro:see:`Broker::unadvertise_topic` functions
|
||||
to manipulate the set of topic prefixes that are allowed to be
|
||||
advertised to peers. If an endpoint does not advertise a topic prefix, then
|
||||
the only way peers can send messages to it is via the ``unsolicited``
|
||||
flag of :bro:see:`Broker::SendFlags` and choosing a topic with a matching
|
||||
prefix (i.e. the full topic may be longer than the receiver's prefix, just the
|
||||
prefix needs to match).
|
||||
.. _data_store_example:
|
||||
|
||||
Distributed Data Stores
|
||||
=======================
|
||||
-----------------------
|
||||
|
||||
There are three flavors of key-value data store interfaces: master,
|
||||
clone, and frontend.
|
||||
See :doc:`/scripts/base/frameworks/broker/store.bro` for an overview
|
||||
of the Broker data store API.
|
||||
|
||||
A frontend is the common interface to query and modify data stores.
|
||||
That is, a clone is a specific type of frontend and a master is also a
|
||||
specific type of frontend, but a standalone frontend can also exist to
|
||||
e.g. query and modify the contents of a remote master store without
|
||||
actually "owning" any of the contents itself.
|
||||
There are two flavors of key-value data store interfaces: master and clone.
|
||||
|
||||
A master data store can be cloned from remote peers which may then
|
||||
perform lightweight, local queries against the clone, which
|
||||
|
@ -177,24 +347,149 @@ modify their content directly, instead they send modifications to the
|
|||
centralized master store which applies them and then broadcasts them to
|
||||
all clones.
|
||||
|
||||
Master and clone stores get to choose what type of storage backend to
|
||||
use. E.g. In-memory versus SQLite for persistence. Note that if clones
|
||||
are used, then data store sizes must be able to fit within memory
|
||||
regardless of the storage backend as a single snapshot of the master
|
||||
store is sent in a single chunk to initialize the clone.
|
||||
Master stores get to choose what type of storage backend to
|
||||
use. E.g. In-memory versus SQLite for persistence.
|
||||
|
||||
Data stores also support expiration on a per-key basis either using an
|
||||
absolute point in time or a relative amount of time since the entry's
|
||||
last modification time.
|
||||
Data stores also support expiration on a per-key basis using an amount of
|
||||
time relative to the entry's last modification time.
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-listener.bro
|
||||
|
||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-connector.bro
|
||||
|
||||
In the above example, if a local copy of the store contents isn't
|
||||
needed, just replace the :bro:see:`Broker::create_clone` call with
|
||||
:bro:see:`Broker::create_frontend`. Queries will then be made against
|
||||
the remote master store instead of the local clone.
|
||||
|
||||
Note that all data store queries must be made within Bro's asynchronous
|
||||
``when`` statements and must specify a timeout block.
|
||||
|
||||
Cluster Framework Examples
|
||||
==========================
|
||||
|
||||
This section contains a few brief examples of how various communication
|
||||
patterns one might use when developing Bro scripts that are to operate in
|
||||
the context of a cluster.
|
||||
|
||||
Manager Sending Events To Workers
|
||||
---------------------------------
|
||||
|
||||
This is fairly straightforward, we just need a topic name which we know
|
||||
all workers are subscribed to, combined with the event we want to send them.
|
||||
|
||||
.. code:: bro
|
||||
|
||||
event manager_to_workers(s: string)
|
||||
{
|
||||
print "got event from manager", s;
|
||||
}
|
||||
|
||||
event some_event_handled_on_manager()
|
||||
{
|
||||
Broker::publish(Cluster::worker_topic, manager_to_workers,
|
||||
"hello v0");
|
||||
|
||||
# If you know this event is only handled on the manager, you don't
|
||||
# need any of the following conditions, they're just here as an
|
||||
# example of how you can further discriminate based on node identity.
|
||||
|
||||
# Can check based on the name of the node.
|
||||
if ( Cluster::node == "manager" )
|
||||
Broker::publish(Cluster::worker_topic, manager_to_workers,
|
||||
"hello v1");
|
||||
|
||||
# Can check based on the type of the node.
|
||||
if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
Broker::publish(Cluster::worker_topic, manager_to_workers,
|
||||
"hello v2");
|
||||
|
||||
# The run-time overhead of the above conditions can even be
|
||||
# eliminated by using the following conditional directives.
|
||||
# It's evaluated once per node at parse-time and, if false,
|
||||
# any code within is just ignored / treated as not existing at all.
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
Broker::publish(Cluster::worker_topic, manager_to_workers,
|
||||
"hello v3");
|
||||
@endif
|
||||
}
|
||||
|
||||
Worker Sending Events To Manager
|
||||
--------------------------------
|
||||
|
||||
This should look almost identical to the previous case of sending an event
|
||||
from the manager to workers, except it simply changes the topic name to
|
||||
one to which the manager is subscribed.
|
||||
|
||||
.. code:: bro
|
||||
|
||||
event worker_to_manager(worker_name: string)
|
||||
{
|
||||
print "got event from worker", worker_name;
|
||||
}
|
||||
|
||||
event some_event_handled_on_worker()
|
||||
{
|
||||
Broker::publish(Cluster::manager_topic, worker_to_manager,
|
||||
Cluster::node);
|
||||
}
|
||||
|
||||
Worker Sending Events To All Workers
|
||||
------------------------------------
|
||||
|
||||
Since workers are not directly connected to each other in the cluster
|
||||
topology, this type of communication is a bit different than what we
|
||||
did before. Instead of using :bro:see:`Broker::publish` we use different
|
||||
"relay" calls to hop the message from a different node that *is* connected.
|
||||
|
||||
.. code:: bro
|
||||
|
||||
event worker_to_workers(worker_name: string)
|
||||
{
|
||||
print "got event from worker", worker_name;
|
||||
}
|
||||
|
||||
event some_event_handled_on_worker()
|
||||
{
|
||||
# We know the manager is connected to all workers, so we could
|
||||
# choose to relay the event across it. Note that sending the event
|
||||
# this way will not allow the manager to handle it, even if it
|
||||
# does have an event handler.
|
||||
Broker::relay(Cluster::manager_topic, Cluster::worker_topic,
|
||||
worker_to_workers, Cluster::node + " (via manager)");
|
||||
|
||||
# We also know that any given proxy is connected to all workers,
|
||||
# though now we have a choice of which proxy to use. If we
|
||||
# want to distribute the work associated with relaying uniformly,
|
||||
# we can use a round-robin strategy. The key used here is simply
|
||||
# used by the cluster framework internally to keep track of
|
||||
# which node is up next in the round-robin.
|
||||
Cluster::relay_rr(Cluster::proxy_pool, "example_key",
|
||||
Cluster::worker_topic, worker_to_workers,
|
||||
Cluster::node + " (via a proxy)");
|
||||
}
|
||||
|
||||
Worker Distributing Events Uniformly Across Proxies
|
||||
---------------------------------------------------
|
||||
|
||||
If you want to offload some data/work from a worker to your proxies,
|
||||
we can make use of a `Highest Random Weight (HRW) hashing
|
||||
<https://en.wikipedia.org/wiki/Rendezvous_hashing>`_ distribution strategy
|
||||
to uniformly map an arbitrary key space across all available proxies.
|
||||
|
||||
.. code:: bro
|
||||
|
||||
event worker_to_proxies(worker_name: string)
|
||||
{
|
||||
print "got event from worker", worker_name;
|
||||
}
|
||||
|
||||
global my_counter = 0;
|
||||
|
||||
event some_event_handled_on_worker()
|
||||
{
|
||||
# The key here is used to choose which proxy shall receive
|
||||
# the event. Different keys may map to different nodes, but
|
||||
# any given key always maps to the same node provided the
|
||||
# pool of nodes remains consistent. If a proxy goes offline,
|
||||
# that key maps to a different node until the original comes
|
||||
# back up.
|
||||
Cluster::publish_hrw(Cluster::proxy_pool,
|
||||
cat("example_key", ++my_counter),
|
||||
worker_to_proxies, Cluster::node);
|
||||
}
|
||||
|
|
BIN
doc/frameworks/broker/cluster-layout.png
Normal file
BIN
doc/frameworks/broker/cluster-layout.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 52 KiB |
2
doc/frameworks/broker/cluster-layout.xml
Normal file
2
doc/frameworks/broker/cluster-layout.xml
Normal file
|
@ -0,0 +1,2 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<mxfile userAgent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36" version="8.5.2" editor="www.draw.io" type="device"><diagram name="Page-1" id="42789a77-a242-8287-6e28-9cd8cfd52e62">7Vxdc6M2FP01flwPkpCAx0022T60MzuTTrt9lEGx2cXIg0ns9NdXGAkjAbExH3a6+CXWRRIgnXN075WcGbpf778mdLP6gwcsmkEr2M/QlxkUH9cRfzLLW25xLDs3LJMwyE3gaHgK/2XSaEnrSxiwrVYx5TxKw41u9HkcMz/VbDRJ+E6v9swj/a4bumQVw5NPo6r17zBIV9IKLOt44TcWLlfy1i6WFxbU/7lM+Ess7zeD6PnwyS+vqepL1t+uaMB3JRN6mKH7hPM0/7be37MoG1s1bHm7x4arxXMnLE7PasAoAo7jWQH0qeeiTwDlXbzS6IWpdzg8afqmRocFYrBkMeax+HN3eGWWdQpEaZWuI/k1ogsW3RWjcs8jnhybbVOapJ+zCTNsj2GU9WCpsoQIFmUWB6qFH9HtNvT/XIVxfkE2A3mp1OgHS9M3WaYvKRcmnqQrvuQxjX7nfCNbbdOE/2TqKcXsecRBn0lxRaEhq/vM4/SRrsMoA/lfLAloTKVZ3glAWS51aB0+wh7zh9I4Hh55H6bfs7eeY1n6R47B8Vll1eo8y6nf8pfEZ02TK6lEkyVLG+p4eZ1sjksdS/R8ZXzN0uRNVEhYRNPwVScMlbxbFvVkUzFj9K1UYcPDON2Wev6WGUQFJSGKaVJAkAN1HJv1ERSDVm5h23a5hfiSP4MqlV7maDqw41ym2BNTfmmmoJtgCiDtmAKgMzpTKkRZCwAsWTKDJBKje7fIvi3TYrbKDIoisehnaN+twpQ9behhznbC79Dpc+TVgQpqXc0u+Xwd+vLCKZbVgJo2gFowqFSTgQAzpzvYaRQu44yxAq9ihN4B8CtLUrZ/F3rqKtJnHCl13R2dG0+aViW3RkGrDqwluLRDA6yRzRwG2w2NFRA2Cd+/fYJ1CMkt4r7l+rcGnMDxFpZ1DnCenxnx/dsEDoQ6cACuAsfBVeDgIYBT55lWgRPxpVCWTHM+Bk7IguCzBEYEEfBWceLpa5BNrggTrwITCYka92ya+x4WFw9fbfKnYFUD1BfHu2tadLoBp3C4S+52/uBixr6XC8oRrzrtDa654T+399XhGb56L655xZe2CZp7jo0wsICNXAc5GhM8OEeQeBhjghwM9d7zV5IdvhfZemBORC8WcsQdsHLWixXZtudZ7454S2IDixi3yUelcpvmKEOjXjvHvy4gbvLswOTZdVXtikTXUKdRtY2ocUzHDk+iPa5oq4xJRbSt0UTbvppGE2jkRxC5TIix8jZUR8ToaEypJROHRuJQwZU5shyNL3OL1FKmPjlZS6ZDZ99YEoqpz5T9MJTDMcwbhGHINhgG8GUMs1HfDGtIgVpGxAK0TbLOCVBnIufIUUmFaSXaEtdgLTxvoeuRmxeFLGgYsiJX55jlXEZW5OgdAWh01BNZIR6WrEo4S+TU44YSE5uj3YvJcediG9eToy7mzpE+mJvmVXFpDwJDc3/XNrcaGmDYWuqhHi57PYOHtFN2E0+nHPRRcDYmmoZxSdqiAiEj+rWMUzIn6qsweKj6vWuc2w2mx8UUjyp8lWX9qlCFwygh0dfR7krYIaxEoBtQavSshJmqos07OZaTppU1xMzondI00lKjOtbvW9Oq23x1yeYdT37mW38fNNu8aMa4mW12fXar2eYi6TvGhjCsQcYUfl9jU9Ca26a/MG7gfU5SbIqzNToOJNfTufLb2fWZE2Jmsj9ITmyYBDZRbm+RwHYv4yp29Y4ANNazvnJi7s
A5MVzh5gU+/yCh4fhbl4CcAcxhYkPXzJKBgfCk9kQHypI57aS+AWHVPOiVIscGEB4eaUCMDeOoAAtqc4/71qxT8eipePLM671roNcPaN/bmholmzaqMPayWzBMasvqZz6N2bwtCfqYAnRiK6inRa5tzr9luqtT9d6jrbrzQY3ZsY97FvP/kR0b89cSdWnTXzgMf0eFB0qOXXVnYpgjltA4T3XxEUvbPElmQr6vA2DGT1aMtaC771j9xevEslGTXa1cqDnKkKLlwwrPvKdk1znu1TDkLE5jKlfDMzh1LjmLnwgoF8fsqC9ykl7JKYrHf6eSVz/+zxr08B8=</diagram></mxfile>
|
|
@ -1,18 +1,12 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "connector";
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect("127.0.0.1", broker_port, 1sec);
|
||||
Broker::peer("127.0.0.1");
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string,
|
||||
peer_port: port,
|
||||
peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::outgoing_connection_established",
|
||||
peer_address, peer_port, peer_name;
|
||||
print "peer added", endpoint;
|
||||
terminate();
|
||||
}
|
||||
|
|
|
@ -1,20 +1,17 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "listener";
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::listen(broker_port, "127.0.0.1");
|
||||
Broker::listen("127.0.0.1");
|
||||
}
|
||||
|
||||
event Broker::incoming_connection_established(peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::incoming_connection_established", peer_name;
|
||||
print "peer added", endpoint;
|
||||
}
|
||||
|
||||
event Broker::incoming_connection_broken(peer_name: string)
|
||||
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::incoming_connection_broken", peer_name;
|
||||
print "peer lost", endpoint;
|
||||
terminate();
|
||||
}
|
||||
|
|
|
@ -1,31 +1,35 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "connector";
|
||||
global my_event: event(msg: string, c: count);
|
||||
global my_auto_event: event(msg: string, c: count);
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect("127.0.0.1", broker_port, 1sec);
|
||||
Broker::auto_event("bro/event/my_auto_event", my_auto_event);
|
||||
Broker::peer("127.0.0.1");
|
||||
Broker::auto_publish("bro/event/my_auto_event", my_auto_event);
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string,
|
||||
peer_port: port,
|
||||
peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::outgoing_connection_established",
|
||||
peer_address, peer_port, peer_name;
|
||||
Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "hi", 0));
|
||||
print "peer added", endpoint;
|
||||
Broker::publish("bro/event/my_event", my_event, "hi", 0);
|
||||
event my_auto_event("stuff", 88);
|
||||
Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "...", 1));
|
||||
Broker::publish("bro/event/my_event", my_event, "...", 1);
|
||||
event my_auto_event("more stuff", 51);
|
||||
Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "bye", 2));
|
||||
local e = Broker::make_event(my_event, "bye", 2);
|
||||
Broker::publish("bro/event/my_event", e);
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_broken(peer_address: string,
|
||||
peer_port: port)
|
||||
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
terminate();
|
||||
}
|
||||
|
||||
event my_event(msg: string, c: count)
|
||||
{
|
||||
print "got my_event", msg, c;
|
||||
}
|
||||
|
||||
event my_auto_event(msg: string, c: count)
|
||||
{
|
||||
print "got my_auto_event", msg, c;
|
||||
}
|
||||
|
|
|
@ -1,20 +1,17 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "listener";
|
||||
global msg_count = 0;
|
||||
global my_event: event(msg: string, c: count);
|
||||
global my_auto_event: event(msg: string, c: count);
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::subscribe_to_events("bro/event/");
|
||||
Broker::listen(broker_port, "127.0.0.1");
|
||||
Broker::subscribe("bro/event/");
|
||||
Broker::listen("127.0.0.1");
|
||||
}
|
||||
|
||||
event Broker::incoming_connection_established(peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::incoming_connection_established", peer_name;
|
||||
print "peer added", endpoint;
|
||||
}
|
||||
|
||||
event my_event(msg: string, c: count)
|
||||
|
|
|
@ -1,17 +1,11 @@
|
|||
@load ./testlog
|
||||
|
||||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "connector";
|
||||
redef Log::enable_local_logging = F;
|
||||
redef Log::enable_remote_logging = F;
|
||||
global n = 0;
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::enable_remote_logs(Test::LOG);
|
||||
Broker::connect("127.0.0.1", broker_port, 1sec);
|
||||
Broker::peer("127.0.0.1");
|
||||
}
|
||||
|
||||
event do_write()
|
||||
|
@ -24,17 +18,19 @@ event do_write()
|
|||
event do_write();
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string,
|
||||
peer_port: port,
|
||||
peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::outgoing_connection_established",
|
||||
peer_address, peer_port, peer_name;
|
||||
print "peer added", endpoint;
|
||||
event do_write();
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_broken(peer_address: string,
|
||||
peer_port: port)
|
||||
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
terminate();
|
||||
}
|
||||
|
||||
event Test::log_test(rec: Test::Info)
|
||||
{
|
||||
print "wrote log", rec;
|
||||
Broker::publish("bro/logs/forward/test", Test::log_test, rec);
|
||||
}
|
||||
|
|
|
@ -1,24 +1,21 @@
|
|||
@load ./testlog
|
||||
|
||||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "listener";
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::subscribe_to_logs("bro/log/Test::LOG");
|
||||
Broker::listen(broker_port, "127.0.0.1");
|
||||
Broker::subscribe("bro/logs");
|
||||
Broker::listen("127.0.0.1");
|
||||
}
|
||||
|
||||
event Broker::incoming_connection_established(peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "Broker::incoming_connection_established", peer_name;
|
||||
print "peer added", endpoint;
|
||||
}
|
||||
|
||||
event Test::log_test(rec: Test::Info)
|
||||
{
|
||||
print "wrote log", rec;
|
||||
print "got log event", rec;
|
||||
|
||||
if ( rec$num == 5 )
|
||||
terminate();
|
||||
|
|
|
@ -1,26 +0,0 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "connector";
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect("127.0.0.1", broker_port, 1sec);
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string,
|
||||
peer_port: port,
|
||||
peer_name: string)
|
||||
{
|
||||
print "Broker::outgoing_connection_established",
|
||||
peer_address, peer_port, peer_name;
|
||||
Broker::send_print("bro/print/hi", "hello");
|
||||
Broker::send_print("bro/print/stuff", "...");
|
||||
Broker::send_print("bro/print/bye", "goodbye");
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_broken(peer_address: string,
|
||||
peer_port: port)
|
||||
{
|
||||
terminate();
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
redef Broker::endpoint_name = "listener";
|
||||
global msg_count = 0;
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::subscribe_to_prints("bro/print/");
|
||||
Broker::listen(broker_port, "127.0.0.1");
|
||||
}
|
||||
|
||||
event Broker::incoming_connection_established(peer_name: string)
|
||||
{
|
||||
print "Broker::incoming_connection_established", peer_name;
|
||||
}
|
||||
|
||||
event Broker::print_handler(msg: string)
|
||||
{
|
||||
++msg_count;
|
||||
print "got print message", msg;
|
||||
|
||||
if ( msg_count == 3 )
|
||||
terminate();
|
||||
}
|
|
@ -1,53 +1,29 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
|
||||
global h: opaque of Broker::Handle;
|
||||
|
||||
function dv(d: Broker::Data): Broker::DataVector
|
||||
{
|
||||
local rval: Broker::DataVector;
|
||||
rval[0] = d;
|
||||
return rval;
|
||||
}
|
||||
global h: opaque of Broker::Store;
|
||||
|
||||
global ready: event();
|
||||
|
||||
event Broker::outgoing_connection_broken(peer_address: string,
|
||||
peer_port: port)
|
||||
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
terminate();
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string,
|
||||
peer_port: port,
|
||||
peer_name: string)
|
||||
{
|
||||
local myset: set[string] = {"a", "b", "c"};
|
||||
local myvec: vector of string = {"alpha", "beta", "gamma"};
|
||||
h = Broker::create_master("mystore");
|
||||
Broker::insert(h, Broker::data("one"), Broker::data(110));
|
||||
Broker::insert(h, Broker::data("two"), Broker::data(223));
|
||||
Broker::insert(h, Broker::data("myset"), Broker::data(myset));
|
||||
Broker::insert(h, Broker::data("myvec"), Broker::data(myvec));
|
||||
Broker::increment(h, Broker::data("one"));
|
||||
Broker::decrement(h, Broker::data("two"));
|
||||
Broker::add_to_set(h, Broker::data("myset"), Broker::data("d"));
|
||||
Broker::remove_from_set(h, Broker::data("myset"), Broker::data("b"));
|
||||
Broker::push_left(h, Broker::data("myvec"), dv(Broker::data("delta")));
|
||||
Broker::push_right(h, Broker::data("myvec"), dv(Broker::data("omega")));
|
||||
|
||||
when ( local res = Broker::size(h) )
|
||||
{
|
||||
print "master size", res;
|
||||
event ready();
|
||||
}
|
||||
timeout 10sec
|
||||
{ print "timeout"; }
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect("127.0.0.1", broker_port, 1secs);
|
||||
Broker::auto_event("bro/event/ready", ready);
|
||||
h = Broker::create_master("mystore");
|
||||
|
||||
local myset: set[string] = {"a", "b", "c"};
|
||||
local myvec: vector of string = {"alpha", "beta", "gamma"};
|
||||
Broker::put(h, "one", 110);
|
||||
Broker::put(h, "two", 223);
|
||||
Broker::put(h, "myset", myset);
|
||||
Broker::put(h, "myvec", myvec);
|
||||
Broker::increment(h, "one");
|
||||
Broker::decrement(h, "two");
|
||||
Broker::insert_into_set(h, "myset", "d");
|
||||
Broker::remove_from(h, "myset", "b");
|
||||
Broker::push(h, "myvec", "delta");
|
||||
|
||||
Broker::peer("127.0.0.1");
|
||||
}
|
||||
|
|
|
@ -1,43 +1,79 @@
|
|||
const broker_port: port = 9999/tcp &redef;
|
||||
redef exit_only_after_terminate = T;
|
||||
|
||||
global h: opaque of Broker::Handle;
|
||||
global h: opaque of Broker::Store;
|
||||
global expected_key_count = 4;
|
||||
global key_count = 0;
|
||||
|
||||
# Lookup a value in the store based on an arbitrary key string.
|
||||
function do_lookup(key: string)
|
||||
{
|
||||
when ( local res = Broker::lookup(h, Broker::data(key)) )
|
||||
when ( local res = Broker::get(h, key) )
|
||||
{
|
||||
++key_count;
|
||||
print "lookup", key, res;
|
||||
|
||||
if ( key_count == expected_key_count )
|
||||
# End after we iterated over looking up each key in the store twice.
|
||||
if ( key_count == expected_key_count * 2 )
|
||||
terminate();
|
||||
}
|
||||
timeout 10sec
|
||||
# All data store queries must specify a timeout
|
||||
timeout 3sec
|
||||
{ print "timeout", key; }
|
||||
}
|
||||
|
||||
event ready()
|
||||
event check_keys()
|
||||
{
|
||||
h = Broker::create_clone("mystore");
|
||||
|
||||
# Here we just query for the list of keys in the store, and show how to
|
||||
# look up each one's value.
|
||||
when ( local res = Broker::keys(h) )
|
||||
{
|
||||
print "clone keys", res;
|
||||
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 0)));
|
||||
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 1)));
|
||||
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 2)));
|
||||
do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 3)));
|
||||
|
||||
if ( res?$result )
|
||||
{
|
||||
# Since we know that the keys we are storing are all strings,
|
||||
# we can conveniently cast the result of Broker::keys to
|
||||
# a native Bro type, namely 'set[string]'.
|
||||
for ( k in res$result as string_set )
|
||||
do_lookup(k);
|
||||
|
||||
# Alternatively, we can use a generic iterator to iterate
|
||||
# over the results (which we know is of the 'set' type because
|
||||
# that's what Broker::keys() always returns). If the keys
|
||||
# we stored were not all of the same type, then you would
|
||||
# likely want to use this method of inspecting the store's keys.
|
||||
local i = Broker::set_iterator(res$result);
|
||||
|
||||
while ( ! Broker::set_iterator_last(i) )
|
||||
{
|
||||
do_lookup(Broker::set_iterator_value(i) as string);
|
||||
Broker::set_iterator_next(i);
|
||||
}
|
||||
timeout 10sec
|
||||
{ print "timeout"; }
|
||||
}
|
||||
}
|
||||
# All data store queries must specify a timeout.
|
||||
# You also might see timeouts on connecting/initializing a clone since
|
||||
# it hasn't had time to get fully set up yet.
|
||||
timeout 1sec
|
||||
{
|
||||
print "timeout";
|
||||
schedule 1sec { check_keys() };
|
||||
}
|
||||
}
|
||||
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
print "peer added";
|
||||
# We could create a clone early, like in bro_init and it will periodically
|
||||
# try to synchronize with its master once it connects, however, we just
|
||||
# create it now since we know the peer w/ the master store has just
|
||||
# connected.
|
||||
h = Broker::create_clone("mystore");
|
||||
|
||||
event check_keys();
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::subscribe_to_events("bro/event/ready");
|
||||
Broker::listen(broker_port, "127.0.0.1");
|
||||
Broker::listen("127.0.0.1");
|
||||
}
|
||||
|
|
|
@ -13,6 +13,5 @@ export {
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Broker::enable();
|
||||
Log::create_stream(Test::LOG, [$columns=Test::Info, $ev=log_test, $path="test"]);
|
||||
}
|
||||
|
|
|
@ -45,6 +45,8 @@ Reference Section
|
|||
script-reference/index.rst
|
||||
components/index.rst
|
||||
|
||||
`Broker User Manual <../broker-manual/index.html>`_
|
||||
|
||||
Development
|
||||
===========
|
||||
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
../../aux/broccoli/bindings/broccoli-python/CHANGES
|
|
@ -1 +0,0 @@
|
|||
../../aux/broccoli/bindings/broccoli-ruby/CHANGES
|
|
@ -1 +0,0 @@
|
|||
../../aux/broccoli/CHANGES
|
1
doc/install/CHANGES-broker.txt
Symbolic link
1
doc/install/CHANGES-broker.txt
Symbolic link
|
@ -0,0 +1 @@
|
|||
../../aux/broker/CHANGES
|
|
@ -17,23 +17,11 @@ BroControl
|
|||
|
||||
.. literalinclude:: CHANGES-broctl.txt
|
||||
|
||||
--------
|
||||
Broccoli
|
||||
--------
|
||||
------
|
||||
Broker
|
||||
------
|
||||
|
||||
.. literalinclude:: CHANGES-broccoli.txt
|
||||
|
||||
---------------
|
||||
Broccoli Python
|
||||
---------------
|
||||
|
||||
.. literalinclude:: CHANGES-broccoli-python.txt
|
||||
|
||||
-------------
|
||||
Broccoli Ruby
|
||||
-------------
|
||||
|
||||
.. literalinclude:: CHANGES-broccoli-ruby.txt
|
||||
.. literalinclude:: CHANGES-broker.txt
|
||||
|
||||
--------
|
||||
Capstats
|
||||
|
|
|
@ -35,16 +35,16 @@ before you begin:
|
|||
|
||||
To build Bro from source, the following additional dependencies are required:
|
||||
|
||||
* CMake 2.8 or greater (http://www.cmake.org)
|
||||
* CMake 2.8.12 or greater (http://www.cmake.org)
|
||||
* Make
|
||||
* C/C++ compiler with C++11 support (GCC 4.8+ or Clang 3.3+)
|
||||
* SWIG (http://www.swig.org)
|
||||
* Bison (GNU Parser Generator)
|
||||
* Flex (Fast Lexical Analyzer)
|
||||
* Bison 2.5 or greater (https://www.gnu.org/software/bison/)
|
||||
* Flex (lexical analyzer generator) (https://github.com/westes/flex)
|
||||
* Libpcap headers (http://www.tcpdump.org)
|
||||
* OpenSSL headers (http://www.openssl.org)
|
||||
* zlib headers
|
||||
* Python
|
||||
* zlib headers (https://zlib.net/)
|
||||
* Python (https://www.python.org/)
|
||||
|
||||
To install the required dependencies, you can use:
|
||||
|
||||
|
@ -73,7 +73,7 @@ To install the required dependencies, you can use:
|
|||
|
||||
.. console::
|
||||
|
||||
sudo pkg install bash cmake swig bison python py27-sqlite3
|
||||
sudo pkg install bash cmake swig30 bison python py27-sqlite3 py27-ipaddress
|
||||
|
||||
For older versions of FreeBSD (especially FreeBSD 9.x), the system compiler
|
||||
is not new enough to compile Bro. For these systems, you will have to install
|
||||
|
@ -101,14 +101,17 @@ To install the required dependencies, you can use:
|
|||
clicking "Install").
|
||||
|
||||
OS X comes with all required dependencies except for CMake_, SWIG_,
|
||||
and OpenSSL (OpenSSL headers were removed in OS X 10.11, therefore OpenSSL
|
||||
must be installed manually for OS X versions 10.11 or newer).
|
||||
Distributions of these dependencies can
|
||||
likely be obtained from your preferred Mac OS X package management
|
||||
system (e.g. Homebrew_, MacPorts_, or Fink_). Specifically for
|
||||
Homebrew, the ``cmake``, ``swig``, and ``openssl`` packages
|
||||
provide the required dependencies. For MacPorts, the ``cmake``, ``swig``,
|
||||
``swig-python``, and ``openssl`` packages provide the required dependencies.
|
||||
Bison, and OpenSSL (OpenSSL headers were removed in OS X 10.11,
|
||||
therefore OpenSSL must be installed manually for OS X versions 10.11
|
||||
or newer).
|
||||
|
||||
Distributions of these dependencies can likely be obtained from your
|
||||
preferred Mac OS X package management system (e.g. Homebrew_,
|
||||
MacPorts_, or Fink_). Specifically for Homebrew, the ``cmake``,
|
||||
``swig``, ``openssl``, and ``bison`` packages
|
||||
provide the required dependencies. For MacPorts, the ``cmake``,
|
||||
``swig``, ``swig-python``, ``openssl``, and ``bison`` packages provide
|
||||
the required dependencies.
|
||||
|
||||
|
||||
Optional Dependencies
|
||||
|
@ -117,7 +120,6 @@ Optional Dependencies
|
|||
Bro can make use of some optional libraries and tools if they are found at
|
||||
build time:
|
||||
|
||||
* C++ Actor Framework (CAF) version 0.14 (http://actor-framework.org)
|
||||
* LibGeoIP (for geolocating IP addresses)
|
||||
* sendmail (enables Bro and BroControl to send mail)
|
||||
* curl (used by a Bro script that implements active HTTP)
|
||||
|
|
|
@ -168,8 +168,8 @@ Bro Diagnostics
|
|||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| cluster.log | Bro cluster messages | :bro:type:`Cluster::Info` |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| communication.log | Communication events between Bro or | :bro:type:`Communication::Info` |
|
||||
| | Broccoli instances | |
|
||||
| broker.log | Peering status events between Bro or | :bro:type:`Broker::Info` |
|
||||
| | Broker-enabled processes | |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
| loaded_scripts.log | Shows all scripts loaded by Bro | :bro:type:`LoadedScripts::Info` |
|
||||
+----------------------------+---------------------------------------+---------------------------------+
|
||||
|
|
|
@ -325,29 +325,14 @@ variable declared while scripts using a different namespace or no
|
|||
namespace altogether will not have access to the variable.
|
||||
Alternatively, if a global variable is declared within an ``export { ... }``
|
||||
block that variable is available to any other script through the
|
||||
naming convention of ``MODULE::variable_name``.
|
||||
|
||||
The declaration below is taken from the
|
||||
:doc:`/scripts/policy/protocols/conn/known-hosts.bro` script and
|
||||
declares a variable called ``known_hosts`` as a global set of unique
|
||||
IP addresses within the ``Known`` namespace and exports it for use
|
||||
outside of the ``Known`` namespace. Were we to want to use the
|
||||
``known_hosts`` variable we'd be able to access it through
|
||||
``Known::known_hosts``.
|
||||
|
||||
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/conn/known-hosts.bro
|
||||
:lines: 8-10, 32, 37
|
||||
|
||||
The sample above also makes use of an ``export { ... }`` block. When the module
|
||||
keyword is used in a script, the variables declared are said to be in
|
||||
that module's "namespace". Where as a global variable can be accessed
|
||||
by its name alone when it is not declared within a module, a global
|
||||
variable declared within a module must be exported and then accessed
|
||||
via ``MODULE_NAME::VARIABLE_NAME``. As in the example above, we would be
|
||||
able to access the ``known_hosts`` in a separate script variable via
|
||||
``Known::known_hosts`` due to the fact that ``known_hosts`` was declared as
|
||||
a global variable within an export block under the ``Known`` namespace.
|
||||
naming convention of ``<module name>::<variable name>``, i.e. the variable
|
||||
needs to be "scoped" by the name of the module in which it was declared.
|
||||
|
||||
When the ``module`` keyword is used in a script, the variables declared
|
||||
are said to be in that module's "namespace". Where as a global variable
|
||||
can be accessed by its name alone when it is not declared within a
|
||||
module, a global variable declared within a module must be exported and
|
||||
then accessed via ``<module name>::<variable name>``.
|
||||
|
||||
Constants
|
||||
~~~~~~~~~
|
||||
|
|
|
@ -1,2 +1,3 @@
|
|||
@load ./main
|
||||
@load ./store
|
||||
@load ./log
|
||||
|
|
80
scripts/base/frameworks/broker/log.bro
Normal file
80
scripts/base/frameworks/broker/log.bro
Normal file
|
@ -0,0 +1,80 @@
|
|||
@load ./main
|
||||
|
||||
module Broker;
|
||||
|
||||
export {
|
||||
## The Broker logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
## The type of a Broker activity being logged.
|
||||
type Type: enum {
|
||||
## An informational status update.
|
||||
STATUS,
|
||||
## An error situation.
|
||||
ERROR
|
||||
};
|
||||
|
||||
## A record type containing the column fields of the Broker log.
|
||||
type Info: record {
|
||||
## The network time at which a Broker event occurred.
|
||||
ts: time &log;
|
||||
## The type of the Broker event.
|
||||
ty: Type &log;
|
||||
## The event being logged.
|
||||
ev: string &log;
|
||||
## The peer (if any) with which a Broker event is
|
||||
## concerned.
|
||||
peer: NetworkInfo &log &optional;
|
||||
## An optional message describing the Broker event in more detail
|
||||
message: string &log &optional;
|
||||
};
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(Broker::LOG, [$columns=Info, $path="broker"]);
|
||||
}
|
||||
|
||||
function log_status(ev: string, endpoint: EndpointInfo, msg: string)
|
||||
{
|
||||
local r: Info;
|
||||
|
||||
r = [$ts = network_time(),
|
||||
$ev = ev,
|
||||
$ty = STATUS,
|
||||
$message = msg];
|
||||
|
||||
if ( endpoint?$network )
|
||||
r$peer = endpoint$network;
|
||||
|
||||
Log::write(Broker::LOG, r);
|
||||
}
|
||||
|
||||
event Broker::peer_added(endpoint: EndpointInfo, msg: string)
|
||||
{
|
||||
log_status("peer-added", endpoint, msg);
|
||||
}
|
||||
|
||||
event Broker::peer_removed(endpoint: EndpointInfo, msg: string)
|
||||
{
|
||||
log_status("peer-removed", endpoint, msg);
|
||||
}
|
||||
|
||||
event Broker::peer_lost(endpoint: EndpointInfo, msg: string)
|
||||
{
|
||||
log_status("connection-terminated", endpoint, msg);
|
||||
}
|
||||
|
||||
event Broker::error(code: ErrorCode, msg: string)
|
||||
{
|
||||
local ev = cat(code);
|
||||
ev = subst_string(ev, "Broker::", "");
|
||||
ev = subst_string(ev, "_", "-");
|
||||
ev = to_lower(ev);
|
||||
|
||||
Log::write(Broker::LOG, [$ts = network_time(),
|
||||
$ev = ev,
|
||||
$ty = ERROR,
|
||||
$message = msg]);
|
||||
}
|
||||
|
|
@ -1,55 +1,160 @@
|
|||
##! Various data structure definitions for use with Bro's communication system.
|
||||
|
||||
module Log;
|
||||
|
||||
export {
|
||||
type Log::ID: enum {
|
||||
## Dummy place-holder.
|
||||
UNKNOWN
|
||||
};
|
||||
}
|
||||
##! The Broker-based communication API and its various options.
|
||||
|
||||
module Broker;
|
||||
|
||||
export {
|
||||
## Default port for Broker communication. Where not specified
|
||||
## otherwise, this is the port to connect to and listen on.
|
||||
const default_port = 9999/tcp &redef;
|
||||
|
||||
## A name used to identify this endpoint to peers.
|
||||
## Default interval to retry listening on a port if it's currently in
|
||||
## use already.
|
||||
const default_listen_retry = 30sec &redef;
|
||||
|
||||
## Default address on which to listen.
|
||||
##
|
||||
## .. bro:see:: Broker::connect Broker::listen
|
||||
const endpoint_name = "" &redef;
|
||||
## .. bro:see:: Broker::listen
|
||||
const default_listen_address = "" &redef;
|
||||
|
||||
## Change communication behavior.
|
||||
type EndpointFlags: record {
|
||||
## Whether to restrict message topics that can be published to peers.
|
||||
auto_publish: bool &default = T;
|
||||
## Whether to restrict what message topics or data store identifiers
|
||||
## the local endpoint advertises to peers (e.g. subscribing to
|
||||
## events or making a master data store available).
|
||||
auto_advertise: bool &default = T;
|
||||
## Default interval to retry connecting to a peer if it cannot be made to work
|
||||
## initially, or if it ever becomes disconnected.
|
||||
const default_connect_retry = 30sec &redef;
|
||||
|
||||
## If false, do not use SSL for network connections. By default, SSL will even
|
||||
## be used if no certificates / CAs have been configured. In that case
|
||||
## (which is the default) the communication will be encrypted, but not
|
||||
## authenticated.
|
||||
const disable_ssl = F &redef;
|
||||
|
||||
## Path to a file containing concatenated trusted certificates
|
||||
## in PEM format. If set, Bro will require valid certificates forx
|
||||
## all peers.
|
||||
const ssl_cafile = "" &redef;
|
||||
|
||||
## Path to an OpenSSL-style directory of trusted certificates.
|
||||
## If set, Bro will require valid certificates forx
|
||||
## all peers.
|
||||
const ssl_capath = "" &redef;
|
||||
|
||||
## Path to a file containing a X.509 certificate for this
|
||||
## node in PEM format. If set, Bro will require valid certificates for
|
||||
## all peers.
|
||||
const ssl_certificate = "" &redef;
|
||||
|
||||
## Passphrase to decrypt the private key specified by
|
||||
## :bro:see:`Broker::ssl_keyfile`. If set, Bro will require valid
|
||||
## certificates for all peers.
|
||||
const ssl_passphrase = "" &redef;
|
||||
|
||||
## Path to the file containing the private key for this node's
|
||||
## certificate. If set, Bro will require valid certificates for
|
||||
## all peers.
|
||||
const ssl_keyfile = "" &redef;
|
||||
|
||||
## Forward all received messages to subscribing peers.
|
||||
const forward_messages = F &redef;
|
||||
|
||||
## The default topic prefix where logs will be published. The log's stream
|
||||
## id is appended when writing to a particular stream.
|
||||
const default_log_topic_prefix = "bro/logs/" &redef;
|
||||
|
||||
## The default implementation for :bro:see:`Broker::log_topic`.
|
||||
function default_log_topic(id: Log::ID, path: string): string
|
||||
{
|
||||
return default_log_topic_prefix + cat(id);
|
||||
}
|
||||
|
||||
## A function that will be called for each log entry to determine what
|
||||
## broker topic string will be used for sending it to peers. The
|
||||
## default implementation will return a value based on
|
||||
## :bro:see:`Broker::default_log_topic_prefix`.
|
||||
##
|
||||
## id: the ID associated with the log stream entry that will be sent.
|
||||
##
|
||||
## path: the path to which the log stream entry will be output.
|
||||
##
|
||||
## Returns: a string representing the broker topic to which the log
|
||||
## will be sent.
|
||||
const log_topic: function(id: Log::ID, path: string): string = default_log_topic &redef;
|
||||
|
||||
type ErrorCode: enum {
|
||||
## The unspecified default error code.
|
||||
UNSPECIFIED = 1,
|
||||
## Version incompatibility.
|
||||
PEER_INCOMPATIBLE = 2,
|
||||
## Referenced peer does not exist.
|
||||
PEER_INVALID = 3,
|
||||
## Remote peer not listening.
|
||||
PEER_UNAVAILABLE = 4,
|
||||
## An peering request timed out.
|
||||
PEER_TIMEOUT = 5,
|
||||
## Master with given name already exist.
|
||||
MASTER_EXISTS = 6,
|
||||
## Master with given name does not exist.
|
||||
NO_SUCH_MASTER = 7,
|
||||
## The given data store key does not exist.
|
||||
NO_SUCH_KEY = 8,
|
||||
## The store operation timed out.
|
||||
REQUEST_TIMEOUT = 9,
|
||||
## The operation expected a different type than provided
|
||||
TYPE_CLASH = 10,
|
||||
## The data value cannot be used to carry out the desired operation.
|
||||
INVALID_DATA = 11,
|
||||
## The storage backend failed to execute the operation.
|
||||
BACKEND_FAILURE = 12,
|
||||
## The storage backend failed to execute the operation.
|
||||
STALE_DATA = 13,
|
||||
## Catch-all for a CAF-level problem.
|
||||
CAF_ERROR = 100
|
||||
};
|
||||
|
||||
## Fine-grained tuning of communication behavior for a particular message.
|
||||
type SendFlags: record {
|
||||
## Send the message to the local endpoint.
|
||||
self: bool &default = F;
|
||||
## Send the message to peer endpoints that advertise interest in
|
||||
## the topic associated with the message.
|
||||
peers: bool &default = T;
|
||||
## Send the message to peer endpoints even if they don't advertise
|
||||
## interest in the topic associated with the message.
|
||||
unsolicited: bool &default = F;
|
||||
## The possible states of a peer endpoint.
|
||||
type PeerStatus: enum {
|
||||
## The peering process is initiated.
|
||||
INITIALIZING,
|
||||
## Connection establishment in process.
|
||||
CONNECTING,
|
||||
## Connection established, peering pending.
|
||||
CONNECTED,
|
||||
## Successfully peered.
|
||||
PEERED,
|
||||
## Connection to remote peer lost.
|
||||
DISCONNECTED,
|
||||
## Reconnecting to peer after a lost connection.
|
||||
RECONNECTING,
|
||||
};
|
||||
|
||||
type NetworkInfo: record {
|
||||
## The IP address or hostname where the endpoint listens.
|
||||
address: string &log;
|
||||
## The port where the endpoint is bound to.
|
||||
bound_port: port &log;
|
||||
};
|
||||
|
||||
type EndpointInfo: record {
|
||||
## A unique identifier of the node.
|
||||
id: string;
|
||||
## Network-level information.
|
||||
network: NetworkInfo &optional;
|
||||
};
|
||||
|
||||
type PeerInfo: record {
|
||||
peer: EndpointInfo;
|
||||
status: PeerStatus;
|
||||
};
|
||||
|
||||
type PeerInfos: vector of PeerInfo;
|
||||
|
||||
## Opaque communication data.
|
||||
type Data: record {
|
||||
d: opaque of Broker::Data &optional;
|
||||
data: opaque of Broker::Data &optional;
|
||||
};
|
||||
|
||||
## Opaque communication data.
|
||||
## Opaque communication data sequence.
|
||||
type DataVector: vector of Broker::Data;
|
||||
|
||||
## Opaque event communication data.
|
||||
type EventArgs: record {
|
||||
type Event: record {
|
||||
## The name of the event. Not set if invalid event or arguments.
|
||||
name: string &optional;
|
||||
## The arguments to the event.
|
||||
|
@ -63,52 +168,23 @@ export {
|
|||
val: Broker::Data;
|
||||
};
|
||||
|
||||
## Enable use of communication.
|
||||
##
|
||||
## flags: used to tune the local Broker endpoint behavior.
|
||||
##
|
||||
## Returns: true if communication is successfully initialized.
|
||||
global enable: function(flags: EndpointFlags &default = EndpointFlags()): bool;
|
||||
|
||||
## Changes endpoint flags originally supplied to :bro:see:`Broker::enable`.
|
||||
##
|
||||
## flags: the new endpoint behavior flags to use.
|
||||
##
|
||||
## Returns: true if flags were changed.
|
||||
global set_endpoint_flags: function(flags: EndpointFlags &default = EndpointFlags()): bool;
|
||||
|
||||
## Allow sending messages to peers if associated with the given topic.
|
||||
## This has no effect if auto publication behavior is enabled via the flags
|
||||
## supplied to :bro:see:`Broker::enable` or :bro:see:`Broker::set_endpoint_flags`.
|
||||
##
|
||||
## topic: a topic to allow messages to be published under.
|
||||
##
|
||||
## Returns: true if successful.
|
||||
global publish_topic: function(topic: string): bool;
|
||||
|
||||
## Disallow sending messages to peers if associated with the given topic.
|
||||
## This has no effect if auto publication behavior is enabled via the flags
|
||||
## supplied to :bro:see:`Broker::enable` or :bro:see:`Broker::set_endpoint_flags`.
|
||||
##
|
||||
## topic: a topic to disallow messages to be published under.
|
||||
##
|
||||
## Returns: true if successful.
|
||||
global unpublish_topic: function(topic: string): bool;
|
||||
|
||||
## Listen for remote connections.
|
||||
##
|
||||
## p: the TCP port to listen on.
|
||||
##
|
||||
## a: an address string on which to accept connections, e.g.
|
||||
## "127.0.0.1". An empty string refers to @p INADDR_ANY.
|
||||
##
|
||||
## reuse: equivalent to behavior of SO_REUSEADDR.
|
||||
## p: the TCP port to listen on. The value 0 means that the OS should choose
|
||||
## the next available free port.
|
||||
##
|
||||
## Returns: true if the local endpoint is now listening for connections.
|
||||
## retry: If non-zero, retries listening in regular intervals if the port cannot be
|
||||
## acquired immediately. 0 disables retries.
|
||||
##
|
||||
## .. bro:see:: Broker::incoming_connection_established
|
||||
global listen: function(p: port, a: string &default = "", reuse: bool &default = T): bool;
|
||||
|
||||
## Returns: the bound port or 0/? on failure.
|
||||
##
|
||||
## .. bro:see:: Broker::status
|
||||
global listen: function(a: string &default = default_listen_address,
|
||||
p: port &default = default_port,
|
||||
retry: interval &default = default_listen_retry): port;
|
||||
## Initiate a remote connection.
|
||||
##
|
||||
## a: an address to connect to, e.g. "localhost" or "127.0.0.1".
|
||||
|
@ -123,60 +199,63 @@ export {
|
|||
## it's a new peer. The actual connection may not be established
|
||||
## until a later point in time.
|
||||
##
|
||||
## .. bro:see:: Broker::outgoing_connection_established
|
||||
global connect: function(a: string, p: port, retry: interval): bool;
|
||||
## .. bro:see:: Broker::status
|
||||
global peer: function(a: string, p: port &default=default_port,
|
||||
retry: interval &default=default_connect_retry): bool;
|
||||
|
||||
## Remove a remote connection.
|
||||
##
|
||||
## a: the address used in previous successful call to :bro:see:`Broker::connect`.
|
||||
## Note that this does not terminate the connection to the peer, it
|
||||
## just means that we won't exchange any further information with it
|
||||
## unless peering resumes later.
|
||||
##
|
||||
## p: the port used in previous successful call to :bro:see:`Broker::connect`.
|
||||
## a: the address used in previous successful call to :bro:see:`Broker::peer`.
|
||||
##
|
||||
## p: the port used in previous successful call to :bro:see:`Broker::peer`.
|
||||
##
|
||||
## Returns: true if the arguments match a previously successful call to
|
||||
## :bro:see:`Broker::connect`.
|
||||
global disconnect: function(a: string, p: port): bool;
|
||||
## :bro:see:`Broker::peer`.
|
||||
##
|
||||
## TODO: We do not have a function yet to terminate a connection.
|
||||
global unpeer: function(a: string, p: port): bool;
|
||||
|
||||
## Print a simple message to any interested peers. The receiver can use
|
||||
## :bro:see:`Broker::print_handler` to handle messages.
|
||||
## Returns: a list of all peer connections.
|
||||
global peers: function(): vector of PeerInfo;
|
||||
|
||||
## Returns: a unique identifier for the local broker endpoint.
|
||||
global node_id: function(): string;
|
||||
|
||||
## Sends all pending log messages to remote peers. This normally
|
||||
## doesn't need to be used except for test cases that are time-sensitive.
|
||||
global flush_logs: function(): count;
|
||||
|
||||
## Publishes the value of an identifier to a given topic. The subscribers
|
||||
## will update their local value for that identifier on receipt.
|
||||
##
|
||||
## topic: a topic associated with the printed message.
|
||||
## topic: a topic associated with the message.
|
||||
##
|
||||
## msg: the print message to send to peers.
|
||||
##
|
||||
## flags: tune the behavior of how the message is sent.
|
||||
## id: the identifier to publish.
|
||||
##
|
||||
## Returns: true if the message is sent.
|
||||
global send_print: function(topic: string, msg: string, flags: SendFlags &default = SendFlags()): bool;
|
||||
global publish_id: function(topic: string, id: string): bool;
|
||||
|
||||
## Register interest in all peer print messages that use a certain topic
|
||||
## prefix. Use :bro:see:`Broker::print_handler` to handle received
|
||||
## messages.
|
||||
## Register interest in all peer event messages that use a certain topic
|
||||
## prefix.
|
||||
##
|
||||
## topic_prefix: a prefix to match against remote message topics.
|
||||
## e.g. an empty prefix matches everything and "a" matches
|
||||
## "alice" and "amy" but not "bob".
|
||||
##
|
||||
## Returns: true if it's a new print subscription and it is now registered.
|
||||
global subscribe_to_prints: function(topic_prefix: string): bool;
|
||||
## Returns: true if it's a new event subscription and it is now registered.
|
||||
global subscribe: function(topic_prefix: string): bool;
|
||||
|
||||
## Unregister interest in all peer print messages that use a topic prefix.
|
||||
## Unregister interest in all peer event messages that use a topic prefix.
|
||||
##
|
||||
## topic_prefix: a prefix previously supplied to a successful call to
|
||||
## :bro:see:`Broker::subscribe_to_prints`.
|
||||
## :bro:see:`Broker::subscribe`.
|
||||
##
|
||||
## Returns: true if interest in the topic prefix is no longer advertised.
|
||||
global unsubscribe_to_prints: function(topic_prefix: string): bool;
|
||||
|
||||
## Send an event to any interested peers.
|
||||
##
|
||||
## topic: a topic associated with the event message.
|
||||
##
|
||||
## args: event arguments as made by :bro:see:`Broker::event_args`.
|
||||
##
|
||||
## flags: tune the behavior of how the message is sent.
|
||||
##
|
||||
## Returns: true if the message is sent.
|
||||
global send_event: function(topic: string, args: EventArgs, flags: SendFlags &default = SendFlags()): bool;
|
||||
global unsubscribe: function(topic_prefix: string): bool;
|
||||
|
||||
## Automatically send an event to any interested peers whenever it is
|
||||
## locally dispatched (e.g. using "event my_event(...);" in a script).
|
||||
|
@ -187,83 +266,18 @@ export {
|
|||
##
|
||||
## ev: a Bro event value.
|
||||
##
|
||||
## flags: tune the behavior of how the message is sent.
|
||||
##
|
||||
## Returns: true if automatic event sending is now enabled.
|
||||
global auto_event: function(topic: string, ev: any, flags: SendFlags &default = SendFlags()): bool;
|
||||
global auto_publish: function(topic: string, ev: any): bool;
|
||||
|
||||
## Stop automatically sending an event to peers upon local dispatch.
|
||||
##
|
||||
## topic: a topic originally given to :bro:see:`Broker::auto_event`.
|
||||
## topic: a topic originally given to :bro:see:`Broker::auto_publish`.
|
||||
##
|
||||
## ev: an event originally given to :bro:see:`Broker::auto_event`.
|
||||
## ev: an event originally given to :bro:see:`Broker::auto_publish`.
|
||||
##
|
||||
## Returns: true if automatic events will not occur for the topic/event
|
||||
## pair.
|
||||
global auto_event_stop: function(topic: string, ev: any): bool;
|
||||
|
||||
## Register interest in all peer event messages that use a certain topic
|
||||
## prefix.
|
||||
##
|
||||
## topic_prefix: a prefix to match against remote message topics.
|
||||
## e.g. an empty prefix matches everything and "a" matches
|
||||
## "alice" and "amy" but not "bob".
|
||||
##
|
||||
## Returns: true if it's a new event subscription and it is now registered.
|
||||
global subscribe_to_events: function(topic_prefix: string): bool;
|
||||
|
||||
## Unregister interest in all peer event messages that use a topic prefix.
|
||||
##
|
||||
## topic_prefix: a prefix previously supplied to a successful call to
|
||||
## :bro:see:`Broker::subscribe_to_events`.
|
||||
##
|
||||
## Returns: true if interest in the topic prefix is no longer advertised.
|
||||
global unsubscribe_to_events: function(topic_prefix: string): bool;
|
||||
|
||||
## Enable remote logs for a given log stream.
|
||||
##
|
||||
## id: the log stream to enable remote logs for.
|
||||
##
|
||||
## flags: tune the behavior of how log entry messages are sent.
|
||||
##
|
||||
## Returns: true if remote logs are enabled for the stream.
|
||||
global enable_remote_logs: function(id: Log::ID, flags: SendFlags &default = SendFlags()): bool;
|
||||
|
||||
## Disable remote logs for a given log stream.
|
||||
##
|
||||
## id: the log stream to disable remote logs for.
|
||||
##
|
||||
## Returns: true if remote logs are disabled for the stream.
|
||||
global disable_remote_logs: function(id: Log::ID): bool;
|
||||
|
||||
## Check if remote logs are enabled for a given log stream.
|
||||
##
|
||||
## id: the log stream to check.
|
||||
##
|
||||
## Returns: true if remote logs are enabled for the given stream.
|
||||
global remote_logs_enabled: function(id: Log::ID): bool;
|
||||
|
||||
## Register interest in all peer log messages that use a certain topic
|
||||
## prefix. Logs are implicitly sent with topic "bro/log/<stream-name>" and
|
||||
## the receiving side processes them through the logging framework as usual.
|
||||
##
|
||||
## topic_prefix: a prefix to match against remote message topics.
|
||||
## e.g. an empty prefix matches everything and "a" matches
|
||||
## "alice" and "amy" but not "bob".
|
||||
##
|
||||
## Returns: true if it's a new log subscription and it is now registered.
|
||||
global subscribe_to_logs: function(topic_prefix: string): bool;
|
||||
|
||||
## Unregister interest in all peer log messages that use a topic prefix.
|
||||
## Logs are implicitly sent with topic "bro/log/<stream-name>" and the
|
||||
## receiving side processes them through the logging framework as usual.
|
||||
##
|
||||
## topic_prefix: a prefix previously supplied to a successful call to
|
||||
## :bro:see:`Broker::subscribe_to_logs`.
|
||||
##
|
||||
## Returns: true if interest in the topic prefix is no longer advertised.
|
||||
global unsubscribe_to_logs: function(topic_prefix: string): bool;
|
||||
|
||||
global auto_unpublish: function(topic: string, ev: any): bool;
|
||||
}
|
||||
|
||||
@load base/bif/comm.bif
|
||||
|
@ -271,106 +285,67 @@ export {
|
|||
|
||||
module Broker;
|
||||
|
||||
@ifdef ( Broker::__enable )
|
||||
|
||||
function enable(flags: EndpointFlags &default = EndpointFlags()) : bool
|
||||
event retry_listen(a: string, p: port, retry: interval)
|
||||
{
|
||||
return __enable(flags);
|
||||
listen(a, p, retry);
|
||||
}
|
||||
|
||||
function set_endpoint_flags(flags: EndpointFlags &default = EndpointFlags()): bool
|
||||
function listen(a: string, p: port, retry: interval): port
|
||||
{
|
||||
return __set_endpoint_flags(flags);
|
||||
local bound = __listen(a, p);
|
||||
|
||||
if ( bound == 0/tcp && retry != 0secs )
|
||||
schedule retry { retry_listen(a, p, retry) };
|
||||
|
||||
return bound;
|
||||
}
|
||||
|
||||
function publish_topic(topic: string): bool
|
||||
function peer(a: string, p: port, retry: interval): bool
|
||||
{
|
||||
return __publish_topic(topic);
|
||||
return __peer(a, p, retry);
|
||||
}
|
||||
|
||||
function unpublish_topic(topic: string): bool
|
||||
function unpeer(a: string, p: port): bool
|
||||
{
|
||||
return __unpublish_topic(topic);
|
||||
return __unpeer(a, p);
|
||||
}
|
||||
|
||||
function listen(p: port, a: string &default = "", reuse: bool &default = T): bool
|
||||
function peers(): vector of PeerInfo
|
||||
{
|
||||
return __listen(p, a, reuse);
|
||||
return __peers();
|
||||
}
|
||||
|
||||
function connect(a: string, p: port, retry: interval): bool
|
||||
function node_id(): string
|
||||
{
|
||||
return __connect(a, p, retry);
|
||||
return __node_id();
|
||||
}
|
||||
|
||||
function disconnect(a: string, p: port): bool
|
||||
function flush_logs(): count
|
||||
{
|
||||
return __disconnect(a, p);
|
||||
return __flush_logs();
|
||||
}
|
||||
|
||||
function send_print(topic: string, msg: string, flags: SendFlags &default = SendFlags()): bool
|
||||
function publish_id(topic: string, id: string): bool
|
||||
{
|
||||
return __send_print(topic, msg, flags);
|
||||
return __publish_id(topic, id);
|
||||
}
|
||||
|
||||
function subscribe_to_prints(topic_prefix: string): bool
|
||||
function subscribe(topic_prefix: string): bool
|
||||
{
|
||||
return __subscribe_to_prints(topic_prefix);
|
||||
return __subscribe(topic_prefix);
|
||||
}
|
||||
|
||||
function unsubscribe_to_prints(topic_prefix: string): bool
|
||||
function unsubscribe(topic_prefix: string): bool
|
||||
{
|
||||
return __unsubscribe_to_prints(topic_prefix);
|
||||
return __unsubscribe(topic_prefix);
|
||||
}
|
||||
|
||||
function send_event(topic: string, args: EventArgs, flags: SendFlags &default = SendFlags()): bool
|
||||
function auto_publish(topic: string, ev: any): bool
|
||||
{
|
||||
return __event(topic, args, flags);
|
||||
return __auto_publish(topic, ev);
|
||||
}
|
||||
|
||||
function auto_event(topic: string, ev: any, flags: SendFlags &default = SendFlags()): bool
|
||||
function auto_unpublish(topic: string, ev: any): bool
|
||||
{
|
||||
return __auto_event(topic, ev, flags);
|
||||
return __auto_unpublish(topic, ev);
|
||||
}
|
||||
|
||||
function auto_event_stop(topic: string, ev: any): bool
|
||||
{
|
||||
return __auto_event_stop(topic, ev);
|
||||
}
|
||||
|
||||
function subscribe_to_events(topic_prefix: string): bool
|
||||
{
|
||||
return __subscribe_to_events(topic_prefix);
|
||||
}
|
||||
|
||||
function unsubscribe_to_events(topic_prefix: string): bool
|
||||
{
|
||||
return __unsubscribe_to_events(topic_prefix);
|
||||
}
|
||||
|
||||
function enable_remote_logs(id: Log::ID, flags: SendFlags &default = SendFlags()): bool
|
||||
{
|
||||
return __enable_remote_logs(id, flags);
|
||||
}
|
||||
|
||||
function disable_remote_logs(id: Log::ID): bool
|
||||
{
|
||||
return __disable_remote_logs(id);
|
||||
}
|
||||
|
||||
function remote_logs_enabled(id: Log::ID): bool
|
||||
{
|
||||
return __remote_logs_enabled(id);
|
||||
}
|
||||
|
||||
function subscribe_to_logs(topic_prefix: string): bool
|
||||
{
|
||||
return __subscribe_to_logs(topic_prefix);
|
||||
}
|
||||
|
||||
function unsubscribe_to_logs(topic_prefix: string): bool
|
||||
{
|
||||
return __unsubscribe_to_logs(topic_prefix);
|
||||
}
|
||||
|
||||
@endif
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,11 +1,16 @@
|
|||
# Load the core cluster support.
|
||||
@load ./main
|
||||
@load ./pools
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
|
||||
# Give the node being started up it's peer name.
|
||||
redef peer_description = Cluster::node;
|
||||
|
||||
@if ( Cluster::enable_round_robin_logging )
|
||||
redef Broker::log_topic = Cluster::rr_log_topic;
|
||||
@endif
|
||||
|
||||
# Add a cluster prefix.
|
||||
@prefixes += cluster
|
||||
|
||||
|
@ -19,13 +24,6 @@ redef peer_description = Cluster::node;
|
|||
|
||||
@load ./setup-connections
|
||||
|
||||
# Don't load the listening script until we're a bit more sure that the
|
||||
# cluster framework is actually being enabled.
|
||||
@load frameworks/communication/listen
|
||||
|
||||
## Set the port that this node is supposed to listen on.
|
||||
redef Communication::listen_port = Cluster::nodes[Cluster::node]$p;
|
||||
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
@load ./nodes/manager
|
||||
# If no logger is defined, then the manager receives logs.
|
||||
|
|
|
@ -7,10 +7,111 @@
|
|||
##! ``@load base/frameworks/cluster``.
|
||||
|
||||
@load base/frameworks/control
|
||||
@load base/frameworks/broker
|
||||
|
||||
module Cluster;
|
||||
|
||||
export {
|
||||
## Whether to distribute log messages among available logging nodes.
|
||||
const enable_round_robin_logging = T &redef;
|
||||
|
||||
## The topic name used for exchanging general messages that are relevant to
|
||||
## any node in a cluster. Used with broker-enabled cluster communication.
|
||||
const broadcast_topic = "bro/cluster/broadcast" &redef;
|
||||
|
||||
## The topic name used for exchanging messages that are relevant to
|
||||
## logger nodes in a cluster. Used with broker-enabled cluster communication.
|
||||
const logger_topic = "bro/cluster/logger" &redef;
|
||||
|
||||
## The topic name used for exchanging messages that are relevant to
|
||||
## manager nodes in a cluster. Used with broker-enabled cluster communication.
|
||||
const manager_topic = "bro/cluster/manager" &redef;
|
||||
|
||||
## The topic name used for exchanging messages that are relevant to
|
||||
## proxy nodes in a cluster. Used with broker-enabled cluster communication.
|
||||
const proxy_topic = "bro/cluster/proxy" &redef;
|
||||
|
||||
## The topic name used for exchanging messages that are relevant to
|
||||
## worker nodes in a cluster. Used with broker-enabled cluster communication.
|
||||
const worker_topic = "bro/cluster/worker" &redef;
|
||||
|
||||
## The topic name used for exchanging messages that are relevant to
|
||||
## time machine nodes in a cluster. Used with broker-enabled cluster communication.
|
||||
const time_machine_topic = "bro/cluster/time_machine" &redef;
|
||||
|
||||
## The topic prefix used for exchanging messages that are relevant to
|
||||
## a named node in a cluster. Used with broker-enabled cluster communication.
|
||||
const node_topic_prefix = "bro/cluster/node/" &redef;
|
||||
|
||||
## Name of the node on which master data stores will be created if no other
|
||||
## has already been specified by the user in :bro:see:`Cluster::stores`.
|
||||
## An empty value means "use whatever name corresponds to the manager
|
||||
## node".
|
||||
const default_master_node = "" &redef;
|
||||
|
||||
## The type of data store backend that will be used for all data stores if
|
||||
## no other has already been specified by the user in :bro:see:`Cluster::stores`.
|
||||
const default_backend = Broker::MEMORY &redef;
|
||||
|
||||
## The type of persistent data store backend that will be used for all data
|
||||
## stores if no other has already been specified by the user in
|
||||
## :bro:see:`Cluster::stores`. This will be used when script authors call
|
||||
## :bro:see:`Cluster::create_store` with the *persistent* argument set true.
|
||||
const default_persistent_backend = Broker::SQLITE &redef;
|
||||
|
||||
## Setting a default dir will, for persistent backends that have not
|
||||
## been given an explicit file path via :bro:see:`Cluster::stores`,
|
||||
## automatically create a path within this dir that is based on the name of
|
||||
## the data store.
|
||||
const default_store_dir = "" &redef;
|
||||
|
||||
## Information regarding a cluster-enabled data store.
|
||||
type StoreInfo: record {
|
||||
## The name of the data store.
|
||||
name: string &optional;
|
||||
## The store handle.
|
||||
store: opaque of Broker::Store &optional;
|
||||
## The name of the cluster node on which the master version of the data
|
||||
## store resides.
|
||||
master_node: string &default=default_master_node;
|
||||
## Whether the data store is the master version or a clone.
|
||||
master: bool &default=F;
|
||||
## The type of backend used for storing data.
|
||||
backend: Broker::BackendType &default=default_backend;
|
||||
## Parameters used for configuring the backend.
|
||||
options: Broker::BackendOptions &default=Broker::BackendOptions();
|
||||
## A resync/reconnect interval to pass through to
|
||||
## :bro:see:`Broker::create_clone`.
|
||||
clone_resync_interval: interval &default=Broker::default_clone_resync_interval;
|
||||
## A staleness duration to pass through to
|
||||
## :bro:see:`Broker::create_clone`.
|
||||
clone_stale_interval: interval &default=Broker::default_clone_stale_interval;
|
||||
## A mutation buffer interval to pass through to
|
||||
## :bro:see:`Broker::create_clone`.
|
||||
clone_mutation_buffer_interval: interval &default=Broker::default_clone_mutation_buffer_interval;
|
||||
};
|
||||
|
||||
## A table of cluster-enabled data stores that have been created, indexed
|
||||
## by their name. This table will be populated automatically by
|
||||
## :bro:see:`Cluster::create_store`, but if you need to customize
|
||||
## the options related to a particular data store, you may redef this
|
||||
## table. Calls to :bro:see:`Cluster::create_store` will first check
|
||||
## the table for an entry of the same name and, if found, will use the
|
||||
## predefined options there when setting up the store.
|
||||
global stores: table[string] of StoreInfo &default=StoreInfo() &redef;
|
||||
|
||||
## Sets up a cluster-enabled data store. They will also still properly
|
||||
## function for uses that are not operating a cluster.
|
||||
##
|
||||
## name: the name of the data store to create.
|
||||
##
|
||||
## persistent: whether the data store must be persistent.
|
||||
##
|
||||
## Returns: the store's information. For master stores, the store will be
|
||||
## ready to use immediately. For clones, the store field will not
|
||||
## be set until the node containing the master store has connected.
|
||||
global create_store: function(name: string, persistent: bool &default=F): StoreInfo;
|
||||
|
||||
## The cluster logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
|
@ -18,6 +119,8 @@ export {
|
|||
type Info: record {
|
||||
## The time at which a cluster message was generated.
|
||||
ts: time;
|
||||
## The name of the node that is creating the log record.
|
||||
node: string;
|
||||
## A message indicating information about the cluster's operation.
|
||||
message: string;
|
||||
} &log;
|
||||
|
@ -46,43 +149,6 @@ export {
|
|||
TIME_MACHINE,
|
||||
};
|
||||
|
||||
## Events raised by a manager and handled by the workers.
|
||||
const manager2worker_events = /Drop::.*/ &redef;
|
||||
|
||||
## Events raised by a manager and handled by proxies.
|
||||
const manager2proxy_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by a manager and handled by loggers.
|
||||
const manager2logger_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by proxies and handled by loggers.
|
||||
const proxy2logger_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by proxies and handled by a manager.
|
||||
const proxy2manager_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by proxies and handled by workers.
|
||||
const proxy2worker_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by workers and handled by loggers.
|
||||
const worker2logger_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by workers and handled by a manager.
|
||||
const worker2manager_events = /(TimeMachine::command|Drop::.*)/ &redef;
|
||||
|
||||
## Events raised by workers and handled by proxies.
|
||||
const worker2proxy_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by TimeMachine instances and handled by a manager.
|
||||
const tm2manager_events = /EMPTY/ &redef;
|
||||
|
||||
## Events raised by TimeMachine instances and handled by workers.
|
||||
const tm2worker_events = /EMPTY/ &redef;
|
||||
|
||||
## Events sent by the control host (i.e., BroControl) when dynamically
|
||||
## connecting to a running instance to update settings or request data.
|
||||
const control_events = Control::controller_events &redef;
|
||||
|
||||
## Record type to indicate a node in a cluster.
|
||||
type Node: record {
|
||||
## Identifies the type of cluster node in this node's configuration.
|
||||
|
@ -92,22 +158,17 @@ export {
|
|||
## If the *ip* field is a non-global IPv6 address, this field
|
||||
## can specify a particular :rfc:`4007` ``zone_id``.
|
||||
zone_id: string &default="";
|
||||
## The port to which this local node can connect when
|
||||
## establishing communication.
|
||||
## The port that this node will listen on for peer connections.
|
||||
p: port;
|
||||
## Identifier for the interface a worker is sniffing.
|
||||
interface: string &optional;
|
||||
## Name of the logger node this node uses. For manager, proxies and workers.
|
||||
logger: string &optional;
|
||||
## Name of the manager node this node uses. For workers and proxies.
|
||||
manager: string &optional;
|
||||
## Name of the proxy node this node uses. For workers and managers.
|
||||
proxy: string &optional;
|
||||
## Names of worker nodes that this node connects with.
|
||||
## For managers and proxies.
|
||||
workers: set[string] &optional;
|
||||
## Name of a time machine node with which this node connects.
|
||||
time_machine: string &optional;
|
||||
## A unique identifier assigned to the node by the broker framework.
|
||||
## This field is only set while a node is connected.
|
||||
id: string &optional;
|
||||
};
|
||||
|
||||
## This function can be called at any time to determine if the cluster
|
||||
|
@ -134,6 +195,8 @@ export {
|
|||
## named cluster-layout.bro somewhere in the BROPATH. It will be
|
||||
## automatically loaded if the CLUSTER_NODE environment variable is set.
|
||||
## Note that BroControl handles all of this automatically.
|
||||
## The table is typically indexed by node names/labels (e.g. "manager"
|
||||
## or "worker-1").
|
||||
const nodes: table[string] of Node = {} &redef;
|
||||
|
||||
## Indicates whether or not the manager will act as the logger and receive
|
||||
|
@ -148,6 +211,60 @@ export {
|
|||
|
||||
## Interval for retrying failed connections between cluster nodes.
|
||||
const retry_interval = 1min &redef;
|
||||
|
||||
## When using broker-enabled cluster framework, nodes broadcast this event
|
||||
## to exchange their user-defined name along with a string that uniquely
|
||||
## identifies it for the duration of its lifetime. This string may change
|
||||
## if the node dies and has to reconnect later.
|
||||
global hello: event(name: string, id: string);
|
||||
|
||||
## When using broker-enabled cluster framework, this event will be emitted
|
||||
## locally whenever a cluster node connects or reconnects.
|
||||
global node_up: event(name: string, id: string);
|
||||
|
||||
## When using broker-enabled cluster framework, this event will be emitted
|
||||
## locally whenever a connected cluster node becomes disconnected.
|
||||
global node_down: event(name: string, id: string);
|
||||
|
||||
## Write a message to the cluster logging stream.
|
||||
global log: function(msg: string);
|
||||
|
||||
## Retrieve the topic associated with a specific node in the cluster.
|
||||
##
|
||||
## name: the name of the cluster node (e.g. "manager").
|
||||
##
|
||||
## Returns: a topic string that may used to send a message exclusively to
|
||||
## a given cluster node.
|
||||
global node_topic: function(name: string): string;
|
||||
}
|
||||
|
||||
type NamedNode: record {
|
||||
name: string;
|
||||
node: Node;
|
||||
};
|
||||
|
||||
function nodes_with_type(node_type: NodeType): vector of NamedNode
|
||||
{
|
||||
local rval: vector of NamedNode = vector();
|
||||
local names: vector of string = vector();
|
||||
|
||||
for ( name in Cluster::nodes )
|
||||
names[|names|] = name;
|
||||
|
||||
names = sort(names, strcmp);
|
||||
|
||||
for ( i in names )
|
||||
{
|
||||
name = names[i];
|
||||
local n = Cluster::nodes[name];
|
||||
|
||||
if ( n$node_type != node_type )
|
||||
next;
|
||||
|
||||
rval[|rval|] = NamedNode($name=name, $node=n);
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
function is_enabled(): bool
|
||||
|
@ -160,16 +277,64 @@ function local_node_type(): NodeType
|
|||
return is_enabled() ? nodes[node]$node_type : NONE;
|
||||
}
|
||||
|
||||
event remote_connection_handshake_done(p: event_peer) &priority=5
|
||||
function node_topic(name: string): string
|
||||
{
|
||||
if ( p$descr in nodes && nodes[p$descr]$node_type == WORKER )
|
||||
return node_topic_prefix + name;
|
||||
}
|
||||
|
||||
event Cluster::hello(name: string, id: string) &priority=10
|
||||
{
|
||||
if ( name !in nodes )
|
||||
{
|
||||
Reporter::error(fmt("Got Cluster::hello msg from unexpected node: %s", name));
|
||||
return;
|
||||
}
|
||||
|
||||
local n = nodes[name];
|
||||
|
||||
if ( n?$id )
|
||||
{
|
||||
if ( n$id != id )
|
||||
Reporter::error(fmt("Got Cluster::hello msg from duplicate node:%s",
|
||||
name));
|
||||
}
|
||||
else
|
||||
event Cluster::node_up(name, id);
|
||||
|
||||
n$id = id;
|
||||
Cluster::log(fmt("got hello from %s (%s)", name, id));
|
||||
|
||||
if ( n$node_type == WORKER )
|
||||
++worker_count;
|
||||
}
|
||||
|
||||
event remote_connection_closed(p: event_peer) &priority=5
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10
|
||||
{
|
||||
if ( p$descr in nodes && nodes[p$descr]$node_type == WORKER )
|
||||
if ( ! Cluster::is_enabled() )
|
||||
return;
|
||||
|
||||
local e = Broker::make_event(Cluster::hello, node, Broker::node_id());
|
||||
Broker::publish(Cluster::broadcast_topic, e);
|
||||
}
|
||||
|
||||
event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) &priority=10
|
||||
{
|
||||
for ( node_name in nodes )
|
||||
{
|
||||
local n = nodes[node_name];
|
||||
|
||||
if ( n?$id && n$id == endpoint$id )
|
||||
{
|
||||
Cluster::log(fmt("node down: %s", node_name));
|
||||
delete n$id;
|
||||
|
||||
if ( n$node_type == WORKER )
|
||||
--worker_count;
|
||||
|
||||
event Cluster::node_down(node_name, endpoint$id);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
|
@ -183,3 +348,90 @@ event bro_init() &priority=5
|
|||
|
||||
Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster"]);
|
||||
}
|
||||
|
||||
function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo
|
||||
{
|
||||
local info = stores[name];
|
||||
info$name = name;
|
||||
|
||||
if ( Cluster::default_store_dir != "" )
|
||||
{
|
||||
local default_options = Broker::BackendOptions();
|
||||
local path = Cluster::default_store_dir + "/" + name;
|
||||
|
||||
if ( info$options$sqlite$path == default_options$sqlite$path )
|
||||
info$options$sqlite$path = path + ".sqlite";
|
||||
|
||||
if ( info$options$rocksdb$path == default_options$rocksdb$path )
|
||||
info$options$rocksdb$path = path + ".rocksdb";
|
||||
}
|
||||
|
||||
if ( persistent )
|
||||
{
|
||||
switch ( info$backend ) {
|
||||
case Broker::MEMORY:
|
||||
info$backend = Cluster::default_persistent_backend;
|
||||
break;
|
||||
case Broker::SQLITE:
|
||||
fallthrough;
|
||||
case Broker::ROCKSDB:
|
||||
# no-op: user already asked for a specific persistent backend.
|
||||
break;
|
||||
default:
|
||||
Reporter::error(fmt("unhandled data store type: %s", info$backend));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if ( ! Cluster::is_enabled() )
|
||||
{
|
||||
if ( info?$store )
|
||||
{
|
||||
Reporter::warning(fmt("duplicate cluster store creation for %s", name));
|
||||
return info;
|
||||
}
|
||||
|
||||
info$store = Broker::create_master(name, info$backend, info$options);
|
||||
info$master = T;
|
||||
stores[name] = info;
|
||||
return info;
|
||||
}
|
||||
|
||||
if ( info$master_node == "" )
|
||||
{
|
||||
local mgr_nodes = nodes_with_type(Cluster::MANAGER);
|
||||
|
||||
if ( |mgr_nodes| == 0 )
|
||||
Reporter::fatal(fmt("empty master node name for cluster store " +
|
||||
"'%s', but there's no manager node to default",
|
||||
name));
|
||||
|
||||
info$master_node = mgr_nodes[0]$name;
|
||||
}
|
||||
else if ( info$master_node !in Cluster::nodes )
|
||||
Reporter::fatal(fmt("master node '%s' for cluster store '%s' does not exist",
|
||||
info$master_node, name));
|
||||
|
||||
if ( Cluster::node == info$master_node )
|
||||
{
|
||||
info$store = Broker::create_master(name, info$backend, info$options);
|
||||
info$master = T;
|
||||
stores[name] = info;
|
||||
Cluster::log(fmt("created master store: %s", name));
|
||||
return info;
|
||||
}
|
||||
|
||||
info$master = F;
|
||||
stores[name] = info;
|
||||
info$store = Broker::create_clone(info$name,
|
||||
info$clone_resync_interval,
|
||||
info$clone_stale_interval,
|
||||
info$clone_mutation_buffer_interval);
|
||||
Cluster::log(fmt("created clone store: %s", info$name));
|
||||
return info;
|
||||
}
|
||||
|
||||
function log(msg: string)
|
||||
{
|
||||
Log::write(Cluster::LOG, [$ts = network_time(), $node = node, $message = msg]);
|
||||
}
|
||||
|
|
440
scripts/base/frameworks/cluster/pools.bro
Normal file
440
scripts/base/frameworks/cluster/pools.bro
Normal file
|
@ -0,0 +1,440 @@
|
|||
##! Defines an interface for managing pools of cluster nodes. Pools are
|
||||
##! are useful way to distribute work or data among nodes within a cluster.
|
||||
|
||||
@load ./main
|
||||
@load base/utils/hash_hrw
|
||||
|
||||
module Cluster;
|
||||
|
||||
export {
|
||||
## Store state of a cluster within within the context of a work pool.
|
||||
type PoolNode: record {
|
||||
## The node name (e.g. "manager").
|
||||
name: string;
|
||||
## An alias of *name* used to prevent hashing collisions when creating
|
||||
## *site_id*.
|
||||
alias: string;
|
||||
## A 32-bit unique identifier for the pool node, derived from name/alias.
|
||||
site_id: count;
|
||||
## Whether the node is currently alive and can receive work.
|
||||
alive: bool &default=F;
|
||||
};
|
||||
|
||||
## A pool specification.
|
||||
type PoolSpec: record {
|
||||
## A topic string that can be used to reach all nodes within a pool.
|
||||
topic: string &default = "";
|
||||
## The type of nodes that are contained within the pool.
|
||||
node_type: Cluster::NodeType &default = Cluster::PROXY;
|
||||
## The maximum number of nodes that may belong to the pool.
|
||||
## If not set, then all available nodes will be added to the pool,
|
||||
## else the cluster framework will automatically limit the pool
|
||||
## membership according to the threshhold.
|
||||
max_nodes: count &optional;
|
||||
## Whether the pool requires exclusive access to nodes. If true,
|
||||
## then *max_nodes* nodes will not be assigned to any other pool.
|
||||
## When using this flag, *max_nodes* must also be set.
|
||||
exclusive: bool &default = F;
|
||||
};
|
||||
|
||||
type PoolNodeTable: table[string] of PoolNode;
|
||||
type RoundRobinTable: table[string] of int;
|
||||
|
||||
## A pool used for distributing data/work among a set of cluster nodes.
|
||||
type Pool: record {
|
||||
## The specification of the pool that was used when registering it.
|
||||
spec: PoolSpec &default = PoolSpec();
|
||||
## Nodes in the pool, indexed by their name (e.g. "manager").
|
||||
nodes: PoolNodeTable &default = PoolNodeTable();
|
||||
## A list of nodes in the pool in a deterministic order.
|
||||
node_list: vector of PoolNode &default = vector();
|
||||
## The Rendezvous hashing structure.
|
||||
hrw_pool: HashHRW::Pool &default = HashHRW::Pool();
|
||||
## Round-Robin table indexed by arbitrary key and storing the next
|
||||
## index of *node_list* that will be eligible to receive work (if it's
|
||||
## alive at the time of next request).
|
||||
rr_key_seq: RoundRobinTable &default = RoundRobinTable();
|
||||
};
|
||||
|
||||
## The specification for :bro:see:`Cluster::proxy_pool`.
|
||||
global proxy_pool_spec: PoolSpec =
|
||||
PoolSpec($topic = "bro/cluster/pool/proxy",
|
||||
$node_type = Cluster::PROXY) &redef;
|
||||
|
||||
## The specification for :bro:see:`Cluster::worker_pool`.
|
||||
global worker_pool_spec: PoolSpec =
|
||||
PoolSpec($topic = "bro/cluster/pool/worker",
|
||||
$node_type = Cluster::WORKER) &redef;
|
||||
|
||||
## The specification for :bro:see:`Cluster::logger_pool`.
|
||||
global logger_pool_spec: PoolSpec =
|
||||
PoolSpec($topic = "bro/cluster/pool/logger",
|
||||
$node_type = Cluster::LOGGER) &redef;
|
||||
|
||||
## A pool containing all the proxy nodes of a cluster.
|
||||
## The pool's node membership/availability is automatically
|
||||
## maintained by the cluster framework.
|
||||
global proxy_pool: Pool;
|
||||
|
||||
## A pool containing all the worker nodes of a cluster.
|
||||
## The pool's node membership/availability is automatically
|
||||
## maintained by the cluster framework.
|
||||
global worker_pool: Pool;
|
||||
|
||||
## A pool containing all the logger nodes of a cluster.
|
||||
## The pool's node membership/availability is automatically
|
||||
## maintained by the cluster framework.
|
||||
global logger_pool: Pool;
|
||||
|
||||
## Registers and initializes a pool.
|
||||
global register_pool: function(spec: PoolSpec): Pool;
|
||||
|
||||
## Retrieve the topic associated with the node mapped via Rendezvous hash
|
||||
## of an arbitrary key.
|
||||
##
|
||||
## pool: the pool of nodes to consider.
|
||||
##
|
||||
## key: data used for input to the hashing function that will uniformly
|
||||
## distribute keys among available nodes.
|
||||
##
|
||||
## Returns: a topic string associated with a cluster node that is alive
|
||||
## or an empty string if nothing is alive.
|
||||
global hrw_topic: function(pool: Pool, key: any): string;
|
||||
|
||||
## Retrieve the topic associated with the node in a round-robin fashion.
|
||||
##
|
||||
## pool: the pool of nodes to consider.
|
||||
##
|
||||
## key: an arbitrary string to identify the purpose for which you're
|
||||
## requesting the topic. e.g. consider using namespacing of your script
|
||||
## like "Intel::cluster_rr_key".
|
||||
##
|
||||
## Returns: a topic string associated with a cluster node that is alive,
|
||||
## or an empty string if nothing is alive.
|
||||
global rr_topic: function(pool: Pool, key: string): string;
|
||||
|
||||
## Distributes log message topics among logger nodes via round-robin.
|
||||
## This will be automatically assigned to :bro:see:`Broker::log_topic`
|
||||
## if :bro:see:`Cluster::enable_round_robin_logging` is enabled.
|
||||
## If no logger nodes are active, then this will return the value
|
||||
## of :bro:see:`Broker::default_log_topic`.
|
||||
global rr_log_topic: function(id: Log::ID, path: string): string;
|
||||
}
|
||||
|
||||
## Initialize a node as a member of a pool.
|
||||
##
|
||||
## pool: the pool to which the node will belong.
|
||||
##
|
||||
## name: the name of the node (e.g. "manager").
|
||||
##
|
||||
## Returns: F if a node of the same name already exists in the pool, else T.
|
||||
global init_pool_node: function(pool: Pool, name: string): bool;
|
||||
|
||||
## Mark a pool node as alive/online/available. :bro:see:`Cluster::hrw_topic`
|
||||
## will distribute keys to nodes marked as alive.
|
||||
##
|
||||
## pool: the pool to which the node belongs.
|
||||
##
|
||||
## name: the name of the node to mark.
|
||||
##
|
||||
## Returns: F if the node does not exist in the pool, else T.
|
||||
global mark_pool_node_alive: function(pool: Pool, name: string): bool;
|
||||
|
||||
## Mark a pool node as dead/offline/unavailable. :bro:see:`Cluster::hrw_topic`
|
||||
## will not distribute keys to nodes marked as dead.
|
||||
##
|
||||
## pool: the pool to which the node belongs.
|
||||
##
|
||||
## name: the name of the node to mark.
|
||||
##
|
||||
## Returns: F if the node does not exist in the pool, else T.
|
||||
global mark_pool_node_dead: function(pool: Pool, name: string): bool;
|
||||
|
||||
global registered_pools: vector of Pool = vector();
|
||||
|
||||
function register_pool(spec: PoolSpec): Pool
|
||||
{
|
||||
local rval = Pool($spec = spec);
|
||||
registered_pools[|registered_pools|] = rval;
|
||||
return rval;
|
||||
}
|
||||
|
||||
function hrw_topic(pool: Pool, key: any): string
|
||||
{
|
||||
if ( |pool$hrw_pool$sites| == 0 )
|
||||
return "";
|
||||
|
||||
local site = HashHRW::get_site(pool$hrw_pool, key);
|
||||
local pn: PoolNode = site$user_data;
|
||||
return node_topic_prefix + pn$name;
|
||||
}
|
||||
|
||||
function rr_topic(pool: Pool, key: string): string
|
||||
{
|
||||
if ( key !in pool$rr_key_seq )
|
||||
pool$rr_key_seq[key] = 0;
|
||||
|
||||
local next_idx = pool$rr_key_seq[key];
|
||||
local start = next_idx;
|
||||
local rval = "";
|
||||
|
||||
if ( next_idx >= |pool$node_list| )
|
||||
return rval;
|
||||
|
||||
while ( T )
|
||||
{
|
||||
local pn = pool$node_list[next_idx];
|
||||
|
||||
++next_idx;
|
||||
|
||||
if ( next_idx == |pool$node_list| )
|
||||
next_idx = 0;
|
||||
|
||||
if ( pn$alive )
|
||||
{
|
||||
rval = node_topic_prefix + pn$name;
|
||||
break;
|
||||
}
|
||||
|
||||
if ( next_idx == start )
|
||||
# no nodes alive
|
||||
break;
|
||||
}
|
||||
|
||||
pool$rr_key_seq[key] = next_idx;
|
||||
return rval;
|
||||
}
|
||||
|
||||
function rr_log_topic(id: Log::ID, path: string): string
|
||||
{
|
||||
local rval = rr_topic(logger_pool, "Cluster::rr_log_topic");
|
||||
|
||||
if ( rval != "" )
|
||||
return rval;
|
||||
|
||||
rval = Broker::default_log_topic(id, path);
|
||||
return rval;
|
||||
}
|
||||
|
||||
event Cluster::node_up(name: string, id: string) &priority=10
|
||||
{
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
local pool = registered_pools[i];
|
||||
|
||||
if ( name in pool$nodes )
|
||||
mark_pool_node_alive(pool, name);
|
||||
}
|
||||
}
|
||||
|
||||
event Cluster::node_down(name: string, id: string) &priority=10
|
||||
{
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
local pool = registered_pools[i];
|
||||
|
||||
if ( name in pool$nodes )
|
||||
mark_pool_node_dead(pool, name);
|
||||
}
|
||||
}
|
||||
|
||||
function site_id_in_pool(pool: Pool, site_id: count): bool
|
||||
{
|
||||
for ( i in pool$nodes )
|
||||
{
|
||||
local pn = pool$nodes[i];
|
||||
|
||||
if ( pn$site_id == site_id )
|
||||
return T;
|
||||
}
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
function init_pool_node(pool: Pool, name: string): bool
|
||||
{
|
||||
if ( name in pool$nodes )
|
||||
return F;
|
||||
|
||||
local loop = T;
|
||||
local c = 0;
|
||||
|
||||
while ( loop )
|
||||
{
|
||||
# site id collisions are unlikely, but using aliases handles it...
|
||||
# alternatively could terminate and ask user to pick a new node name
|
||||
# if it ends up colliding.
|
||||
local alias = name + fmt(".%s", c);
|
||||
local site_id = fnv1a32(alias);
|
||||
|
||||
if ( site_id_in_pool(pool, site_id) )
|
||||
++c;
|
||||
else
|
||||
{
|
||||
local pn = PoolNode($name=name, $alias=alias, $site_id=site_id,
|
||||
$alive=Cluster::node == name);
|
||||
pool$nodes[name] = pn;
|
||||
pool$node_list[|pool$node_list|] = pn;
|
||||
loop = F;
|
||||
}
|
||||
}
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
function mark_pool_node_alive(pool: Pool, name: string): bool
|
||||
{
|
||||
if ( name !in pool$nodes )
|
||||
return F;
|
||||
|
||||
local pn = pool$nodes[name];
|
||||
pn$alive = T;
|
||||
HashHRW::add_site(pool$hrw_pool, HashHRW::Site($id=pn$site_id, $user_data=pn));
|
||||
return T;
|
||||
}
|
||||
|
||||
function mark_pool_node_dead(pool: Pool, name: string): bool
|
||||
{
|
||||
if ( name !in pool$nodes )
|
||||
return F;
|
||||
|
||||
local pn = pool$nodes[name];
|
||||
pn$alive = F;
|
||||
HashHRW::rem_site(pool$hrw_pool, HashHRW::Site($id=pn$site_id, $user_data=pn));
|
||||
return T;
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
worker_pool = register_pool(worker_pool_spec);
|
||||
proxy_pool = register_pool(proxy_pool_spec);
|
||||
logger_pool = register_pool(logger_pool_spec);
|
||||
}
|
||||
|
||||
type PoolEligibilityTracking: record {
|
||||
eligible_nodes: vector of NamedNode &default = vector();
|
||||
next_idx: count &default = 0;
|
||||
excluded: count &default = 0;
|
||||
};
|
||||
|
||||
global pool_eligibility: table[Cluster::NodeType] of PoolEligibilityTracking = table();
|
||||
|
||||
function pool_sorter(a: Pool, b: Pool): int
|
||||
{
|
||||
return strcmp(a$spec$topic, b$spec$topic);
|
||||
}
|
||||
|
||||
# Needs to execute before the bro_init in setup-connections
|
||||
event bro_init() &priority=-5
|
||||
{
|
||||
if ( ! Cluster::is_enabled() )
|
||||
return;
|
||||
|
||||
# Sorting now ensures the node distribution process is stable even if
|
||||
# there's a change in the order of time-of-registration between Bro runs.
|
||||
sort(registered_pools, pool_sorter);
|
||||
|
||||
pool_eligibility[Cluster::WORKER] =
|
||||
PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::WORKER));
|
||||
pool_eligibility[Cluster::PROXY] =
|
||||
PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::PROXY));
|
||||
pool_eligibility[Cluster::LOGGER] =
|
||||
PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::LOGGER));
|
||||
|
||||
if ( manager_is_logger )
|
||||
{
|
||||
local mgr = nodes_with_type(Cluster::MANAGER);
|
||||
|
||||
if ( |mgr| > 0 )
|
||||
{
|
||||
local eln = pool_eligibility[Cluster::LOGGER]$eligible_nodes;
|
||||
eln[|eln|] = mgr[0];
|
||||
}
|
||||
}
|
||||
|
||||
local pool: Pool;
|
||||
local pet: PoolEligibilityTracking;
|
||||
local en: vector of NamedNode;
|
||||
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
pool = registered_pools[i];
|
||||
|
||||
if ( pool$spec$node_type !in pool_eligibility )
|
||||
Reporter::fatal(fmt("invalid pool node type: %s", pool$spec$node_type));
|
||||
|
||||
if ( ! pool$spec$exclusive )
|
||||
next;
|
||||
|
||||
if ( ! pool$spec?$max_nodes )
|
||||
Reporter::fatal("Cluster::PoolSpec 'max_nodes' field must be set when using the 'exclusive' flag");
|
||||
|
||||
pet = pool_eligibility[pool$spec$node_type];
|
||||
pet$excluded += pool$spec$max_nodes;
|
||||
}
|
||||
|
||||
for ( nt in pool_eligibility )
|
||||
{
|
||||
pet = pool_eligibility[nt];
|
||||
|
||||
if ( pet$excluded > |pet$eligible_nodes| )
|
||||
Reporter::fatal(fmt("not enough %s nodes to satisfy pool exclusivity requirements: need %d nodes", nt, pet$excluded));
|
||||
}
|
||||
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
pool = registered_pools[i];
|
||||
|
||||
if ( ! pool$spec$exclusive )
|
||||
next;
|
||||
|
||||
pet = pool_eligibility[pool$spec$node_type];
|
||||
|
||||
local e = 0;
|
||||
|
||||
while ( e < pool$spec$max_nodes )
|
||||
{
|
||||
init_pool_node(pool, pet$eligible_nodes[e]$name);
|
||||
++e;
|
||||
}
|
||||
|
||||
local nen: vector of NamedNode = vector();
|
||||
|
||||
for ( j in pet$eligible_nodes )
|
||||
{
|
||||
if ( j < e )
|
||||
next;
|
||||
|
||||
nen[|nen|] = pet$eligible_nodes[j];
|
||||
}
|
||||
|
||||
pet$eligible_nodes = nen;
|
||||
}
|
||||
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
pool = registered_pools[i];
|
||||
|
||||
if ( pool$spec$exclusive )
|
||||
next;
|
||||
|
||||
pet = pool_eligibility[pool$spec$node_type];
|
||||
local nodes_to_init = |pet$eligible_nodes|;
|
||||
|
||||
if ( pool$spec?$max_nodes &&
|
||||
pool$spec$max_nodes < |pet$eligible_nodes| )
|
||||
nodes_to_init = pool$spec$max_nodes;
|
||||
|
||||
local nodes_inited = 0;
|
||||
|
||||
while ( nodes_inited < nodes_to_init )
|
||||
{
|
||||
init_pool_node(pool, pet$eligible_nodes[pet$next_idx]$name);
|
||||
++nodes_inited;
|
||||
++pet$next_idx;
|
||||
|
||||
if ( pet$next_idx == |pet$eligible_nodes| )
|
||||
pet$next_idx = 0;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -2,142 +2,122 @@
|
|||
##! as defined by :bro:id:`Cluster::nodes`.
|
||||
|
||||
@load ./main
|
||||
@load base/frameworks/communication
|
||||
|
||||
@if ( Cluster::node in Cluster::nodes )
|
||||
@load ./pools
|
||||
@load base/frameworks/broker
|
||||
|
||||
module Cluster;
|
||||
|
||||
event bro_init() &priority=9
|
||||
function connect_peer(node_type: NodeType, node_name: string)
|
||||
{
|
||||
local me = nodes[node];
|
||||
local nn = nodes_with_type(node_type);
|
||||
|
||||
for ( i in Cluster::nodes )
|
||||
for ( i in nn )
|
||||
{
|
||||
local n = nodes[i];
|
||||
local n = nn[i];
|
||||
|
||||
# Connections from the control node for runtime control and update events.
|
||||
# Every node in a cluster is eligible for control from this host.
|
||||
if ( n$node_type == CONTROL )
|
||||
Communication::nodes["control"] = [$host=n$ip, $zone_id=n$zone_id,
|
||||
$connect=F, $class="control",
|
||||
$events=control_events];
|
||||
|
||||
if ( me$node_type == LOGGER )
|
||||
{
|
||||
if ( n$node_type == MANAGER && n$logger == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=manager2logger_events, $request_logs=T];
|
||||
if ( n$node_type == PROXY && n$logger == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=proxy2logger_events, $request_logs=T];
|
||||
if ( n$node_type == WORKER && n$logger == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=worker2logger_events, $request_logs=T];
|
||||
}
|
||||
else if ( me$node_type == MANAGER )
|
||||
{
|
||||
if ( n$node_type == LOGGER && me$logger == i )
|
||||
Communication::nodes["logger"] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $p=n$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node];
|
||||
|
||||
if ( n$node_type == WORKER && n$manager == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=worker2manager_events,
|
||||
$request_logs=Cluster::manager_is_logger];
|
||||
|
||||
if ( n$node_type == PROXY && n$manager == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=proxy2manager_events,
|
||||
$request_logs=Cluster::manager_is_logger];
|
||||
|
||||
if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i )
|
||||
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$events=tm2manager_events];
|
||||
}
|
||||
|
||||
else if ( me$node_type == PROXY )
|
||||
{
|
||||
if ( n$node_type == LOGGER && me$logger == i )
|
||||
Communication::nodes["logger"] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $p=n$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node];
|
||||
|
||||
if ( n$node_type == WORKER && n$proxy == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i,
|
||||
$sync=T, $auth=T, $events=worker2proxy_events];
|
||||
|
||||
# accepts connections from the previous one.
|
||||
# (This is not ideal for setups with many proxies)
|
||||
# FIXME: Once we're using multiple proxies, we should also figure out some $class scheme ...
|
||||
if ( n$node_type == PROXY )
|
||||
{
|
||||
if ( n?$proxy )
|
||||
Communication::nodes[i]
|
||||
= [$host=n$ip, $zone_id=n$zone_id, $p=n$p,
|
||||
$connect=T, $auth=F, $sync=T, $retry=retry_interval];
|
||||
else if ( me?$proxy && me$proxy == i )
|
||||
Communication::nodes[me$proxy]
|
||||
= [$host=nodes[i]$ip, $zone_id=nodes[i]$zone_id,
|
||||
$connect=F, $auth=T, $sync=T];
|
||||
}
|
||||
|
||||
# Finally the manager, to send it status updates.
|
||||
if ( n$node_type == MANAGER && me$manager == i )
|
||||
Communication::nodes["manager"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node,
|
||||
$events=manager2proxy_events];
|
||||
}
|
||||
else if ( me$node_type == WORKER )
|
||||
{
|
||||
if ( n$node_type == LOGGER && me$logger == i )
|
||||
Communication::nodes["logger"] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $p=n$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node];
|
||||
|
||||
if ( n$node_type == MANAGER && me$manager == i )
|
||||
Communication::nodes["manager"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node,
|
||||
$events=manager2worker_events];
|
||||
|
||||
if ( n$node_type == PROXY && me$proxy == i )
|
||||
Communication::nodes["proxy"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$sync=T, $class=node,
|
||||
$events=proxy2worker_events];
|
||||
|
||||
if ( n$node_type == TIME_MACHINE &&
|
||||
me?$time_machine && me$time_machine == i )
|
||||
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T,
|
||||
$retry=retry_interval,
|
||||
$events=tm2worker_events];
|
||||
if ( n$name != node_name )
|
||||
next;
|
||||
|
||||
local status = Broker::peer(cat(n$node$ip), n$node$p,
|
||||
Cluster::retry_interval);
|
||||
Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s",
|
||||
n$node$ip, n$node$p, Cluster::retry_interval,
|
||||
status));
|
||||
}
|
||||
}
|
||||
|
||||
function connect_peers_with_type(node_type: NodeType)
|
||||
{
|
||||
local rval: vector of NamedNode = vector();
|
||||
local nn = nodes_with_type(node_type);
|
||||
|
||||
for ( i in nn )
|
||||
{
|
||||
local n = nn[i];
|
||||
local status = Broker::peer(cat(n$node$ip), n$node$p,
|
||||
Cluster::retry_interval);
|
||||
Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s",
|
||||
n$node$ip, n$node$p, Cluster::retry_interval,
|
||||
status));
|
||||
}
|
||||
}
|
||||
|
||||
@endif
|
||||
event bro_init() &priority=-10
|
||||
{
|
||||
local self = nodes[node];
|
||||
|
||||
for ( i in registered_pools )
|
||||
{
|
||||
local pool = registered_pools[i];
|
||||
|
||||
if ( node in pool$nodes )
|
||||
Broker::subscribe(pool$spec$topic);
|
||||
}
|
||||
|
||||
switch ( self$node_type ) {
|
||||
case NONE:
|
||||
return;
|
||||
case CONTROL:
|
||||
break;
|
||||
case LOGGER:
|
||||
Broker::subscribe(Cluster::logger_topic);
|
||||
Broker::subscribe(Broker::default_log_topic_prefix);
|
||||
break;
|
||||
case MANAGER:
|
||||
Broker::subscribe(Cluster::manager_topic);
|
||||
|
||||
if ( Cluster::manager_is_logger )
|
||||
Broker::subscribe(Broker::default_log_topic_prefix);
|
||||
|
||||
break;
|
||||
case PROXY:
|
||||
Broker::subscribe(Cluster::proxy_topic);
|
||||
break;
|
||||
case WORKER:
|
||||
Broker::subscribe(Cluster::worker_topic);
|
||||
break;
|
||||
case TIME_MACHINE:
|
||||
Broker::subscribe(Cluster::time_machine_topic);
|
||||
break;
|
||||
default:
|
||||
Reporter::error(fmt("Unhandled cluster node type: %s", self$node_type));
|
||||
return;
|
||||
}
|
||||
|
||||
Broker::subscribe(Cluster::broadcast_topic);
|
||||
Broker::subscribe(node_topic(node));
|
||||
|
||||
Broker::listen(Broker::default_listen_address,
|
||||
self$p,
|
||||
Broker::default_listen_retry);
|
||||
|
||||
Cluster::log(fmt("listening on %s:%s", Broker::default_listen_address, self$p));
|
||||
|
||||
switch ( self$node_type ) {
|
||||
case MANAGER:
|
||||
connect_peers_with_type(LOGGER);
|
||||
|
||||
if ( self?$time_machine )
|
||||
connect_peer(TIME_MACHINE, self$time_machine);
|
||||
|
||||
break;
|
||||
case PROXY:
|
||||
connect_peers_with_type(LOGGER);
|
||||
|
||||
if ( self?$manager )
|
||||
connect_peer(MANAGER, self$manager);
|
||||
|
||||
break;
|
||||
case WORKER:
|
||||
connect_peers_with_type(LOGGER);
|
||||
connect_peers_with_type(PROXY);
|
||||
|
||||
if ( self?$manager )
|
||||
connect_peer(MANAGER, self$manager);
|
||||
|
||||
if ( self?$time_machine )
|
||||
connect_peer(TIME_MACHINE, self$time_machine);
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,2 +0,0 @@
|
|||
The communication framework facilitates connecting to remote Bro or
|
||||
Broccoli instances to share state and transfer events.
|
|
@ -1 +0,0 @@
|
|||
@load ./main
|
|
@ -1,354 +0,0 @@
|
|||
##! Facilitates connecting to remote Bro or Broccoli instances to share state
|
||||
##! and/or transfer events.
|
||||
|
||||
@load base/frameworks/packet-filter
|
||||
@load base/utils/addrs
|
||||
|
||||
module Communication;
|
||||
|
||||
export {
|
||||
|
||||
## The communication logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]``
|
||||
## are wildcards.
|
||||
const listen_interface = 0.0.0.0 &redef;
|
||||
|
||||
## Which port to listen on. Note that BroControl sets this
|
||||
## automatically.
|
||||
const listen_port = 47757/tcp &redef;
|
||||
|
||||
## This defines if a listening socket should use SSL.
|
||||
const listen_ssl = F &redef;
|
||||
|
||||
## Defines if a listening socket can bind to IPv6 addresses.
|
||||
##
|
||||
## Note that this is overridden by the BroControl IPv6Comm option.
|
||||
const listen_ipv6 = F &redef;
|
||||
|
||||
## If :bro:id:`Communication::listen_interface` is a non-global
|
||||
## IPv6 address and requires a specific :rfc:`4007` ``zone_id``,
|
||||
## it can be specified here.
|
||||
const listen_ipv6_zone_id = "" &redef;
|
||||
|
||||
## Defines the interval at which to retry binding to
|
||||
## :bro:id:`Communication::listen_interface` on
|
||||
## :bro:id:`Communication::listen_port` if it's already in use.
|
||||
const listen_retry = 30 secs &redef;
|
||||
|
||||
## Default compression level. Compression level is 0-9, with 0 = no
|
||||
## compression.
|
||||
global compression_level = 0 &redef;
|
||||
|
||||
## A record type containing the column fields of the communication log.
|
||||
type Info: record {
|
||||
## The network time at which a communication event occurred.
|
||||
ts: time &log;
|
||||
## The peer name (if any) with which a communication event is
|
||||
## concerned.
|
||||
peer: string &log &optional;
|
||||
## Where the communication event message originated from, that
|
||||
## is, either from the scripting layer or inside the Bro process.
|
||||
src_name: string &log &optional;
|
||||
## .. todo:: currently unused.
|
||||
connected_peer_desc: string &log &optional;
|
||||
## .. todo:: currently unused.
|
||||
connected_peer_addr: addr &log &optional;
|
||||
## .. todo:: currently unused.
|
||||
connected_peer_port: port &log &optional;
|
||||
## The severity of the communication event message.
|
||||
level: string &log &optional;
|
||||
## A message describing the communication event between Bro or
|
||||
## Broccoli instances.
|
||||
message: string &log;
|
||||
};
|
||||
|
||||
## A remote peer to which we would like to talk.
|
||||
## If there's no entry for a peer, it may still connect
|
||||
## and request state, but not send us any.
|
||||
type Node: record {
|
||||
## Remote address.
|
||||
host: addr;
|
||||
|
||||
## If the *host* field is a non-global IPv6 address, this field
|
||||
## can specify a particular :rfc:`4007` ``zone_id``.
|
||||
zone_id: string &optional;
|
||||
|
||||
## Port of the remote Bro communication endpoint if we are
|
||||
## initiating the connection (based on the *connect* field).
|
||||
p: port &optional;
|
||||
|
||||
## When accepting a connection, the configuration only
|
||||
## applies if the class matches the one transmitted by
|
||||
## the peer.
|
||||
##
|
||||
## When initiating a connection, the class is sent to
|
||||
## the other side.
|
||||
class: string &optional;
|
||||
|
||||
## Events requested from remote side.
|
||||
events: pattern &optional;
|
||||
|
||||
## Whether we are going to connect (rather than waiting
|
||||
## for the other side to connect to us).
|
||||
connect: bool &default = F;
|
||||
|
||||
## If disconnected, reconnect after this many seconds.
|
||||
retry: interval &default = 0 secs;
|
||||
|
||||
## Whether to accept remote events.
|
||||
accept_input: bool &default = T;
|
||||
|
||||
## Whether to perform state synchronization with peer.
|
||||
sync: bool &default = F;
|
||||
|
||||
## Whether to request logs from the peer.
|
||||
request_logs: bool &default = F;
|
||||
|
||||
## When performing state synchronization, whether we consider
|
||||
## our state to be authoritative (only one side can be
|
||||
## authoritative). If so, we will send the peer our current
|
||||
## set when the connection is set up.
|
||||
auth: bool &default = F;
|
||||
|
||||
## If not set, no capture filter is sent.
|
||||
## If set to an empty string, then the default capture filter
|
||||
## is sent.
|
||||
capture_filter: string &optional;
|
||||
|
||||
## Whether to use SSL-based communication.
|
||||
ssl: bool &default = F;
|
||||
|
||||
## Compression level is 0-9, with 0 = no compression.
|
||||
compression: count &default = compression_level;
|
||||
|
||||
## The remote peer.
|
||||
peer: event_peer &optional;
|
||||
|
||||
## Indicates the status of the node.
|
||||
connected: bool &default = F;
|
||||
};
|
||||
|
||||
## The table of Bro or Broccoli nodes that Bro will initiate connections
|
||||
## to or respond to connections from. Note that BroControl sets this
|
||||
## automatically.
|
||||
global nodes: table[string] of Node &redef;
|
||||
|
||||
## A table of peer nodes for which this node issued a
|
||||
## :bro:id:`Communication::connect_peer` call but with which a connection
|
||||
## has not yet been established or with which a connection has been
|
||||
## closed and is currently in the process of retrying to establish.
|
||||
## When a connection is successfully established, the peer is removed
|
||||
## from the table.
|
||||
global pending_peers: table[peer_id] of Node;
|
||||
|
||||
## A table of peer nodes for which this node has an established connection.
|
||||
## Peers are automatically removed if their connection is closed and
|
||||
## automatically added back if a connection is re-established later.
|
||||
global connected_peers: table[peer_id] of Node;
|
||||
|
||||
## Connect to a node in :bro:id:`Communication::nodes` independent
|
||||
## of its "connect" flag.
|
||||
##
|
||||
## peer: the string used to index a particular node within the
|
||||
## :bro:id:`Communication::nodes` table.
|
||||
global connect_peer: function(peer: string);
|
||||
}
|
||||
|
||||
const src_names = {
|
||||
[REMOTE_SRC_CHILD] = "child",
|
||||
[REMOTE_SRC_PARENT] = "parent",
|
||||
[REMOTE_SRC_SCRIPT] = "script",
|
||||
};
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(Communication::LOG, [$columns=Info, $path="communication"]);
|
||||
}
|
||||
|
||||
function do_script_log_common(level: count, src: count, msg: string)
|
||||
{
|
||||
Log::write(Communication::LOG, [$ts = network_time(),
|
||||
$level = (level == REMOTE_LOG_INFO ? "info" : "error"),
|
||||
$src_name = src_names[src],
|
||||
$peer = get_event_peer()$descr,
|
||||
$message = msg]);
|
||||
}
|
||||
|
||||
# This is a core generated event.
|
||||
event remote_log(level: count, src: count, msg: string)
|
||||
{
|
||||
do_script_log_common(level, src, msg);
|
||||
}
|
||||
|
||||
# This is a core generated event.
|
||||
event remote_log_peer(p: event_peer, level: count, src: count, msg: string)
|
||||
{
|
||||
local rmsg = fmt("[#%d/%s:%d] %s", p$id, addr_to_uri(p$host), p$p, msg);
|
||||
do_script_log_common(level, src, rmsg);
|
||||
}
|
||||
|
||||
function do_script_log(p: event_peer, msg: string)
|
||||
{
|
||||
do_script_log_common(REMOTE_LOG_INFO, REMOTE_SRC_SCRIPT, msg);
|
||||
}
|
||||
|
||||
function connect_peer(peer: string)
|
||||
{
|
||||
local node = nodes[peer];
|
||||
local p = listen_port;
|
||||
|
||||
if ( node?$p )
|
||||
p = node$p;
|
||||
|
||||
local class = node?$class ? node$class : "";
|
||||
local zone_id = node?$zone_id ? node$zone_id : "";
|
||||
local id = connect(node$host, zone_id, p, class, node$retry, node$ssl);
|
||||
|
||||
if ( id == PEER_ID_NONE )
|
||||
Log::write(Communication::LOG, [$ts = network_time(),
|
||||
$peer = get_event_peer()$descr,
|
||||
$message = "can't trigger connect"]);
|
||||
pending_peers[id] = node;
|
||||
}
|
||||
|
||||
|
||||
function setup_peer(p: event_peer, node: Node)
|
||||
{
|
||||
if ( node?$events )
|
||||
{
|
||||
do_script_log(p, fmt("requesting events matching %s", node$events));
|
||||
request_remote_events(p, node$events);
|
||||
}
|
||||
|
||||
if ( node?$capture_filter && node$capture_filter != "" )
|
||||
{
|
||||
local filter = node$capture_filter;
|
||||
do_script_log(p, fmt("sending capture_filter: %s", filter));
|
||||
send_capture_filter(p, filter);
|
||||
}
|
||||
|
||||
if ( node$accept_input )
|
||||
{
|
||||
do_script_log(p, "accepting state");
|
||||
set_accept_state(p, T);
|
||||
}
|
||||
|
||||
set_compression_level(p, node$compression);
|
||||
|
||||
if ( node$sync )
|
||||
{
|
||||
do_script_log(p, "requesting synchronized state");
|
||||
request_remote_sync(p, node$auth);
|
||||
}
|
||||
|
||||
if ( node$request_logs )
|
||||
{
|
||||
do_script_log(p, "requesting logs");
|
||||
request_remote_logs(p);
|
||||
}
|
||||
|
||||
node$peer = p;
|
||||
node$connected = T;
|
||||
connected_peers[p$id] = node;
|
||||
}
|
||||
|
||||
event remote_connection_established(p: event_peer)
|
||||
{
|
||||
if ( is_remote_event() )
|
||||
return;
|
||||
|
||||
do_script_log(p, "connection established");
|
||||
|
||||
if ( p$id in pending_peers )
|
||||
{
|
||||
# We issued the connect.
|
||||
local node = pending_peers[p$id];
|
||||
setup_peer(p, node);
|
||||
delete pending_peers[p$id];
|
||||
}
|
||||
else
|
||||
{ # The other side connected to us.
|
||||
local found = F;
|
||||
for ( i in nodes )
|
||||
{
|
||||
node = nodes[i];
|
||||
if ( node$host == p$host )
|
||||
{
|
||||
local c = 0;
|
||||
|
||||
# See if classes match = either both have
|
||||
# the same class, or neither of them has
|
||||
# a class.
|
||||
if ( p?$class && p$class != "" )
|
||||
++c;
|
||||
|
||||
if ( node?$class && node$class != "" )
|
||||
++c;
|
||||
|
||||
if ( c == 1 ||
|
||||
(c == 2 && p$class != node$class) )
|
||||
next;
|
||||
|
||||
found = T;
|
||||
setup_peer(p, node);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if ( ! found )
|
||||
set_compression_level(p, compression_level);
|
||||
}
|
||||
|
||||
complete_handshake(p);
|
||||
}
|
||||
|
||||
event remote_connection_closed(p: event_peer)
|
||||
{
|
||||
if ( is_remote_event() )
|
||||
return;
|
||||
|
||||
do_script_log(p, "connection closed");
|
||||
|
||||
if ( p$id in connected_peers )
|
||||
{
|
||||
local node = connected_peers[p$id];
|
||||
node$connected = F;
|
||||
|
||||
delete connected_peers[p$id];
|
||||
|
||||
if ( node$retry != 0secs )
|
||||
# The core will retry.
|
||||
pending_peers[p$id] = node;
|
||||
}
|
||||
}
|
||||
|
||||
event remote_state_inconsistency(operation: string, id: string,
|
||||
expected_old: string, real_old: string)
|
||||
{
|
||||
if ( is_remote_event() )
|
||||
return;
|
||||
|
||||
local msg = fmt("state inconsistency: %s should be %s but is %s before %s",
|
||||
id, expected_old, real_old, operation);
|
||||
Log::write(Communication::LOG, [$ts = network_time(),
|
||||
$peer = get_event_peer()$descr,
|
||||
$message = msg]);
|
||||
}
|
||||
|
||||
|
||||
# Actually initiate the connections that need to be established.
|
||||
event bro_init() &priority = -10 # let others modify nodes
|
||||
{
|
||||
if ( |nodes| > 0 )
|
||||
enable_communication();
|
||||
|
||||
for ( tag in nodes )
|
||||
{
|
||||
if ( ! nodes[tag]$connect )
|
||||
next;
|
||||
|
||||
connect_peer(tag);
|
||||
}
|
||||
}
|
|
@ -5,6 +5,13 @@
|
|||
module Control;
|
||||
|
||||
export {
|
||||
## The topic prefix used for exchanging control messages via Broker.
|
||||
const topic_prefix = "bro/control";
|
||||
|
||||
## Whether the controllee should call :bro:see:`Broker::listen`.
|
||||
## In a cluster, this isn't needed since the setup process calls it.
|
||||
const controllee_listen = T &redef;
|
||||
|
||||
## The address of the host that will be controlled.
|
||||
const host = 0.0.0.0 &redef;
|
||||
|
||||
|
@ -22,12 +29,6 @@ export {
|
|||
## This can be used by commands that take an argument.
|
||||
const arg = "" &redef;
|
||||
|
||||
## Events that need to be handled by controllers.
|
||||
const controller_events = /Control::.*_request/ &redef;
|
||||
|
||||
## Events that need to be handled by controllees.
|
||||
const controllee_events = /Control::.*_response/ &redef;
|
||||
|
||||
## The commands that can currently be given on the command line for
|
||||
## remote control.
|
||||
const commands: set[string] = {
|
||||
|
@ -73,8 +74,7 @@ export {
|
|||
global shutdown_response: event();
|
||||
}
|
||||
|
||||
|
||||
event terminate_event()
|
||||
{
|
||||
terminate_communication();
|
||||
terminate();
|
||||
}
|
||||
|
|
|
@ -6,69 +6,96 @@
|
|||
|
||||
module Intel;
|
||||
|
||||
redef record Item += {
|
||||
## This field is used internally for cluster transparency to avoid
|
||||
## re-dispatching intelligence items over and over from workers.
|
||||
first_dispatch: bool &default=T;
|
||||
};
|
||||
export {
|
||||
## Broker topic for management of intel items. Sending insert_item and
|
||||
## remove_item events, peers can manage intelligence data.
|
||||
const item_topic = "bro/intel/items" &redef;
|
||||
|
||||
## Broker topic for management of intel indicators as stored on workers
|
||||
## for matching. Sending insert_indicator and remove_indicator events,
|
||||
## the back-end manages indicators.
|
||||
const indicator_topic = "bro/intel/indicators" &redef;
|
||||
|
||||
## Broker topic for matching events, generated by workers and sent to
|
||||
## the back-end for metadata enrichment and logging.
|
||||
const match_topic = "bro/intel/match" &redef;
|
||||
}
|
||||
|
||||
# Internal events for cluster data distribution.
|
||||
global insert_item: event(item: Item);
|
||||
global insert_indicator: event(item: Item);
|
||||
|
||||
# If this process is not a manager process, we don't want the full metadata.
|
||||
@if ( Cluster::local_node_type() != Cluster::MANAGER )
|
||||
redef have_full_data = F;
|
||||
@endif
|
||||
|
||||
# Internal event for cluster data distribution.
|
||||
global cluster_new_item: event(item: Item);
|
||||
|
||||
# Primary intelligence management is done by the manager.
|
||||
# The manager informs the workers about new items and item removal.
|
||||
redef Cluster::manager2worker_events += /^Intel::(cluster_new_item|purge_item)$/;
|
||||
# A worker queries the manager to insert, remove or indicate the match of an item.
|
||||
redef Cluster::worker2manager_events += /^Intel::(cluster_new_item|remove_item|match_no_items)$/;
|
||||
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
event bro_init()
|
||||
{
|
||||
Broker::subscribe(item_topic);
|
||||
Broker::subscribe(match_topic);
|
||||
|
||||
Broker::auto_publish(indicator_topic, remove_indicator);
|
||||
}
|
||||
|
||||
# Handling of new worker nodes.
|
||||
event remote_connection_handshake_done(p: event_peer)
|
||||
event Cluster::node_up(name: string, id: string)
|
||||
{
|
||||
# When a worker connects, send it the complete minimal data store.
|
||||
# It will be kept up to date after this by the cluster_new_item event.
|
||||
if ( p$descr in Cluster::nodes && Cluster::nodes[p$descr]$node_type == Cluster::WORKER )
|
||||
# It will be kept up to date after this by the insert_indicator event.
|
||||
if ( name in Cluster::nodes && Cluster::nodes[name]$node_type == Cluster::WORKER )
|
||||
{
|
||||
send_id(p, "Intel::min_data_store");
|
||||
Broker::publish_id(Cluster::node_topic(name), "Intel::min_data_store");
|
||||
}
|
||||
}
|
||||
|
||||
# Handling of matches triggered by worker nodes.
|
||||
event Intel::match_no_items(s: Seen) &priority=5
|
||||
# On the manager, the new_item event indicates a new indicator that
|
||||
# has to be distributed.
|
||||
event Intel::new_item(item: Item) &priority=5
|
||||
{
|
||||
Broker::publish(indicator_topic, Intel::insert_indicator, item);
|
||||
}
|
||||
|
||||
# Handling of item insertion triggered by remote node.
|
||||
event Intel::insert_item(item: Intel::Item) &priority=5
|
||||
{
|
||||
Intel::_insert(item, T);
|
||||
}
|
||||
|
||||
# Handling of item removal triggered by remote node.
|
||||
event Intel::remove_item(item: Item, purge_indicator: bool) &priority=5
|
||||
{
|
||||
remove(item, purge_indicator);
|
||||
}
|
||||
|
||||
# Handling of match triggered by remote node.
|
||||
event Intel::match_remote(s: Seen) &priority=5
|
||||
{
|
||||
if ( Intel::find(s) )
|
||||
event Intel::match(s, Intel::get_items(s));
|
||||
}
|
||||
|
||||
# Handling of item removal triggered by worker nodes.
|
||||
event Intel::remove_item(item: Item, purge_indicator: bool)
|
||||
{
|
||||
remove(item, purge_indicator);
|
||||
}
|
||||
@endif
|
||||
|
||||
# Handling of item insertion.
|
||||
event Intel::new_item(item: Intel::Item) &priority=5
|
||||
@if ( Cluster::local_node_type() == Cluster::WORKER )
|
||||
event bro_init()
|
||||
{
|
||||
# The cluster manager always rebroadcasts intelligence.
|
||||
# Workers redistribute it if it was locally generated.
|
||||
if ( Cluster::local_node_type() == Cluster::MANAGER ||
|
||||
item$first_dispatch )
|
||||
{
|
||||
item$first_dispatch=F;
|
||||
event Intel::cluster_new_item(item);
|
||||
}
|
||||
Broker::subscribe(indicator_topic);
|
||||
|
||||
Broker::auto_publish(match_topic, match_remote);
|
||||
Broker::auto_publish(item_topic, remove_item);
|
||||
}
|
||||
|
||||
# Handling of item insertion by remote node.
|
||||
event Intel::cluster_new_item(item: Intel::Item) &priority=5
|
||||
# On a worker, the new_item event requires to trigger the insertion
|
||||
# on the manager to update the back-end data store.
|
||||
event Intel::new_item(item: Intel::Item) &priority=5
|
||||
{
|
||||
# Ignore locally generated events to avoid event storms.
|
||||
if ( is_remote_event() )
|
||||
Intel::insert(item);
|
||||
Broker::publish(item_topic, Intel::insert_item, item);
|
||||
}
|
||||
|
||||
# Handling of new indicators published by the manager.
|
||||
event Intel::insert_indicator(item: Intel::Item) &priority=5
|
||||
{
|
||||
Intel::_insert(item, F);
|
||||
}
|
||||
@endif
|
||||
|
|
|
@ -177,12 +177,12 @@ export {
|
|||
}
|
||||
|
||||
# Internal handler for matches with no metadata available.
|
||||
global match_no_items: event(s: Seen);
|
||||
global match_remote: event(s: Seen);
|
||||
|
||||
# Internal events for cluster data distribution.
|
||||
# Internal events for (cluster) data distribution.
|
||||
global new_item: event(item: Item);
|
||||
global remove_item: event(item: Item, purge_indicator: bool);
|
||||
global purge_item: event(item: Item);
|
||||
global remove_indicator: event(item: Item);
|
||||
|
||||
# Optionally store metadata. This is used internally depending on
|
||||
# if this is a cluster deployment or not.
|
||||
|
@ -357,7 +357,7 @@ function Intel::seen(s: Seen)
|
|||
}
|
||||
else
|
||||
{
|
||||
event Intel::match_no_items(s);
|
||||
event Intel::match_remote(s);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -389,9 +389,11 @@ hook extend_match(info: Info, s: Seen, items: set[Item]) &priority=5
|
|||
}
|
||||
}
|
||||
|
||||
function insert(item: Item)
|
||||
# Function to insert metadata of an item. The function returns T
|
||||
# if the given indicator is new.
|
||||
function insert_meta_data(item: Item): bool
|
||||
{
|
||||
# Create and fill out the metadata item.
|
||||
# Prepare the metadata entry.
|
||||
local meta = item$meta;
|
||||
local meta_tbl: table [string] of MetaData;
|
||||
local is_new: bool = T;
|
||||
|
@ -399,11 +401,11 @@ function insert(item: Item)
|
|||
# All intelligence is case insensitive at the moment.
|
||||
local lower_indicator = to_lower(item$indicator);
|
||||
|
||||
if ( item$indicator_type == ADDR )
|
||||
switch ( item$indicator_type )
|
||||
{
|
||||
case ADDR:
|
||||
local host = to_addr(item$indicator);
|
||||
if ( have_full_data )
|
||||
{
|
||||
|
||||
if ( host !in data_store$host_data )
|
||||
data_store$host_data[host] = table();
|
||||
else
|
||||
|
@ -414,15 +416,10 @@ function insert(item: Item)
|
|||
}
|
||||
|
||||
meta_tbl = data_store$host_data[host];
|
||||
}
|
||||
|
||||
add min_data_store$host_data[host];
|
||||
}
|
||||
else if ( item$indicator_type == SUBNET )
|
||||
{
|
||||
break;
|
||||
case SUBNET:
|
||||
local net = to_subnet(item$indicator);
|
||||
if ( have_full_data )
|
||||
{
|
||||
|
||||
if ( !check_subnet(net, data_store$subnet_data) )
|
||||
data_store$subnet_data[net] = table();
|
||||
else
|
||||
|
@ -433,14 +430,8 @@ function insert(item: Item)
|
|||
}
|
||||
|
||||
meta_tbl = data_store$subnet_data[net];
|
||||
}
|
||||
|
||||
add min_data_store$subnet_data[net];
|
||||
}
|
||||
else
|
||||
{
|
||||
if ( have_full_data )
|
||||
{
|
||||
break;
|
||||
default:
|
||||
if ( [lower_indicator, item$indicator_type] !in data_store$string_data )
|
||||
data_store$string_data[lower_indicator, item$indicator_type] = table();
|
||||
else
|
||||
|
@ -452,23 +443,59 @@ function insert(item: Item)
|
|||
}
|
||||
|
||||
meta_tbl = data_store$string_data[lower_indicator, item$indicator_type];
|
||||
break;
|
||||
}
|
||||
|
||||
# Insert new metadata or update if already present.
|
||||
meta_tbl[meta$source] = meta;
|
||||
|
||||
return is_new;
|
||||
}
|
||||
|
||||
# Function to encapsulate insertion logic. The first_dispatch parameter
|
||||
# indicates whether the item might be new for other nodes.
|
||||
function _insert(item: Item, first_dispatch: bool &default = T)
|
||||
{
|
||||
# Assume that the item is new by default.
|
||||
local is_new: bool = T;
|
||||
|
||||
# All intelligence is case insensitive at the moment.
|
||||
local lower_indicator = to_lower(item$indicator);
|
||||
|
||||
# Insert indicator into MinDataStore (might exist already).
|
||||
switch ( item$indicator_type )
|
||||
{
|
||||
case ADDR:
|
||||
local host = to_addr(item$indicator);
|
||||
add min_data_store$host_data[host];
|
||||
break;
|
||||
case SUBNET:
|
||||
local net = to_subnet(item$indicator);
|
||||
add min_data_store$subnet_data[net];
|
||||
break;
|
||||
default:
|
||||
add min_data_store$string_data[lower_indicator, item$indicator_type];
|
||||
break;
|
||||
}
|
||||
|
||||
if ( have_full_data )
|
||||
{
|
||||
# Insert new metadata or update if already present
|
||||
meta_tbl[meta$source] = meta;
|
||||
# Insert new metadata or update if already present.
|
||||
is_new = insert_meta_data(item);
|
||||
}
|
||||
|
||||
if ( is_new )
|
||||
# Trigger insert for cluster in case the item is new
|
||||
# or insert was called on a worker
|
||||
if ( first_dispatch && is_new )
|
||||
# Announce a (possibly) new item if this is the first dispatch and
|
||||
# we know it is new or have to assume that on a worker.
|
||||
event Intel::new_item(item);
|
||||
}
|
||||
|
||||
function insert(item: Item)
|
||||
{
|
||||
# Insert possibly new item.
|
||||
_insert(item, T);
|
||||
}
|
||||
|
||||
# Function to check whether an item is present.
|
||||
function item_exists(item: Item): bool
|
||||
{
|
||||
|
@ -549,12 +576,12 @@ function remove(item: Item, purge_indicator: bool)
|
|||
break;
|
||||
}
|
||||
# Trigger deletion in minimal data stores
|
||||
event Intel::purge_item(item);
|
||||
event Intel::remove_indicator(item);
|
||||
}
|
||||
}
|
||||
|
||||
# Handling of indicator removal in minimal data stores.
|
||||
event purge_item(item: Item)
|
||||
event remove_indicator(item: Item)
|
||||
{
|
||||
switch ( item$indicator_type )
|
||||
{
|
||||
|
@ -571,4 +598,3 @@ event purge_item(item: Item)
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -225,9 +225,22 @@ global blocks: table[addr] of BlockInfo = {}
|
|||
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
@load base/frameworks/cluster
|
||||
redef Cluster::manager2worker_events += /NetControl::catch_release_block_(new|delete)/;
|
||||
redef Cluster::worker2manager_events += /NetControl::catch_release_(add|delete|encountered)/;
|
||||
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
event bro_init()
|
||||
{
|
||||
Broker::auto_publish(Cluster::worker_topic, NetControl::catch_release_block_new);
|
||||
Broker::auto_publish(Cluster::worker_topic, NetControl::catch_release_block_delete);
|
||||
}
|
||||
@else
|
||||
event bro_init()
|
||||
{
|
||||
Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_add);
|
||||
Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_delete);
|
||||
Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_encountered);
|
||||
}
|
||||
@endif
|
||||
|
||||
@endif
|
||||
|
||||
function cr_check_rule(r: Rule): bool
|
||||
|
|
|
@ -16,10 +16,25 @@ export {
|
|||
global cluster_netcontrol_delete_rule: event(id: string, reason: string);
|
||||
}
|
||||
|
||||
## Workers need ability to forward commands to manager.
|
||||
redef Cluster::worker2manager_events += /NetControl::cluster_netcontrol_(add|remove|delete)_rule/;
|
||||
## Workers need to see the result events from the manager.
|
||||
redef Cluster::manager2worker_events += /NetControl::rule_(added|removed|timeout|error|exists|new|destroyed)/;
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
event bro_init()
|
||||
{
|
||||
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_added);
|
||||
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_removed);
|
||||
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_timeout);
|
||||
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_error);
|
||||
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_exists);
|
||||
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_new);
|
||||
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_destroyed);
|
||||
}
|
||||
@else
|
||||
event bro_init()
|
||||
{
|
||||
Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_add_rule);
|
||||
Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_remove_rule);
|
||||
Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_delete_rule);
|
||||
}
|
||||
@endif
|
||||
|
||||
function activate(p: PluginState, priority: int)
|
||||
{
|
||||
|
|
|
@ -6,8 +6,6 @@ module NetControl;
|
|||
@load ../plugin
|
||||
@load base/frameworks/broker
|
||||
|
||||
@ifdef ( Broker::__enable )
|
||||
|
||||
export {
|
||||
type AclRule : record {
|
||||
command: string;
|
||||
|
@ -243,7 +241,7 @@ function acld_add_rule_fun(p: PluginState, r: Rule) : bool
|
|||
if ( ar$command == "" )
|
||||
return F;
|
||||
|
||||
Broker::send_event(p$acld_config$acld_topic, Broker::event_args(acld_add_rule, p$acld_id, r, ar));
|
||||
Broker::publish(p$acld_config$acld_topic, acld_add_rule, p$acld_id, r, ar);
|
||||
return T;
|
||||
}
|
||||
|
||||
|
@ -266,19 +264,20 @@ function acld_remove_rule_fun(p: PluginState, r: Rule, reason: string) : bool
|
|||
ar$comment = reason;
|
||||
}
|
||||
|
||||
Broker::send_event(p$acld_config$acld_topic, Broker::event_args(acld_remove_rule, p$acld_id, r, ar));
|
||||
Broker::publish(p$acld_config$acld_topic, acld_remove_rule, p$acld_id, r, ar);
|
||||
return T;
|
||||
}
|
||||
|
||||
function acld_init(p: PluginState)
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect(cat(p$acld_config$acld_host), p$acld_config$acld_port, 1sec);
|
||||
Broker::subscribe_to_events(p$acld_config$acld_topic);
|
||||
Broker::peer(cat(p$acld_config$acld_host), p$acld_config$acld_port);
|
||||
Broker::subscribe(p$acld_config$acld_topic);
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
local peer_address = cat(endpoint$network$address);
|
||||
local peer_port = endpoint$network$bound_port;
|
||||
if ( [peer_port, peer_address] !in netcontrol_acld_peers )
|
||||
# ok, this one was none of ours...
|
||||
return;
|
||||
|
@ -315,5 +314,3 @@ function create_acld(config: AcldConfig) : PluginState
|
|||
|
||||
return p;
|
||||
}
|
||||
|
||||
@endif
|
||||
|
|
|
@ -8,8 +8,6 @@ module NetControl;
|
|||
@load ../plugin
|
||||
@load base/frameworks/broker
|
||||
|
||||
@ifdef ( Broker::__enable )
|
||||
|
||||
export {
|
||||
## This record specifies the configuration that is passed to :bro:see:`NetControl::create_broker`.
|
||||
type BrokerConfig: record {
|
||||
|
@ -151,7 +149,7 @@ function broker_add_rule_fun(p: PluginState, r: Rule) : bool
|
|||
if ( ! broker_check_rule(p, r) )
|
||||
return F;
|
||||
|
||||
Broker::send_event(p$broker_config$topic, Broker::event_args(broker_add_rule, p$broker_id, r));
|
||||
Broker::publish(p$broker_config$topic, Broker::make_event(broker_add_rule, p$broker_id, r));
|
||||
return T;
|
||||
}
|
||||
|
||||
|
@ -160,19 +158,20 @@ function broker_remove_rule_fun(p: PluginState, r: Rule, reason: string) : bool
|
|||
if ( ! broker_check_rule(p, r) )
|
||||
return F;
|
||||
|
||||
Broker::send_event(p$broker_config$topic, Broker::event_args(broker_remove_rule, p$broker_id, r, reason));
|
||||
Broker::publish(p$broker_config$topic, Broker::make_event(broker_remove_rule, p$broker_id, r, reason));
|
||||
return T;
|
||||
}
|
||||
|
||||
function broker_init(p: PluginState)
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect(cat(p$broker_config$host), p$broker_config$bport, 1sec);
|
||||
Broker::subscribe_to_events(p$broker_config$topic);
|
||||
Broker::peer(cat(p$broker_config$host), p$broker_config$bport);
|
||||
Broker::subscribe(p$broker_config$topic);
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
local peer_address = cat(endpoint$network$address);
|
||||
local peer_port = endpoint$network$bound_port;
|
||||
if ( [peer_port, peer_address] !in netcontrol_broker_peers )
|
||||
return;
|
||||
|
||||
|
@ -219,5 +218,3 @@ function create_broker(config: BrokerConfig, can_expire: bool) : PluginState
|
|||
|
||||
return p;
|
||||
}
|
||||
|
||||
@endif
|
||||
|
|
|
@ -8,14 +8,5 @@
|
|||
@load ./actions/page
|
||||
@load ./actions/add-geodata
|
||||
|
||||
# The cluster framework must be loaded first.
|
||||
@load base/frameworks/cluster
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
@load ./cluster
|
||||
@else
|
||||
@load ./non-cluster
|
||||
@endif
|
||||
|
||||
# Load here so that it can check whether clustering is enabled.
|
||||
@load ./actions/pp-alarms
|
||||
|
|
|
@ -156,8 +156,10 @@ function pretty_print_alarm(out: file, n: Info)
|
|||
@if ( Cluster::is_enabled() )
|
||||
pdescr = "local";
|
||||
|
||||
if ( n?$src_peer )
|
||||
pdescr = n$src_peer?$descr ? n$src_peer$descr : fmt("%s", n$src_peer$host);
|
||||
if ( n?$peer_descr )
|
||||
pdescr = n$peer_descr;
|
||||
else if ( n?$peer_name )
|
||||
pdescr = n$peer_name;
|
||||
|
||||
pdescr = fmt("<%s> ", pdescr);
|
||||
@endif
|
||||
|
|
|
@ -1,53 +0,0 @@
|
|||
##! Implements notice functionality across clusters. Worker nodes
|
||||
##! will disable notice/alarm logging streams and forward notice
|
||||
##! events to the manager node for logging/processing.
|
||||
|
||||
@load ./main
|
||||
@load base/frameworks/cluster
|
||||
|
||||
module Notice;
|
||||
|
||||
export {
|
||||
## This is the event used to transport notices on the cluster.
|
||||
##
|
||||
## n: The notice information to be sent to the cluster manager for
|
||||
## further processing.
|
||||
global cluster_notice: event(n: Notice::Info);
|
||||
}
|
||||
|
||||
## Manager can communicate notice suppression to workers.
|
||||
redef Cluster::manager2worker_events += /Notice::begin_suppression/;
|
||||
## Workers need ability to forward notices to manager.
|
||||
redef Cluster::worker2manager_events += /Notice::cluster_notice/;
|
||||
|
||||
@if ( Cluster::local_node_type() != Cluster::MANAGER )
|
||||
event Notice::begin_suppression(ts: time, suppress_for: interval, note: Type, identifier: string)
|
||||
{
|
||||
local suppress_until = ts + suppress_for;
|
||||
suppressing[note, identifier] = suppress_until;
|
||||
}
|
||||
@endif
|
||||
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
event Notice::cluster_notice(n: Notice::Info)
|
||||
{
|
||||
# Raise remotely received notices on the manager
|
||||
NOTICE(n);
|
||||
}
|
||||
@endif
|
||||
|
||||
module GLOBAL;
|
||||
|
||||
## This is the entry point in the global namespace for the notice framework.
|
||||
function NOTICE(n: Notice::Info)
|
||||
{
|
||||
# Suppress this notice if necessary.
|
||||
if ( Notice::is_being_suppressed(n) )
|
||||
return;
|
||||
|
||||
if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
Notice::internal_NOTICE(n);
|
||||
else
|
||||
# For non-managers, send the notice on to the manager.
|
||||
event Notice::cluster_notice(n);
|
||||
}
|
|
@ -4,6 +4,8 @@
|
|||
##! what is bad activity for sites. More extensive documentation about using
|
||||
##! the notice framework can be found in :doc:`/frameworks/notice`.
|
||||
|
||||
@load base/frameworks/cluster
|
||||
|
||||
module Notice;
|
||||
|
||||
export {
|
||||
|
@ -117,9 +119,10 @@ export {
|
|||
## Associated count, or perhaps a status code.
|
||||
n: count &log &optional;
|
||||
|
||||
## Peer that raised this notice.
|
||||
src_peer: event_peer &optional;
|
||||
## Textual description for the peer that raised this notice.
|
||||
## Name of remote peer that raised this notice.
|
||||
peer_name: string &optional;
|
||||
## Textual description for the peer that raised this notice,
|
||||
## including name, host address and port.
|
||||
peer_descr: string &log &optional;
|
||||
|
||||
## The actions which have been applied to this notice.
|
||||
|
@ -316,8 +319,36 @@ export {
|
|||
##
|
||||
## n: The record of notice data.
|
||||
global internal_NOTICE: function(n: Notice::Info);
|
||||
|
||||
## This is the event used to transport notices on the cluster.
|
||||
##
|
||||
## n: The notice information to be sent to the cluster manager for
|
||||
## further processing.
|
||||
global cluster_notice: event(n: Notice::Info);
|
||||
}
|
||||
|
||||
module GLOBAL;
|
||||
|
||||
function NOTICE(n: Notice::Info)
|
||||
{
|
||||
if ( Notice::is_being_suppressed(n) )
|
||||
return;
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
Notice::internal_NOTICE(n);
|
||||
else
|
||||
{
|
||||
n$peer_name = n$peer_descr = Cluster::node;
|
||||
Broker::publish(Cluster::manager_topic, Notice::cluster_notice, n);
|
||||
}
|
||||
@else
|
||||
Notice::internal_NOTICE(n);
|
||||
@endif
|
||||
}
|
||||
|
||||
module Notice;
|
||||
|
||||
# This is used as a hack to implement per-item expiration intervals.
|
||||
function per_notice_suppression_interval(t: table[Notice::Type, string] of time, idx: any): interval
|
||||
{
|
||||
|
@ -368,24 +399,6 @@ event bro_init() &priority=5
|
|||
$interv=24hrs, $postprocessor=log_mailing_postprocessor]);
|
||||
}
|
||||
|
||||
# TODO: fix this.
|
||||
#function notice_tags(n: Notice::Info) : table[string] of string
|
||||
# {
|
||||
# local tgs: table[string] of string = table();
|
||||
# if ( is_remote_event() )
|
||||
# {
|
||||
# if ( n$src_peer$descr != "" )
|
||||
# tgs["es"] = n$src_peer$descr;
|
||||
# else
|
||||
# tgs["es"] = fmt("%s/%s", n$src_peer$host, n$src_peer$p);
|
||||
# }
|
||||
# else
|
||||
# {
|
||||
# tgs["es"] = peer_description;
|
||||
# }
|
||||
# return tgs;
|
||||
# }
|
||||
|
||||
function email_headers(subject_desc: string, dest: string): string
|
||||
{
|
||||
local header_text = string_cat(
|
||||
|
@ -507,12 +520,26 @@ hook Notice::notice(n: Notice::Info) &priority=-5
|
|||
[n$note, n$identifier] !in suppressing &&
|
||||
n$suppress_for != 0secs )
|
||||
{
|
||||
local suppress_until = n$ts + n$suppress_for;
|
||||
suppressing[n$note, n$identifier] = suppress_until;
|
||||
event Notice::begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
|
||||
}
|
||||
}
|
||||
|
||||
event Notice::begin_suppression(ts: time, suppress_for: interval, note: Type,
|
||||
identifier: string)
|
||||
{
|
||||
local suppress_until = ts + suppress_for;
|
||||
suppressing[note, identifier] = suppress_until;
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
if ( ! Cluster::is_enabled() )
|
||||
return;
|
||||
|
||||
Broker::auto_publish(Cluster::worker_topic, Notice::begin_suppression);
|
||||
Broker::auto_publish(Cluster::proxy_topic, Notice::begin_suppression);
|
||||
}
|
||||
|
||||
function is_being_suppressed(n: Notice::Info): bool
|
||||
{
|
||||
if ( n?$identifier && [n$note, n$identifier] in suppressing )
|
||||
|
@ -612,12 +639,6 @@ function apply_policy(n: Notice::Info)
|
|||
n$dst = n$iconn$resp_h;
|
||||
}
|
||||
|
||||
if ( ! n?$src_peer )
|
||||
n$src_peer = get_event_peer();
|
||||
if ( ! n?$peer_descr )
|
||||
n$peer_descr = n$src_peer?$descr ?
|
||||
n$src_peer$descr : fmt("%s", n$src_peer$host);
|
||||
|
||||
if ( ! n?$email_body_sections )
|
||||
n$email_body_sections = vector();
|
||||
if ( ! n?$email_delay_tokens )
|
||||
|
@ -652,6 +673,7 @@ function internal_NOTICE(n: Notice::Info)
|
|||
hook Notice::notice(n);
|
||||
}
|
||||
|
||||
module GLOBAL;
|
||||
|
||||
global NOTICE: function(n: Notice::Info);
|
||||
event Notice::cluster_notice(n: Notice::Info)
|
||||
{
|
||||
NOTICE(n);
|
||||
}
|
||||
|
|
|
@ -1,14 +0,0 @@
|
|||
|
||||
@load ./main
|
||||
|
||||
module GLOBAL;
|
||||
|
||||
## This is the entry point in the global namespace for the notice framework.
|
||||
function NOTICE(n: Notice::Info)
|
||||
{
|
||||
# Suppress this notice if necessary.
|
||||
if ( Notice::is_being_suppressed(n) )
|
||||
return;
|
||||
|
||||
Notice::internal_NOTICE(n);
|
||||
}
|
|
@ -13,8 +13,14 @@ export {
|
|||
global cluster_flow_clear: event(name: string);
|
||||
}
|
||||
|
||||
## Workers need ability to forward commands to manager.
|
||||
redef Cluster::worker2manager_events += /OpenFlow::cluster_flow_(mod|clear)/;
|
||||
@if ( Cluster::local_node_type() != Cluster::MANAGER )
|
||||
# Workers need ability to forward commands to manager.
|
||||
event bro_init()
|
||||
{
|
||||
Broker::auto_publish(Cluster::manager_topic, OpenFlow::cluster_flow_mod);
|
||||
Broker::auto_publish(Cluster::manager_topic, OpenFlow::cluster_flow_clear);
|
||||
}
|
||||
@endif
|
||||
|
||||
# the flow_mod function wrapper
|
||||
function flow_mod(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool
|
||||
|
|
|
@ -5,8 +5,6 @@
|
|||
|
||||
module OpenFlow;
|
||||
|
||||
@ifdef ( Broker::__enable )
|
||||
|
||||
export {
|
||||
redef enum Plugin += {
|
||||
BROKER,
|
||||
|
@ -49,27 +47,28 @@ function broker_describe(state: ControllerState): string
|
|||
|
||||
function broker_flow_mod_fun(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
|
||||
{
|
||||
Broker::send_event(state$broker_topic, Broker::event_args(broker_flow_mod, state$_name, state$broker_dpid, match, flow_mod));
|
||||
Broker::publish(state$broker_topic, Broker::make_event(broker_flow_mod, state$_name, state$broker_dpid, match, flow_mod));
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
function broker_flow_clear_fun(state: OpenFlow::ControllerState): bool
|
||||
{
|
||||
Broker::send_event(state$broker_topic, Broker::event_args(broker_flow_clear, state$_name, state$broker_dpid));
|
||||
Broker::publish(state$broker_topic, Broker::make_event(broker_flow_clear, state$_name, state$broker_dpid));
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
function broker_init(state: OpenFlow::ControllerState)
|
||||
{
|
||||
Broker::enable();
|
||||
Broker::connect(cat(state$broker_host), state$broker_port, 1sec);
|
||||
Broker::subscribe_to_events(state$broker_topic); # openflow success and failure events are directly sent back via the other plugin via broker.
|
||||
Broker::peer(cat(state$broker_host), state$broker_port);
|
||||
Broker::subscribe(state$broker_topic); # openflow success and failure events are directly sent back via the other plugin via broker.
|
||||
}
|
||||
|
||||
event Broker::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string)
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
|
||||
{
|
||||
local peer_address = cat(endpoint$network$address);
|
||||
local peer_port = endpoint$network$bound_port;
|
||||
if ( [peer_port, peer_address] !in broker_peers )
|
||||
# ok, this one was none of ours...
|
||||
return;
|
||||
|
@ -94,5 +93,3 @@ function broker_new(name: string, host: addr, host_port: port, topic: string, dp
|
|||
|
||||
return c;
|
||||
}
|
||||
|
||||
@endif
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
|
||||
@load base/utils/directions-and-hosts
|
||||
@load base/utils/numbers
|
||||
@load base/frameworks/cluster
|
||||
|
||||
module Software;
|
||||
|
||||
|
@ -70,6 +71,7 @@ export {
|
|||
const asset_tracking = LOCAL_HOSTS &redef;
|
||||
|
||||
## Other scripts should call this function when they detect software.
|
||||
##
|
||||
## id: The connection id where the software was discovered.
|
||||
##
|
||||
## info: A record representing the software discovered.
|
||||
|
@ -102,15 +104,21 @@ export {
|
|||
|
||||
## The set of software associated with an address. Data expires from
|
||||
## this table after one day by default so that a detected piece of
|
||||
## software will be logged once each day.
|
||||
global tracked: table[addr] of SoftwareSet
|
||||
&create_expire=1day
|
||||
&synchronized
|
||||
&redef;
|
||||
## software will be logged once each day. In a cluster, this table is
|
||||
## uniformly distributed among proxy nodes.
|
||||
global tracked: table[addr] of SoftwareSet &create_expire=1day;
|
||||
|
||||
## This event can be handled to access the :bro:type:`Software::Info`
|
||||
## record as it is sent on to the logging framework.
|
||||
global log_software: event(rec: Info);
|
||||
|
||||
## This event can be handled to access software information whenever it's
|
||||
## version is found to have changed.
|
||||
global version_change: event(old: Info, new: Info);
|
||||
|
||||
## This event is raised when software is about to be registered for
|
||||
## tracking in :bro:see:`Software::tracked`.
|
||||
global register: event(info: Info);
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
|
@ -437,40 +445,43 @@ function software_fmt(i: Info): string
|
|||
return fmt("%s %s", i$name, software_fmt_version(i$version));
|
||||
}
|
||||
|
||||
# Insert a mapping into the table
|
||||
# Overides old entries for the same software and generates events if needed.
|
||||
event register(id: conn_id, info: Info)
|
||||
event Software::register(info: Info)
|
||||
{
|
||||
# Host already known?
|
||||
if ( info$host !in tracked )
|
||||
tracked[info$host] = table();
|
||||
local ts: SoftwareSet;
|
||||
|
||||
if ( info$host in tracked )
|
||||
ts = tracked[info$host];
|
||||
else
|
||||
ts = tracked[info$host] = SoftwareSet();
|
||||
|
||||
local ts = tracked[info$host];
|
||||
# Software already registered for this host? We don't want to endlessly
|
||||
# log the same thing.
|
||||
if ( info$name in ts )
|
||||
{
|
||||
local old = ts[info$name];
|
||||
local changed = cmp_versions(old$version, info$version) != 0;
|
||||
|
||||
if ( changed )
|
||||
event Software::version_change(old, info);
|
||||
else if ( ! info$force_log )
|
||||
# If the version hasn't changed, then we're just redetecting the
|
||||
# same thing, then we don't care. This results in no extra logging.
|
||||
# But if the $force_log value is set then we'll continue.
|
||||
if ( ! info$force_log && cmp_versions(old$version, info$version) == 0 )
|
||||
# same thing, then we don't care.
|
||||
return;
|
||||
}
|
||||
ts[info$name] = info;
|
||||
|
||||
ts[info$name] = info;
|
||||
Log::write(Software::LOG, info);
|
||||
}
|
||||
|
||||
function found(id: conn_id, info: Info): bool
|
||||
{
|
||||
if ( info$force_log || addr_matches_host(info$host, asset_tracking) )
|
||||
{
|
||||
if ( ! info$force_log && ! addr_matches_host(info$host, asset_tracking) )
|
||||
return F;
|
||||
|
||||
if ( ! info?$ts )
|
||||
info$ts = network_time();
|
||||
|
||||
if ( info?$version ) # we have a version number and don't have to parse. check if the name is also set...
|
||||
if ( info?$version )
|
||||
{
|
||||
if ( ! info?$name )
|
||||
{
|
||||
|
@ -478,22 +489,26 @@ function found(id: conn_id, info: Info): bool
|
|||
return F;
|
||||
}
|
||||
}
|
||||
else # no version present, we have to parse...
|
||||
{
|
||||
if ( !info?$unparsed_version )
|
||||
else if ( ! info?$unparsed_version )
|
||||
{
|
||||
Reporter::error("No unparsed version string present in Info record with version in Software::found");
|
||||
return F;
|
||||
}
|
||||
|
||||
if ( ! info?$version )
|
||||
{
|
||||
local sw = parse(info$unparsed_version);
|
||||
info$unparsed_version = sw$unparsed_version;
|
||||
info$name = sw$name;
|
||||
info$version = sw$version;
|
||||
}
|
||||
|
||||
event register(id, info);
|
||||
@if ( Cluster::is_enabled() )
|
||||
Cluster::publish_hrw(Cluster::proxy_pool, info$host, Software::register,
|
||||
info);
|
||||
@else
|
||||
event Software::register(info);
|
||||
@endif
|
||||
|
||||
return T;
|
||||
}
|
||||
else
|
||||
return F;
|
||||
}
|
||||
|
|
|
@ -55,18 +55,20 @@ export {
|
|||
global cluster_threshold_crossed: event(ss_name: string, key: SumStats::Key, thold_index: count);
|
||||
}
|
||||
|
||||
# Add events to the cluster framework to make this work.
|
||||
redef Cluster::manager2worker_events += /SumStats::cluster_(ss_request|get_result|threshold_crossed)/;
|
||||
redef Cluster::manager2worker_events += /SumStats::(get_a_key)/;
|
||||
redef Cluster::worker2manager_events += /SumStats::cluster_(send_result|key_intermediate_response)/;
|
||||
redef Cluster::worker2manager_events += /SumStats::(send_a_key|send_no_key)/;
|
||||
|
||||
# This variable is maintained to know what keys have recently sent or received
|
||||
# intermediate updates so they don't overwhelm the manager.
|
||||
global recent_global_view_keys: set[string, Key] &create_expire=1min;
|
||||
|
||||
@if ( Cluster::local_node_type() != Cluster::MANAGER )
|
||||
|
||||
event bro_init() &priority=100
|
||||
{
|
||||
Broker::auto_publish(Cluster::manager_topic, SumStats::cluster_send_result);
|
||||
Broker::auto_publish(Cluster::manager_topic, SumStats::cluster_key_intermediate_response);
|
||||
Broker::auto_publish(Cluster::manager_topic, SumStats::send_a_key);
|
||||
Broker::auto_publish(Cluster::manager_topic, SumStats::send_no_key);
|
||||
}
|
||||
|
||||
# Result tables indexed on a uid that are currently being sent to the
|
||||
# manager.
|
||||
global sending_results: table[string] of ResultTable = table() &read_expire=1min;
|
||||
|
@ -207,6 +209,14 @@ function request_key(ss_name: string, key: Key): Result
|
|||
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
|
||||
event bro_init() &priority=100
|
||||
{
|
||||
Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_ss_request);
|
||||
Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_get_result);
|
||||
Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_threshold_crossed);
|
||||
Broker::auto_publish(Cluster::worker_topic, SumStats::get_a_key);
|
||||
}
|
||||
|
||||
# This variable is maintained by manager nodes as they collect and aggregate
|
||||
# results.
|
||||
# Index on a uid.
|
||||
|
|
|
@ -603,6 +603,29 @@ type ThreadStats: record {
|
|||
num_threads: count;
|
||||
};
|
||||
|
||||
## Statistics about Broker communication.
|
||||
##
|
||||
## .. bro:see:: get_broker_stats
|
||||
type BrokerStats: record {
|
||||
num_peers: count;
|
||||
## Number of active data stores.
|
||||
num_stores: count;
|
||||
## Number of pending data store queries.
|
||||
num_pending_queries: count;
|
||||
## Number of total log messages received.
|
||||
num_events_incoming: count;
|
||||
## Number of total log messages sent.
|
||||
num_events_outgoing: count;
|
||||
## Number of total log records received.
|
||||
num_logs_incoming: count;
|
||||
## Number of total log records sent.
|
||||
num_logs_outgoing: count;
|
||||
## Number of total identifiers received.
|
||||
num_ids_incoming: count;
|
||||
## Number of total identifiers sent.
|
||||
num_ids_outgoing: count;
|
||||
};
|
||||
|
||||
## Deprecated.
|
||||
##
|
||||
## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere
|
||||
|
@ -737,7 +760,7 @@ type IPAddrAnonymizationClass: enum {
|
|||
## A locally unique ID identifying a communication peer. The ID is returned by
|
||||
## :bro:id:`connect`.
|
||||
##
|
||||
## .. bro:see:: connect Communication
|
||||
## .. bro:see:: connect
|
||||
type peer_id: count;
|
||||
|
||||
## A communication peer.
|
||||
|
@ -760,7 +783,7 @@ type event_peer: record {
|
|||
p: port;
|
||||
is_local: bool; ##< True if this record describes the local process.
|
||||
descr: string; ##< The peer's :bro:see:`peer_description`.
|
||||
class: string &optional; ##< The self-assigned *class* of the peer. See :bro:see:`Communication::Node`.
|
||||
class: string &optional; ##< The self-assigned *class* of the peer.
|
||||
};
|
||||
|
||||
## Deprecated.
|
||||
|
@ -4783,6 +4806,11 @@ export {
|
|||
const max_frag_data = 30000 &redef;
|
||||
}
|
||||
|
||||
module Cluster;
|
||||
export {
|
||||
type Cluster::Pool: record {};
|
||||
}
|
||||
|
||||
module GLOBAL;
|
||||
|
||||
## Seed for hashes computed internally for probabilistic data structures. Using
|
||||
|
@ -4797,8 +4825,8 @@ const bits_per_uid: count = 96 &redef;
|
|||
|
||||
# Load these frameworks here because they use fairly deep integration with
|
||||
# BiFs and script-land defined types.
|
||||
@load base/frameworks/broker
|
||||
@load base/frameworks/logging
|
||||
@load base/frameworks/broker
|
||||
@load base/frameworks/input
|
||||
@load base/frameworks/analyzer
|
||||
@load base/frameworks/files
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
@load base/utils/exec
|
||||
@load base/utils/files
|
||||
@load base/utils/geoip-distance
|
||||
@load base/utils/hash_hrw
|
||||
@load base/utils/numbers
|
||||
@load base/utils/paths
|
||||
@load base/utils/patterns
|
||||
|
@ -32,7 +33,6 @@
|
|||
@load base/frameworks/signatures
|
||||
@load base/frameworks/packet-filter
|
||||
@load base/frameworks/software
|
||||
@load base/frameworks/communication
|
||||
@load base/frameworks/control
|
||||
@load base/frameworks/cluster
|
||||
@load base/frameworks/intel
|
||||
|
|
|
@ -116,8 +116,12 @@ event bro_init() &priority=5
|
|||
Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports);
|
||||
}
|
||||
|
||||
# Setup the clusterized config that is needed to tie messages together on a cluster.
|
||||
redef Cluster::worker2manager_events += /DHCP::aggregate_msgs/;
|
||||
@if ( Cluster::is_enabled() )
|
||||
event bro_init()
|
||||
{
|
||||
Broker::auto_publish(Cluster::manager_topic, DHCP::aggregate_msgs);
|
||||
}
|
||||
@endif
|
||||
|
||||
function join_data_expiration(t: table[count] of Info, idx: count): interval
|
||||
{
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
|
||||
@load ./main
|
||||
@load base/utils/files
|
||||
@load base/frameworks/cluster
|
||||
|
||||
module IRC;
|
||||
|
||||
|
@ -23,9 +24,33 @@ export {
|
|||
## Sniffed mime type of the file.
|
||||
dcc_mime_type: string &log &optional;
|
||||
};
|
||||
|
||||
## The broker topic name to which expected DCC transfer updates are
|
||||
## relayed.
|
||||
const dcc_transfer_update_topic = "bro/irc/dcc_transfer_update" &redef;
|
||||
}
|
||||
|
||||
global dcc_expected_transfers: table[addr, port] of Info &synchronized &read_expire=5mins;
|
||||
global dcc_expected_transfers: table[addr, port] of Info &read_expire=5mins;
|
||||
|
||||
event dcc_transfer_add(host: addr, p: port, info: Info)
|
||||
{
|
||||
dcc_expected_transfers[host, p] = info;
|
||||
Analyzer::schedule_analyzer(0.0.0.0, host, p,
|
||||
Analyzer::ANALYZER_IRC_DATA, 5 min);
|
||||
}
|
||||
|
||||
event dcc_transfer_remove(host: addr, p: port)
|
||||
{
|
||||
delete dcc_expected_transfers[host, p];
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
local lnt = Cluster::local_node_type();
|
||||
|
||||
if ( lnt == Cluster::WORKER )
|
||||
Broker::subscribe(dcc_transfer_update_topic);
|
||||
}
|
||||
|
||||
function log_dcc(f: fa_file)
|
||||
{
|
||||
|
@ -51,6 +76,9 @@ function log_dcc(f: fa_file)
|
|||
delete irc$dcc_mime_type;
|
||||
|
||||
delete dcc_expected_transfers[cid$resp_h, cid$resp_p];
|
||||
Cluster::relay_rr(Cluster::proxy_pool, dcc_transfer_update_topic,
|
||||
dcc_transfer_update_topic, dcc_transfer_remove,
|
||||
cid$resp_h, cid$resp_p);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -74,6 +102,9 @@ event irc_dcc_message(c: connection, is_orig: bool,
|
|||
local p = count_to_port(dest_port, tcp);
|
||||
Analyzer::schedule_analyzer(0.0.0.0, address, p, Analyzer::ANALYZER_IRC_DATA, 5 min);
|
||||
dcc_expected_transfers[address, p] = c$irc;
|
||||
Cluster::relay_rr(Cluster::proxy_pool, dcc_transfer_update_topic,
|
||||
dcc_transfer_update_topic, dcc_transfer_add,
|
||||
address, p, c$irc);
|
||||
}
|
||||
|
||||
event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) &priority=10
|
||||
|
@ -86,5 +117,10 @@ event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) &priority=10
|
|||
event connection_state_remove(c: connection) &priority=-5
|
||||
{
|
||||
if ( [c$id$resp_h, c$id$resp_p] in dcc_expected_transfers )
|
||||
{
|
||||
delete dcc_expected_transfers[c$id$resp_h, c$id$resp_p];
|
||||
Cluster::relay_rr(Cluster::proxy_pool, dcc_transfer_update_topic,
|
||||
dcc_transfer_update_topic, dcc_transfer_remove,
|
||||
c$id$resp_h, c$id$resp_p);
|
||||
}
|
||||
}
|
||||
|
|
76
scripts/base/utils/hash_hrw.bro
Normal file
76
scripts/base/utils/hash_hrw.bro
Normal file
|
@ -0,0 +1,76 @@
|
|||
##! An implementation of highest random weight (HRW) hashing, also called
|
||||
##! rendezvous hashing. See
|
||||
##! `<https://en.wikipedia.org/wiki/Rendezvous_hashing>`_.
|
||||
|
||||
module HashHRW;
|
||||
|
||||
export {
|
||||
## A site/node is a unique location to which you want a subset of keys
|
||||
## to be distributed.
|
||||
type Site: record {
|
||||
## A unique identifier for the site, should not exceed what
|
||||
## can be contained in a 32-bit integer.
|
||||
id: count;
|
||||
## Other data to associate with the site.
|
||||
user_data: any &optional;
|
||||
};
|
||||
|
||||
## A table of sites, indexed by their id.
|
||||
type SiteTable: table[count] of Site;
|
||||
|
||||
## A collection of sites to distribute keys across.
|
||||
type Pool: record {
|
||||
sites: SiteTable &default=SiteTable();
|
||||
};
|
||||
|
||||
## Add a site to a pool.
|
||||
##
|
||||
## Returns: F is the site is already in the pool, else T.
|
||||
global add_site: function(pool: Pool, site: Site): bool;
|
||||
|
||||
## Remove a site from a pool.
|
||||
##
|
||||
## Returns: F if the site is not in the pool, else T.
|
||||
global rem_site: function(pool: Pool, site: Site): bool;
|
||||
|
||||
## Returns: the site to which the key maps.
|
||||
global get_site: function(pool: Pool, key: any): Site;
|
||||
}
|
||||
|
||||
function add_site(pool: Pool, site: Site): bool
|
||||
{
|
||||
if ( site$id in pool$sites )
|
||||
return F;
|
||||
|
||||
pool$sites[site$id] = site;
|
||||
return T;
|
||||
}
|
||||
|
||||
function rem_site(pool: Pool, site: Site): bool
|
||||
{
|
||||
if ( site$id !in pool$sites )
|
||||
return F;
|
||||
|
||||
delete pool$sites[site$id];
|
||||
return T;
|
||||
}
|
||||
|
||||
function get_site(pool: Pool, key: any): Site
|
||||
{
|
||||
local best_site_id = 0;
|
||||
local best_weight = -1;
|
||||
local d = fnv1a32(key);
|
||||
|
||||
for ( site_id in pool$sites )
|
||||
{
|
||||
local w = hrw_weight(d, site_id);
|
||||
|
||||
if ( w > best_weight || (w == best_weight && site_id > best_site_id) )
|
||||
{
|
||||
best_weight = w;
|
||||
best_site_id = site_id;
|
||||
}
|
||||
}
|
||||
|
||||
return pool$sites[best_site_id];
|
||||
}
|
|
@ -2,7 +2,6 @@
|
|||
|
||||
# Scripts which are commented out in test-all-policy.bro.
|
||||
@load protocols/ssl/notary.bro
|
||||
@load frameworks/communication/listen.bro
|
||||
@load frameworks/control/controllee.bro
|
||||
@load frameworks/control/controller.bro
|
||||
@load frameworks/files/extract-all-files.bro
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
##! Loading this script will make the Bro instance listen for remote
|
||||
##! Bro instances to connect.
|
||||
|
||||
@load base/frameworks/communication
|
||||
|
||||
module Communication;
|
||||
|
||||
event bro_init() &priority=-10
|
||||
{
|
||||
enable_communication();
|
||||
listen(listen_interface, listen_port, listen_ssl, listen_ipv6,
|
||||
listen_ipv6_zone_id, listen_retry);
|
||||
}
|
|
@ -8,12 +8,28 @@
|
|||
##! bro <scripts> frameworks/control/controllee
|
||||
|
||||
@load base/frameworks/control
|
||||
# If an instance is a controllee, it implicitly needs to listen for remote
|
||||
# connections.
|
||||
@load frameworks/communication/listen
|
||||
@load base/frameworks/broker
|
||||
|
||||
module Control;
|
||||
|
||||
event bro_init() &priority=-10
|
||||
{
|
||||
Broker::subscribe(Control::topic_prefix);
|
||||
Broker::auto_publish(Control::topic_prefix + "/id_value_response",
|
||||
Control::id_value_response);
|
||||
Broker::auto_publish(Control::topic_prefix + "/peer_status_response",
|
||||
Control::peer_status_response);
|
||||
Broker::auto_publish(Control::topic_prefix + "/net_stats_response",
|
||||
Control::net_stats_response);
|
||||
Broker::auto_publish(Control::topic_prefix + "/configuration_update_response",
|
||||
Control::configuration_update_response);
|
||||
Broker::auto_publish(Control::topic_prefix + "/shutdown_response",
|
||||
Control::shutdown_response);
|
||||
|
||||
if ( Control::controllee_listen )
|
||||
Broker::listen();
|
||||
}
|
||||
|
||||
event Control::id_value_request(id: string)
|
||||
{
|
||||
local val = lookup_ID(id);
|
||||
|
@ -23,14 +39,18 @@ event Control::id_value_request(id: string)
|
|||
event Control::peer_status_request()
|
||||
{
|
||||
local status = "";
|
||||
for ( p in Communication::nodes )
|
||||
{
|
||||
local peer = Communication::nodes[p];
|
||||
if ( ! peer$connected )
|
||||
next;
|
||||
|
||||
status += fmt("%.6f peer=%s host=%s\n",
|
||||
network_time(), peer$peer$descr, peer$host);
|
||||
# @todo: need to expose broker::endpoint::peers and broker::peer_status
|
||||
local peers = Broker::peers();
|
||||
|
||||
for ( i in peers )
|
||||
{
|
||||
local bpeer = peers[i];
|
||||
status += fmt("%.6f peer=%s host=%s status=%s\n",
|
||||
network_time(),
|
||||
bpeer$peer$id,
|
||||
bpeer$peer$network$address,
|
||||
bpeer$status);
|
||||
}
|
||||
|
||||
event Control::peer_status_response(status);
|
||||
|
@ -61,5 +81,5 @@ event Control::shutdown_request()
|
|||
# Send the acknowledgement event.
|
||||
event Control::shutdown_response();
|
||||
# Schedule the shutdown to let the current event queue flush itself first.
|
||||
event terminate_event();
|
||||
schedule 1sec { terminate_event() };
|
||||
}
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
##! bro <scripts> frameworks/control/controller Control::host=<host_addr> Control::host_port=<host_port> Control::cmd=<command> [Control::arg=<arg>]
|
||||
|
||||
@load base/frameworks/control
|
||||
@load base/frameworks/communication
|
||||
@load base/frameworks/broker
|
||||
|
||||
module Control;
|
||||
|
||||
|
@ -19,19 +19,24 @@ event bro_init() &priority=5
|
|||
# shutdown.
|
||||
if ( cmd !in commands )
|
||||
{
|
||||
# TODO: do an actual error here. Maybe through the reporter events?
|
||||
print fmt("The '%s' control command is unknown.", cmd);
|
||||
Reporter::error(fmt("The '%s' control command is unknown.", cmd));
|
||||
terminate();
|
||||
}
|
||||
|
||||
# Establish the communication configuration and only request response
|
||||
# messages.
|
||||
Communication::nodes["control"] = [$host=host, $zone_id=zone_id,
|
||||
$p=host_port, $sync=F, $connect=T,
|
||||
$class="control", $events=Control::controllee_events];
|
||||
Broker::auto_publish(Control::topic_prefix + "/id_value_request",
|
||||
Control::id_value_request);
|
||||
Broker::auto_publish(Control::topic_prefix + "/peer_status_request",
|
||||
Control::peer_status_request);
|
||||
Broker::auto_publish(Control::topic_prefix + "/net_stats_request",
|
||||
Control::net_stats_request);
|
||||
Broker::auto_publish(Control::topic_prefix + "/configuration_update_request",
|
||||
Control::configuration_update_request);
|
||||
Broker::auto_publish(Control::topic_prefix + "/shutdown_request",
|
||||
Control::shutdown_request);
|
||||
Broker::subscribe(Control::topic_prefix);
|
||||
Broker::peer(cat(host), host_port);
|
||||
}
|
||||
|
||||
|
||||
event Control::id_value_response(id: string, val: string) &priority=-10
|
||||
{
|
||||
event terminate_event();
|
||||
|
@ -57,11 +62,11 @@ event Control::shutdown_response() &priority=-10
|
|||
event terminate_event();
|
||||
}
|
||||
|
||||
function configuration_update_func(p: event_peer)
|
||||
function configurable_ids(): id_table
|
||||
{
|
||||
# Send all &redef'able consts to the peer.
|
||||
local rval: id_table = table();
|
||||
local globals = global_ids();
|
||||
local cnt = 0;
|
||||
|
||||
for ( id in globals )
|
||||
{
|
||||
if ( id in ignore_ids )
|
||||
|
@ -77,39 +82,59 @@ function configuration_update_func(p: event_peer)
|
|||
# NOTE: functions are currently not fully supported for serialization and hence
|
||||
# aren't sent.
|
||||
if ( t$constant && t$redefinable && t$type_name != "func" )
|
||||
{
|
||||
send_id(p, id);
|
||||
++cnt;
|
||||
}
|
||||
rval[id] = t;
|
||||
}
|
||||
|
||||
print fmt("sent %d IDs", cnt);
|
||||
event terminate_event();
|
||||
return rval;
|
||||
}
|
||||
|
||||
event remote_connection_handshake_done(p: event_peer) &priority=-10
|
||||
function send_control_request()
|
||||
{
|
||||
if ( cmd == "id_value" )
|
||||
{
|
||||
if ( arg != "" )
|
||||
switch ( cmd ) {
|
||||
case "id_value":
|
||||
if ( arg == "" )
|
||||
Reporter::fatal("The Control::id_value command requires that Control::arg also has some value.");
|
||||
|
||||
event Control::id_value_request(arg);
|
||||
else
|
||||
{
|
||||
# TODO: do an actual error here. Maybe through the reporter events?
|
||||
print "The id_value command requires that Control::arg have some value.";
|
||||
terminate();
|
||||
}
|
||||
}
|
||||
else if ( cmd == "peer_status" )
|
||||
break;
|
||||
|
||||
case "peer_status":
|
||||
event Control::peer_status_request();
|
||||
else if ( cmd == "net_stats" )
|
||||
break;
|
||||
|
||||
case "net_stats":
|
||||
event Control::net_stats_request();
|
||||
else if ( cmd == "shutdown" )
|
||||
break;
|
||||
|
||||
case "shutdown":
|
||||
event Control::shutdown_request();
|
||||
else if ( cmd == "configuration_update" )
|
||||
{
|
||||
configuration_update_func(p);
|
||||
# Signal configuration update to peer.
|
||||
break;
|
||||
|
||||
case "configuration_update":
|
||||
event Control::configuration_update_request();
|
||||
break;
|
||||
|
||||
default:
|
||||
Reporter::fatal(fmt("unhandled Control::cmd, %s", cmd));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=-10
|
||||
{
|
||||
if ( cmd == "configuration_update" )
|
||||
{
|
||||
# Send all &redef'able consts to the peer.
|
||||
local ids = configurable_ids();
|
||||
|
||||
for ( id in ids )
|
||||
{
|
||||
local topic = fmt("%s/id/%s", Control::topic_prefix, id);
|
||||
Broker::publish_id(topic, id);
|
||||
}
|
||||
|
||||
Reporter::info(fmt("Control framework sent %d IDs", |ids|));
|
||||
}
|
||||
|
||||
send_control_request();
|
||||
}
|
||||
|
|
|
@ -22,23 +22,16 @@ export {
|
|||
const interesting_version_changes: set[string] = { } &redef;
|
||||
}
|
||||
|
||||
event log_software(rec: Info)
|
||||
event Software::version_change(old: Software::Info, new: Software::Info)
|
||||
{
|
||||
local ts = tracked[rec$host];
|
||||
if ( old$name !in interesting_version_changes )
|
||||
return;
|
||||
|
||||
if ( rec$name in ts )
|
||||
{
|
||||
local old = ts[rec$name];
|
||||
|
||||
# Is it a potentially interesting version change?
|
||||
if ( rec$name in interesting_version_changes )
|
||||
{
|
||||
local msg = fmt("%.6f %s switched from %s to %s (%s)",
|
||||
network_time(), rec$software_type,
|
||||
local msg = fmt("%.6f %s '%s' version changed from %s to %s",
|
||||
network_time(), old$software_type, old$name,
|
||||
software_fmt_version(old$version),
|
||||
software_fmt(rec), rec$software_type);
|
||||
NOTICE([$note=Software_Version_Change, $src=rec$host,
|
||||
$msg=msg, $sub=software_fmt(rec)]);
|
||||
}
|
||||
}
|
||||
software_fmt_version(new$version));
|
||||
|
||||
NOTICE([$note=Software_Version_Change, $src=new$host,
|
||||
$msg=msg, $sub=software_fmt(new)]);
|
||||
}
|
||||
|
|
|
@ -12,22 +12,11 @@ export {
|
|||
## Apply BPF filters to each worker in a way that causes them to
|
||||
## automatically flow balance traffic between them.
|
||||
AUTO_BPF,
|
||||
# Load balance traffic across the workers by making each one apply
|
||||
# a restrict filter to only listen to a single MAC address. This
|
||||
# is a somewhat common deployment option for sites doing network
|
||||
# based load balancing with MAC address rewriting and passing the
|
||||
# traffic to a single interface. Multiple MAC addresses will show
|
||||
# up on the same interface and need filtered to a single address.
|
||||
#MAC_ADDR_BPF,
|
||||
};
|
||||
|
||||
## Defines the method of load balancing to use.
|
||||
const method = AUTO_BPF &redef;
|
||||
|
||||
# Configure the cluster framework to enable the load balancing filter configuration.
|
||||
#global send_filter: event(for_node: string, filter: string);
|
||||
#global confirm_filter_installation: event(success: bool);
|
||||
|
||||
redef record Cluster::Node += {
|
||||
## A BPF filter for load balancing traffic sniffed on a single
|
||||
## interface across a number of processes. In normal uses, this
|
||||
|
@ -37,97 +26,86 @@ export {
|
|||
};
|
||||
}
|
||||
|
||||
#redef Cluster::manager2worker_events += /LoadBalancing::send_filter/;
|
||||
#redef Cluster::worker2manager_events += /LoadBalancing::confirm_filter_installation/;
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
if ( method != AUTO_BPF )
|
||||
return;
|
||||
|
||||
local worker_ip_interface: table[addr, string] of count = table();
|
||||
for ( n in Cluster::nodes )
|
||||
{
|
||||
local this_node = Cluster::nodes[n];
|
||||
local sorted_node_names: vector of string = vector();
|
||||
local node: Cluster::Node;
|
||||
local name: string;
|
||||
|
||||
# Only workers!
|
||||
if ( this_node$node_type != Cluster::WORKER ||
|
||||
! this_node?$interface )
|
||||
# Sort nodes list so that every node iterates over it in same order.
|
||||
for ( name in Cluster::nodes )
|
||||
sorted_node_names[|sorted_node_names|] = name;
|
||||
|
||||
sort(sorted_node_names, strcmp);
|
||||
|
||||
for ( idx in sorted_node_names )
|
||||
{
|
||||
name = sorted_node_names[idx];
|
||||
node = Cluster::nodes[name];
|
||||
|
||||
if ( node$node_type != Cluster::WORKER )
|
||||
next;
|
||||
|
||||
if ( [this_node$ip, this_node$interface] !in worker_ip_interface )
|
||||
worker_ip_interface[this_node$ip, this_node$interface] = 0;
|
||||
++worker_ip_interface[this_node$ip, this_node$interface];
|
||||
if ( ! node?$interface )
|
||||
next;
|
||||
|
||||
if ( [node$ip, node$interface] !in worker_ip_interface )
|
||||
worker_ip_interface[node$ip, node$interface] = 0;
|
||||
|
||||
++worker_ip_interface[node$ip, node$interface];
|
||||
}
|
||||
|
||||
# Now that we've counted up how many processes are running on an interface
|
||||
# let's create the filters for each worker.
|
||||
# Now that we've counted up how many processes are running per
|
||||
# interface, let's create the filters for each worker.
|
||||
local lb_proc_track: table[addr, string] of count = table();
|
||||
for ( no in Cluster::nodes )
|
||||
{
|
||||
local that_node = Cluster::nodes[no];
|
||||
if ( that_node$node_type == Cluster::WORKER &&
|
||||
that_node?$interface && [that_node$ip, that_node$interface] in worker_ip_interface )
|
||||
{
|
||||
if ( [that_node$ip, that_node$interface] !in lb_proc_track )
|
||||
lb_proc_track[that_node$ip, that_node$interface] = 0;
|
||||
|
||||
local this_lb_proc = lb_proc_track[that_node$ip, that_node$interface];
|
||||
local total_lb_procs = worker_ip_interface[that_node$ip, that_node$interface];
|
||||
for ( idx in sorted_node_names )
|
||||
{
|
||||
name = sorted_node_names[idx];
|
||||
node = Cluster::nodes[name];
|
||||
|
||||
if ( node$node_type != Cluster::WORKER )
|
||||
next;
|
||||
|
||||
if ( ! node?$interface )
|
||||
next;
|
||||
|
||||
if ( [node$ip, node$interface] !in worker_ip_interface )
|
||||
next;
|
||||
|
||||
if ( [node$ip, node$interface] !in lb_proc_track )
|
||||
lb_proc_track[node$ip, node$interface] = 0;
|
||||
|
||||
local this_lb_proc = lb_proc_track[node$ip, node$interface];
|
||||
local total_lb_procs = worker_ip_interface[node$ip, node$interface];
|
||||
++lb_proc_track[node$ip, node$interface];
|
||||
|
||||
++lb_proc_track[that_node$ip, that_node$interface];
|
||||
if ( total_lb_procs > 1 )
|
||||
node$lb_filter = PacketFilter::sampling_filter(total_lb_procs,
|
||||
this_lb_proc);
|
||||
}
|
||||
|
||||
# Finally, install filter for the current node if it needs one.
|
||||
for ( idx in sorted_node_names )
|
||||
{
|
||||
that_node$lb_filter = PacketFilter::sampling_filter(total_lb_procs, this_lb_proc);
|
||||
Communication::nodes[no]$capture_filter = that_node$lb_filter;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
name = sorted_node_names[idx];
|
||||
node = Cluster::nodes[name];
|
||||
|
||||
#event remote_connection_established(p: event_peer) &priority=-5
|
||||
# {
|
||||
# if ( is_remote_event() )
|
||||
# return;
|
||||
#
|
||||
# local for_node = p$descr;
|
||||
# # Send the filter to the peer.
|
||||
# if ( for_node in Cluster::nodes &&
|
||||
# Cluster::nodes[for_node]?$lb_filter )
|
||||
# {
|
||||
# local filter = Cluster::nodes[for_node]$lb_filter;
|
||||
# event LoadBalancing::send_filter(for_node, filter);
|
||||
# }
|
||||
# }
|
||||
if ( name != Cluster::node )
|
||||
next;
|
||||
|
||||
#event LoadBalancing::confirm_filter_installation(success: bool)
|
||||
# {
|
||||
# # This doesn't really matter yet since we aren't getting back a meaningful success response.
|
||||
# }
|
||||
if ( ! node?$lb_filter )
|
||||
next;
|
||||
|
||||
@endif
|
||||
|
||||
|
||||
@if ( Cluster::local_node_type() == Cluster::WORKER )
|
||||
|
||||
#event LoadBalancing::send_filter(for_node: string, filter: string)
|
||||
event remote_capture_filter(p: event_peer, filter: string)
|
||||
{
|
||||
#if ( for_node !in Cluster::nodes )
|
||||
# return;
|
||||
#
|
||||
#if ( Cluster::node == for_node )
|
||||
# {
|
||||
restrict_filters["lb_filter"] = filter;
|
||||
restrict_filters["lb_filter"] = node$lb_filter;
|
||||
PacketFilter::install();
|
||||
#event LoadBalancing::confirm_filter_installation(T);
|
||||
# }
|
||||
}
|
||||
}
|
||||
|
||||
@endif
|
||||
|
||||
@endif
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
##! use on a network per day.
|
||||
|
||||
@load base/utils/directions-and-hosts
|
||||
@load base/frameworks/cluster
|
||||
|
||||
module Known;
|
||||
|
||||
|
@ -20,22 +21,127 @@ export {
|
|||
host: addr &log;
|
||||
};
|
||||
|
||||
## Toggles between different implementations of this script.
|
||||
## When true, use a Broker data store, else use a regular Bro set
|
||||
## with keys uniformly distributed over proxy nodes in cluster
|
||||
## operation.
|
||||
const use_host_store = T &redef;
|
||||
|
||||
## The hosts whose existence should be logged and tracked.
|
||||
## See :bro:type:`Host` for possible choices.
|
||||
const host_tracking = LOCAL_HOSTS &redef;
|
||||
|
||||
## Holds the set of all known hosts. Keys in the store are addresses
|
||||
## and their associated value will always be the "true" boolean.
|
||||
global host_store: Cluster::StoreInfo;
|
||||
|
||||
## The Broker topic name to use for :bro:see:`Known::host_store`.
|
||||
const host_store_name = "bro/known/hosts" &redef;
|
||||
|
||||
## The expiry interval of new entries in :bro:see:`Known::host_store`.
|
||||
## This also changes the interval at which hosts get logged.
|
||||
const host_store_expiry = 1day &redef;
|
||||
|
||||
## The timeout interval to use for operations against
|
||||
## :bro:see:`Known::host_store`.
|
||||
const host_store_timeout = 15sec &redef;
|
||||
|
||||
## The set of all known addresses to store for preventing duplicate
|
||||
## logging of addresses. It can also be used from other scripts to
|
||||
## inspect if an address has been seen in use.
|
||||
## Maintain the list of known hosts for 24 hours so that the existence
|
||||
## of each individual address is logged each day.
|
||||
global known_hosts: set[addr] &create_expire=1day &synchronized &redef;
|
||||
##
|
||||
## In cluster operation, this set is distributed uniformly across
|
||||
## proxy nodes.
|
||||
global hosts: set[addr] &create_expire=1day &redef;
|
||||
|
||||
## An event that can be handled to access the :bro:type:`Known::HostsInfo`
|
||||
## record as it is sent on to the logging framework.
|
||||
global log_known_hosts: event(rec: HostsInfo);
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
if ( ! Known::use_host_store )
|
||||
return;
|
||||
|
||||
Known::host_store = Cluster::create_store(Known::host_store_name);
|
||||
}
|
||||
|
||||
event Known::host_found(info: HostsInfo)
|
||||
{
|
||||
if ( ! Known::use_host_store )
|
||||
return;
|
||||
|
||||
when ( local r = Broker::put_unique(Known::host_store$store, info$host,
|
||||
T, Known::host_store_expiry) )
|
||||
{
|
||||
if ( r$status == Broker::SUCCESS )
|
||||
{
|
||||
if ( r$result as bool )
|
||||
Log::write(Known::HOSTS_LOG, info);
|
||||
}
|
||||
else
|
||||
Reporter::error(fmt("%s: data store put_unique failure",
|
||||
Known::host_store_name));
|
||||
}
|
||||
timeout Known::host_store_timeout
|
||||
{
|
||||
# Can't really tell if master store ended up inserting a key.
|
||||
Log::write(Known::HOSTS_LOG, info);
|
||||
}
|
||||
}
|
||||
|
||||
event known_host_add(info: HostsInfo)
|
||||
{
|
||||
if ( use_host_store )
|
||||
return;
|
||||
|
||||
if ( info$host in Known::hosts )
|
||||
return;
|
||||
|
||||
add Known::hosts[info$host];
|
||||
|
||||
@if ( ! Cluster::is_enabled() ||
|
||||
Cluster::local_node_type() == Cluster::PROXY )
|
||||
Log::write(Known::HOSTS_LOG, info);
|
||||
@endif
|
||||
}
|
||||
|
||||
event Cluster::node_up(name: string, id: string)
|
||||
{
|
||||
if ( use_host_store )
|
||||
return;
|
||||
|
||||
if ( Cluster::local_node_type() != Cluster::WORKER )
|
||||
return;
|
||||
|
||||
# Drop local suppression cache on workers to force HRW key repartitioning.
|
||||
Known::hosts = set();
|
||||
}
|
||||
|
||||
event Cluster::node_down(name: string, id: string)
|
||||
{
|
||||
if ( use_host_store )
|
||||
return;
|
||||
|
||||
if ( Cluster::local_node_type() != Cluster::WORKER )
|
||||
return;
|
||||
|
||||
# Drop local suppression cache on workers to force HRW key repartitioning.
|
||||
Known::hosts = set();
|
||||
}
|
||||
|
||||
event Known::host_found(info: HostsInfo)
|
||||
{
|
||||
if ( use_host_store )
|
||||
return;
|
||||
|
||||
Cluster::publish_hrw(Cluster::proxy_pool, info$host, known_host_add, info);
|
||||
event known_host_add(info);
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Log::create_stream(Known::HOSTS_LOG, [$columns=HostsInfo, $ev=log_known_hosts, $path="known_hosts"]);
|
||||
|
@ -43,17 +149,15 @@ event bro_init()
|
|||
|
||||
event connection_established(c: connection) &priority=5
|
||||
{
|
||||
if ( c$orig$state != TCP_ESTABLISHED )
|
||||
return;
|
||||
|
||||
if ( c$resp$state != TCP_ESTABLISHED )
|
||||
return;
|
||||
|
||||
local id = c$id;
|
||||
|
||||
for ( host in set(id$orig_h, id$resp_h) )
|
||||
{
|
||||
if ( host !in known_hosts &&
|
||||
c$orig$state == TCP_ESTABLISHED &&
|
||||
c$resp$state == TCP_ESTABLISHED &&
|
||||
addr_matches_host(host, host_tracking) )
|
||||
{
|
||||
add known_hosts[host];
|
||||
Log::write(Known::HOSTS_LOG, [$ts=network_time(), $host=host]);
|
||||
}
|
||||
}
|
||||
if ( addr_matches_host(host, host_tracking) )
|
||||
event Known::host_found([$ts = network_time(), $host = host]);
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
##! during the session, the protocol will also be logged.
|
||||
|
||||
@load base/utils/directions-and-hosts
|
||||
@load base/frameworks/cluster
|
||||
|
||||
module Known;
|
||||
|
||||
|
@ -26,14 +27,46 @@ export {
|
|||
service: set[string] &log;
|
||||
};
|
||||
|
||||
## Toggles between different implementations of this script.
|
||||
## When true, use a Broker data store, else use a regular Bro set
|
||||
## with keys uniformly distributed over proxy nodes in cluster
|
||||
## operation.
|
||||
const use_service_store = T &redef;
|
||||
|
||||
## The hosts whose services should be tracked and logged.
|
||||
## See :bro:type:`Host` for possible choices.
|
||||
const service_tracking = LOCAL_HOSTS &redef;
|
||||
|
||||
type AddrPortPair: record {
|
||||
host: addr;
|
||||
p: port;
|
||||
};
|
||||
|
||||
## Holds the set of all known services. Keys in the store are
|
||||
## :bro:type:`Known::AddrPortPair` and their associated value is
|
||||
## always the boolean value of "true".
|
||||
global service_store: Cluster::StoreInfo;
|
||||
|
||||
## The Broker topic name to use for :bro:see:`Known::service_store`.
|
||||
const service_store_name = "bro/known/services" &redef;
|
||||
|
||||
## The expiry interval of new entries in :bro:see:`Known::service_store`.
|
||||
## This also changes the interval at which services get logged.
|
||||
const service_store_expiry = 1day &redef;
|
||||
|
||||
## The timeout interval to use for operations against
|
||||
## :bro:see:`Known::service_store`.
|
||||
const service_store_timeout = 15sec &redef;
|
||||
|
||||
## Tracks the set of daily-detected services for preventing the logging
|
||||
## of duplicates, but can also be inspected by other scripts for
|
||||
## different purposes.
|
||||
global known_services: set[addr, port] &create_expire=1day &synchronized;
|
||||
##
|
||||
## In cluster operation, this set is uniformly distributed across
|
||||
## proxy nodes.
|
||||
##
|
||||
## This set is automatically populated and shouldn't be directly modified.
|
||||
global services: set[addr, port] &create_expire=1day;
|
||||
|
||||
## Event that can be handled to access the :bro:type:`Known::ServicesInfo`
|
||||
## record as it is sent on to the logging framework.
|
||||
|
@ -46,27 +79,89 @@ redef record connection += {
|
|||
known_services_done: bool &default=F;
|
||||
};
|
||||
|
||||
event bro_init() &priority=5
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Log::create_stream(Known::SERVICES_LOG, [$columns=ServicesInfo,
|
||||
$ev=log_known_services,
|
||||
$path="known_services"]);
|
||||
if ( ! Known::use_service_store )
|
||||
return;
|
||||
|
||||
Known::service_store = Cluster::create_store(Known::service_store_name);
|
||||
}
|
||||
|
||||
event log_it(ts: time, a: addr, p: port, services: set[string])
|
||||
{
|
||||
if ( [a, p] !in known_services )
|
||||
{
|
||||
add known_services[a, p];
|
||||
event service_info_commit(info: ServicesInfo)
|
||||
|
||||
local i: ServicesInfo;
|
||||
i$ts=ts;
|
||||
i$host=a;
|
||||
i$port_num=p;
|
||||
i$port_proto=get_port_transport_proto(p);
|
||||
i$service=services;
|
||||
Log::write(Known::SERVICES_LOG, i);
|
||||
{
|
||||
if ( ! Known::use_service_store )
|
||||
return;
|
||||
|
||||
local key = AddrPortPair($host = info$host, $p = info$port_num);
|
||||
|
||||
when ( local r = Broker::put_unique(Known::service_store$store, key,
|
||||
T, Known::service_store_expiry) )
|
||||
{
|
||||
if ( r$status == Broker::SUCCESS )
|
||||
{
|
||||
if ( r$result as bool )
|
||||
Log::write(Known::SERVICES_LOG, info);
|
||||
}
|
||||
else
|
||||
Reporter::error(fmt("%s: data store put_unique failure",
|
||||
Known::service_store_name));
|
||||
}
|
||||
timeout Known::service_store_timeout
|
||||
{
|
||||
Log::write(Known::SERVICES_LOG, info);
|
||||
}
|
||||
}
|
||||
|
||||
event known_service_add(info: ServicesInfo)
|
||||
{
|
||||
if ( Known::use_service_store )
|
||||
return;
|
||||
|
||||
if ( [info$host, info$port_num] in Known::services )
|
||||
return;
|
||||
|
||||
add Known::services[info$host, info$port_num];
|
||||
|
||||
@if ( ! Cluster::is_enabled() ||
|
||||
Cluster::local_node_type() == Cluster::PROXY )
|
||||
Log::write(Known::SERVICES_LOG, info);
|
||||
@endif
|
||||
}
|
||||
|
||||
event Cluster::node_up(name: string, id: string)
|
||||
{
|
||||
if ( Known::use_service_store )
|
||||
return;
|
||||
|
||||
if ( Cluster::local_node_type() != Cluster::WORKER )
|
||||
return;
|
||||
|
||||
# Drop local suppression cache on workers to force HRW key repartitioning.
|
||||
Known::services = set();
|
||||
}
|
||||
|
||||
event Cluster::node_down(name: string, id: string)
|
||||
{
|
||||
if ( Known::use_service_store )
|
||||
return;
|
||||
|
||||
if ( Cluster::local_node_type() != Cluster::WORKER )
|
||||
return;
|
||||
|
||||
# Drop local suppression cache on workers to force HRW key repartitioning.
|
||||
Known::services = set();
|
||||
}
|
||||
|
||||
event service_info_commit(info: ServicesInfo)
|
||||
{
|
||||
if ( Known::use_service_store )
|
||||
return;
|
||||
|
||||
local key = cat(info$host, info$port_num);
|
||||
Cluster::publish_hrw(Cluster::proxy_pool, key, known_service_add, info);
|
||||
event known_service_add(info);
|
||||
}
|
||||
|
||||
function known_services_done(c: connection)
|
||||
|
@ -74,18 +169,31 @@ function known_services_done(c: connection)
|
|||
local id = c$id;
|
||||
c$known_services_done = T;
|
||||
|
||||
if ( ! addr_matches_host(id$resp_h, service_tracking) ||
|
||||
"ftp-data" in c$service || # don't include ftp data sessions
|
||||
("DNS" in c$service && c$resp$size == 0) ) # for dns, require that the server talks.
|
||||
if ( ! addr_matches_host(id$resp_h, service_tracking) )
|
||||
return;
|
||||
|
||||
# If no protocol was detected, wait a short
|
||||
# time before attempting to log in case a protocol is detected
|
||||
# on another connection.
|
||||
if ( |c$service| == 1 )
|
||||
{
|
||||
if ( "ftp-data" in c$service )
|
||||
# Don't include ftp data sessions.
|
||||
return;
|
||||
|
||||
if ( "DNS" in c$service && c$resp$size == 0 )
|
||||
# For dns, require that the server talks.
|
||||
return;
|
||||
}
|
||||
|
||||
local info = ServicesInfo($ts = network_time(), $host = id$resp_h,
|
||||
$port_num = id$resp_p,
|
||||
$port_proto = get_port_transport_proto(id$resp_p),
|
||||
$service = c$service);
|
||||
|
||||
# If no protocol was detected, wait a short time before attempting to log
|
||||
# in case a protocol is detected on another connection.
|
||||
if ( |c$service| == 0 )
|
||||
schedule 5min { log_it(network_time(), id$resp_h, id$resp_p, c$service) };
|
||||
schedule 5min { service_info_commit(info) };
|
||||
else
|
||||
event log_it(network_time(), id$resp_h, id$resp_p, c$service);
|
||||
event service_info_commit(info);
|
||||
}
|
||||
|
||||
event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=-5
|
||||
|
@ -96,6 +204,19 @@ event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &pr
|
|||
# Handle the connection ending in case no protocol was ever detected.
|
||||
event connection_state_remove(c: connection) &priority=-5
|
||||
{
|
||||
if ( ! c$known_services_done && c$resp$state == TCP_ESTABLISHED )
|
||||
if ( c$known_services_done )
|
||||
return;
|
||||
|
||||
if ( c$resp$state != TCP_ESTABLISHED )
|
||||
return;
|
||||
|
||||
known_services_done(c);
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(Known::SERVICES_LOG, [$columns=ServicesInfo,
|
||||
$ev=log_known_services,
|
||||
$path="known_services"]);
|
||||
}
|
||||
|
||||
|
|
|
@ -31,25 +31,40 @@ event signature_match(state: signature_state, msg: string, data: string) &priori
|
|||
local si: Software::Info;
|
||||
si = [$name=msg, $unparsed_version=msg, $host=c$id$resp_h, $host_p=c$id$resp_p, $software_type=WEB_APPLICATION];
|
||||
si$url = build_url_http(c$http);
|
||||
if ( c$id$resp_h in Software::tracked &&
|
||||
si$name in Software::tracked[c$id$resp_h] )
|
||||
Software::found(c$id, si);
|
||||
}
|
||||
|
||||
event Software::register(info: Software::Info) &priority=5
|
||||
{
|
||||
if ( info$host !in Software::tracked )
|
||||
return;
|
||||
|
||||
local ss = Software::tracked[info$host];
|
||||
|
||||
if ( info$name !in ss )
|
||||
return;
|
||||
|
||||
local old_info = ss[info$name];
|
||||
|
||||
if ( ! old_info?$url )
|
||||
return;
|
||||
|
||||
if ( ! info?$url )
|
||||
return;
|
||||
|
||||
# If the new url is a substring of an existing, known url then let's
|
||||
# use that as the new url for the software.
|
||||
# PROBLEM: different version of the same software on the same server with a shared root path
|
||||
local is_substring = 0;
|
||||
if ( Software::tracked[c$id$resp_h][si$name]?$url &&
|
||||
|si$url| <= |Software::tracked[c$id$resp_h][si$name]$url| )
|
||||
is_substring = strstr(Software::tracked[c$id$resp_h][si$name]$url, si$url);
|
||||
|
||||
if ( is_substring == 1 )
|
||||
{
|
||||
Software::tracked[c$id$resp_h][si$name]$url = si$url;
|
||||
if ( |info$url| <= |old_info$url| )
|
||||
is_substring = strstr(old_info$url, info$url);
|
||||
|
||||
if ( is_substring != 1 )
|
||||
return;
|
||||
|
||||
old_info$url = info$url;
|
||||
# Force the software to be logged because it indicates a URL
|
||||
# closer to the root of the site.
|
||||
si$force_log = T;
|
||||
}
|
||||
}
|
||||
|
||||
Software::found(c$id, si);
|
||||
info$force_log = T;
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
@load base/utils/directions-and-hosts
|
||||
@load base/protocols/ssl
|
||||
@load base/files/x509
|
||||
@load base/frameworks/cluster
|
||||
|
||||
module Known;
|
||||
|
||||
|
@ -30,26 +31,138 @@ export {
|
|||
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS.
|
||||
const cert_tracking = LOCAL_HOSTS &redef;
|
||||
|
||||
## Toggles between different implementations of this script.
|
||||
## When true, use a Broker data store, else use a regular Bro set
|
||||
## with keys uniformly distributed over proxy nodes in cluster
|
||||
## operation.
|
||||
const use_cert_store = T &redef;
|
||||
|
||||
type AddrCertHashPair: record {
|
||||
host: addr;
|
||||
hash: string;
|
||||
};
|
||||
|
||||
## Holds the set of all known certificates. Keys in the store are of
|
||||
## type :bro:type:`Known::AddrCertHashPair` and their associated value is
|
||||
## always the boolean value of "true".
|
||||
global cert_store: Cluster::StoreInfo;
|
||||
|
||||
## The Broker topic name to use for :bro:see:`Known::cert_store`.
|
||||
const cert_store_name = "bro/known/certs" &redef;
|
||||
|
||||
## The expiry interval of new entries in :bro:see:`Known::cert_store`.
|
||||
## This also changes the interval at which certs get logged.
|
||||
const cert_store_expiry = 1day &redef;
|
||||
|
||||
## The timeout interval to use for operations against
|
||||
## :bro:see:`Known::cert_store`.
|
||||
const cert_store_timeout = 15sec &redef;
|
||||
|
||||
## The set of all known certificates to store for preventing duplicate
|
||||
## logging. It can also be used from other scripts to
|
||||
## inspect if a certificate has been seen in use. The string value
|
||||
## in the set is for storing the DER formatted certificate' SHA1 hash.
|
||||
global certs: set[addr, string] &create_expire=1day &synchronized &redef;
|
||||
##
|
||||
## In cluster operation, this set is uniformly distributed across
|
||||
## proxy nodes.
|
||||
global certs: set[addr, string] &create_expire=1day &redef;
|
||||
|
||||
## Event that can be handled to access the loggable record as it is sent
|
||||
## on to the logging framework.
|
||||
global log_known_certs: event(rec: CertsInfo);
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
event bro_init()
|
||||
{
|
||||
Log::create_stream(Known::CERTS_LOG, [$columns=CertsInfo, $ev=log_known_certs, $path="known_certs"]);
|
||||
if ( ! Known::use_cert_store )
|
||||
return;
|
||||
|
||||
Known::cert_store = Cluster::create_store(Known::cert_store_name);
|
||||
}
|
||||
|
||||
event Known::cert_found(info: CertsInfo, hash: string)
|
||||
{
|
||||
if ( ! Known::use_cert_store )
|
||||
return;
|
||||
|
||||
local key = AddrCertHashPair($host = info$host, $hash = hash);
|
||||
|
||||
when ( local r = Broker::put_unique(Known::cert_store$store, key,
|
||||
T, Known::cert_store_expiry) )
|
||||
{
|
||||
if ( r$status == Broker::SUCCESS )
|
||||
{
|
||||
if ( r$result as bool )
|
||||
Log::write(Known::CERTS_LOG, info);
|
||||
}
|
||||
else
|
||||
Reporter::error(fmt("%s: data store put_unique failure",
|
||||
Known::cert_store_name));
|
||||
}
|
||||
timeout Known::cert_store_timeout
|
||||
{
|
||||
# Can't really tell if master store ended up inserting a key.
|
||||
Log::write(Known::CERTS_LOG, info);
|
||||
}
|
||||
}
|
||||
|
||||
event known_cert_add(info: CertsInfo, hash: string)
|
||||
{
|
||||
if ( Known::use_cert_store )
|
||||
return;
|
||||
|
||||
if ( [info$host, hash] in Known::certs )
|
||||
return;
|
||||
|
||||
add Known::certs[info$host, hash];
|
||||
|
||||
@if ( ! Cluster::is_enabled() ||
|
||||
Cluster::local_node_type() == Cluster::PROXY )
|
||||
Log::write(Known::CERTS_LOG, info);
|
||||
@endif
|
||||
}
|
||||
|
||||
event Known::cert_found(info: CertsInfo, hash: string)
|
||||
{
|
||||
if ( Known::use_cert_store )
|
||||
return;
|
||||
|
||||
local key = cat(info$host, hash);
|
||||
Cluster::publish_hrw(Cluster::proxy_pool, key, known_cert_add, info, hash);
|
||||
event known_cert_add(info, hash);
|
||||
}
|
||||
|
||||
event Cluster::node_up(name: string, id: string)
|
||||
{
|
||||
if ( Known::use_cert_store )
|
||||
return;
|
||||
|
||||
if ( Cluster::local_node_type() != Cluster::WORKER )
|
||||
return;
|
||||
|
||||
Known::certs = table();
|
||||
}
|
||||
|
||||
event Cluster::node_down(name: string, id: string)
|
||||
{
|
||||
if ( Known::use_cert_store )
|
||||
return;
|
||||
|
||||
if ( Cluster::local_node_type() != Cluster::WORKER )
|
||||
return;
|
||||
|
||||
Known::certs = table();
|
||||
}
|
||||
|
||||
event ssl_established(c: connection) &priority=3
|
||||
{
|
||||
if ( ! c$ssl?$cert_chain || |c$ssl$cert_chain| < 1 ||
|
||||
! c$ssl$cert_chain[0]?$x509 )
|
||||
if ( ! c$ssl?$cert_chain )
|
||||
return;
|
||||
|
||||
if ( |c$ssl$cert_chain| < 1 )
|
||||
return;
|
||||
|
||||
if ( ! c$ssl$cert_chain[0]?$x509 )
|
||||
return;
|
||||
|
||||
local fuid = c$ssl$cert_chain_fuids[0];
|
||||
|
@ -61,16 +174,21 @@ event ssl_established(c: connection) &priority=3
|
|||
return;
|
||||
}
|
||||
|
||||
local host = c$id$resp_h;
|
||||
|
||||
if ( ! addr_matches_host(host, cert_tracking) )
|
||||
return;
|
||||
|
||||
local hash = c$ssl$cert_chain[0]$sha1;
|
||||
local cert = c$ssl$cert_chain[0]$x509$certificate;
|
||||
|
||||
local host = c$id$resp_h;
|
||||
if ( [host, hash] !in certs && addr_matches_host(host, cert_tracking) )
|
||||
{
|
||||
add certs[host, hash];
|
||||
Log::write(Known::CERTS_LOG, [$ts=network_time(), $host=host,
|
||||
local info = CertsInfo($ts = network_time(), $host = host,
|
||||
$port_num = c$id$resp_p, $subject = cert$subject,
|
||||
$issuer_subject = cert$issuer,
|
||||
$serial=cert$serial]);
|
||||
$serial = cert$serial);
|
||||
event Known::cert_found(info, hash);
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(Known::CERTS_LOG, [$columns=CertsInfo, $ev=log_known_certs, $path="known_certs"]);
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
# Also caches all intermediate certificates encountered so far and use them
|
||||
# for future validations.
|
||||
|
||||
@load base/frameworks/cluster
|
||||
@load base/frameworks/notice
|
||||
@load base/protocols/ssl
|
||||
|
||||
|
@ -61,12 +62,13 @@ export {
|
|||
global intermediate_cache: table[string] of vector of opaque of x509;
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
@load base/frameworks/cluster
|
||||
redef Cluster::manager2worker_events += /SSL::intermediate_add/;
|
||||
redef Cluster::worker2manager_events += /SSL::new_intermediate/;
|
||||
event bro_init()
|
||||
{
|
||||
Broker::auto_publish(Cluster::worker_topic, SSL::intermediate_add);
|
||||
Broker::auto_publish(Cluster::manager_topic, SSL::new_intermediate);
|
||||
}
|
||||
@endif
|
||||
|
||||
|
||||
function add_to_cache(key: string, value: vector of opaque of x509)
|
||||
{
|
||||
intermediate_cache[key] = value;
|
||||
|
|
|
@ -9,7 +9,6 @@
|
|||
|
||||
# The base/ scripts are all loaded by default and not included here.
|
||||
|
||||
# @load frameworks/communication/listen.bro
|
||||
# @load frameworks/control/controllee.bro
|
||||
# @load frameworks/control/controller.bro
|
||||
@load frameworks/dpd/detect-protocols.bro
|
||||
|
|
|
@ -156,6 +156,7 @@ set(bro_SUBDIR_LIBS CACHE INTERNAL "subdir libraries" FORCE)
|
|||
set(bro_PLUGIN_LIBS CACHE INTERNAL "plugin libraries" FORCE)
|
||||
|
||||
add_subdirectory(analyzer)
|
||||
add_subdirectory(broker)
|
||||
add_subdirectory(broxygen)
|
||||
add_subdirectory(file_analysis)
|
||||
add_subdirectory(input)
|
||||
|
@ -163,14 +164,6 @@ add_subdirectory(iosource)
|
|||
add_subdirectory(logging)
|
||||
add_subdirectory(probabilistic)
|
||||
|
||||
if ( ENABLE_BROKER )
|
||||
add_subdirectory(broker)
|
||||
else ()
|
||||
# Just to satisfy coverage unit tests until new Broker-based
|
||||
# communication is enabled by default.
|
||||
add_subdirectory(broker-dummy)
|
||||
endif ()
|
||||
|
||||
set(bro_SUBDIRS
|
||||
# Order is important here.
|
||||
${bro_PLUGIN_LIBS}
|
||||
|
|
32
src/Event.cc
32
src/Event.cc
|
@ -54,6 +54,38 @@ void Event::Describe(ODesc* d) const
|
|||
d->Add("(");
|
||||
}
|
||||
|
||||
void Event::Dispatch(bool no_remote)
|
||||
{
|
||||
if ( src == SOURCE_BROKER )
|
||||
no_remote = true;
|
||||
|
||||
if ( event_serializer )
|
||||
{
|
||||
SerialInfo info(event_serializer);
|
||||
event_serializer->Serialize(&info, handler->Name(), args);
|
||||
}
|
||||
|
||||
if ( handler->ErrorHandler() )
|
||||
reporter->BeginErrorHandler();
|
||||
|
||||
try
|
||||
{
|
||||
handler->Call(args, no_remote);
|
||||
}
|
||||
|
||||
catch ( InterpreterException& e )
|
||||
{
|
||||
// Already reported.
|
||||
}
|
||||
|
||||
if ( obj )
|
||||
// obj->EventDone();
|
||||
Unref(obj);
|
||||
|
||||
if ( handler->ErrorHandler() )
|
||||
reporter->EndErrorHandler();
|
||||
}
|
||||
|
||||
EventMgr::EventMgr()
|
||||
{
|
||||
head = tail = 0;
|
||||
|
|
29
src/Event.h
29
src/Event.h
|
@ -34,34 +34,7 @@ protected:
|
|||
|
||||
// This method is protected to make sure that everybody goes through
|
||||
// EventMgr::Dispatch().
|
||||
void Dispatch(bool no_remote = false)
|
||||
{
|
||||
if ( event_serializer )
|
||||
{
|
||||
SerialInfo info(event_serializer);
|
||||
event_serializer->Serialize(&info, handler->Name(), args);
|
||||
}
|
||||
|
||||
if ( handler->ErrorHandler() )
|
||||
reporter->BeginErrorHandler();
|
||||
|
||||
try
|
||||
{
|
||||
handler->Call(args, no_remote);
|
||||
}
|
||||
|
||||
catch ( InterpreterException& e )
|
||||
{
|
||||
// Already reported.
|
||||
}
|
||||
|
||||
if ( obj )
|
||||
// obj->EventDone();
|
||||
Unref(obj);
|
||||
|
||||
if ( handler->ErrorHandler() )
|
||||
reporter->EndErrorHandler();
|
||||
}
|
||||
void Dispatch(bool no_remote = false);
|
||||
|
||||
EventHandlerPtr handler;
|
||||
val_list* args;
|
||||
|
|
|
@ -5,10 +5,8 @@
|
|||
#include "RemoteSerializer.h"
|
||||
#include "NetVar.h"
|
||||
|
||||
#ifdef ENABLE_BROKER
|
||||
#include "broker/Manager.h"
|
||||
#include "broker/Data.h"
|
||||
#endif
|
||||
|
||||
EventHandler::EventHandler(const char* arg_name)
|
||||
{
|
||||
|
@ -32,19 +30,16 @@ EventHandler::operator bool() const
|
|||
return enabled && ((local && local->HasBodies())
|
||||
|| receivers.length()
|
||||
|| generate_always
|
||||
#ifdef ENABLE_BROKER
|
||||
|| ! auto_remote_send.empty()
|
||||
// TODO: and require a subscriber interested in a topic or unsolicited flags?
|
||||
#endif
|
||||
);
|
||||
|| ! auto_publish.empty());
|
||||
}
|
||||
|
||||
FuncType* EventHandler::FType()
|
||||
FuncType* EventHandler::FType(bool check_export)
|
||||
{
|
||||
if ( type )
|
||||
return type;
|
||||
|
||||
ID* id = lookup_ID(name, current_module.c_str());
|
||||
ID* id = lookup_ID(name, current_module.c_str(), false, false,
|
||||
check_export);
|
||||
|
||||
if ( ! id )
|
||||
return 0;
|
||||
|
@ -84,14 +79,11 @@ void EventHandler::Call(val_list* vl, bool no_remote)
|
|||
remote_serializer->SendCall(&info, receivers[i], name, vl);
|
||||
}
|
||||
|
||||
#ifdef ENABLE_BROKER
|
||||
|
||||
if ( ! auto_remote_send.empty() )
|
||||
if ( ! auto_publish.empty() )
|
||||
{
|
||||
// TODO: also short-circuit based on interested subscribers/flags?
|
||||
broker::message msg;
|
||||
msg.reserve(vl->length() + 1);
|
||||
msg.emplace_back(Name());
|
||||
// Send event in form [name, xs...] where xs represent the arguments.
|
||||
broker::vector xs;
|
||||
xs.reserve(vl->length());
|
||||
bool valid_args = true;
|
||||
|
||||
for ( auto i = 0; i < vl->length(); ++i )
|
||||
|
@ -99,30 +91,33 @@ void EventHandler::Call(val_list* vl, bool no_remote)
|
|||
auto opt_data = bro_broker::val_to_data((*vl)[i]);
|
||||
|
||||
if ( opt_data )
|
||||
msg.emplace_back(move(*opt_data));
|
||||
xs.emplace_back(move(*opt_data));
|
||||
else
|
||||
{
|
||||
valid_args = false;
|
||||
auto_remote_send.clear();
|
||||
reporter->Error("failed auto-remote event '%s', disabled",
|
||||
Name());
|
||||
auto_publish.clear();
|
||||
reporter->Error("failed auto-remote event '%s', disabled", Name());
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if ( valid_args )
|
||||
{
|
||||
for ( auto it = auto_remote_send.begin();
|
||||
it != auto_remote_send.end(); ++it )
|
||||
for ( auto it = auto_publish.begin(); ; )
|
||||
{
|
||||
if ( std::next(it) == auto_remote_send.end() )
|
||||
broker_mgr->Event(it->first, move(msg), it->second);
|
||||
const auto& topic = *it;
|
||||
++it;
|
||||
|
||||
if ( it != auto_publish.end() )
|
||||
broker_mgr->PublishEvent(topic, Name(), xs);
|
||||
else
|
||||
broker_mgr->Event(it->first, msg, it->second);
|
||||
{
|
||||
broker_mgr->PublishEvent(topic, Name(), std::move(xs));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if ( local )
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
#define EVENTHANDLER
|
||||
|
||||
#include <assert.h>
|
||||
#include <map>
|
||||
#include <unordered_set>
|
||||
#include <string>
|
||||
#include "List.h"
|
||||
#include "BroList.h"
|
||||
|
@ -22,24 +22,22 @@ public:
|
|||
|
||||
const char* Name() { return name; }
|
||||
Func* LocalHandler() { return local; }
|
||||
FuncType* FType();
|
||||
FuncType* FType(bool check_export = true);
|
||||
|
||||
void SetLocalHandler(Func* f);
|
||||
|
||||
void AddRemoteHandler(SourceID peer);
|
||||
void RemoveRemoteHandler(SourceID peer);
|
||||
|
||||
#ifdef ENABLE_BROKER
|
||||
void AutoRemote(std::string topic, int flags)
|
||||
void AutoPublish(std::string topic)
|
||||
{
|
||||
auto_remote_send[std::move(topic)] = flags;
|
||||
auto_publish.insert(std::move(topic));
|
||||
}
|
||||
|
||||
void AutoRemoteStop(const std::string& topic)
|
||||
void AutoUnpublish(const std::string& topic)
|
||||
{
|
||||
auto_remote_send.erase(topic);
|
||||
auto_publish.erase(topic);
|
||||
}
|
||||
#endif
|
||||
|
||||
void Call(val_list* vl, bool no_remote = false);
|
||||
|
||||
|
@ -81,9 +79,7 @@ private:
|
|||
typedef List(SourceID) receiver_list;
|
||||
receiver_list receivers;
|
||||
|
||||
#ifdef ENABLE_BROKER
|
||||
std::map<std::string, int> auto_remote_send; // topic -> flags
|
||||
#endif
|
||||
std::unordered_set<std::string> auto_publish;
|
||||
};
|
||||
|
||||
// Encapsulates a ptr to an event handler to overload the boolean operator.
|
||||
|
|
125
src/Expr.cc
125
src/Expr.cc
|
@ -18,8 +18,6 @@
|
|||
|
||||
const char* expr_name(BroExprTag t)
|
||||
{
|
||||
static char errbuf[512];
|
||||
|
||||
static const char* expr_names[int(NUM_EXPRS)] = {
|
||||
"name", "const",
|
||||
"(*)",
|
||||
|
@ -31,7 +29,7 @@ const char* expr_name(BroExprTag t)
|
|||
"$=", "in", "<<>>",
|
||||
"()", "event", "schedule",
|
||||
"coerce", "record_coerce", "table_coerce",
|
||||
"sizeof", "flatten"
|
||||
"sizeof", "flatten", "cast", "is"
|
||||
};
|
||||
|
||||
if ( int(t) >= NUM_EXPRS )
|
||||
|
@ -4537,13 +4535,21 @@ Val* CallExpr::Eval(Frame* f) const
|
|||
if ( func_val && v )
|
||||
{
|
||||
const ::Func* func = func_val->AsFunc();
|
||||
calling_expr = this;
|
||||
const CallExpr* current_call = f ? f->GetCall() : 0;
|
||||
call_stack.emplace_back(CallInfo{this, func});
|
||||
|
||||
if ( f )
|
||||
f->SetCall(this);
|
||||
|
||||
ret = func->Call(v, f); // No try/catch here; we pass exceptions upstream.
|
||||
try
|
||||
{
|
||||
ret = func->Call(v, f);
|
||||
}
|
||||
catch ( ... )
|
||||
{
|
||||
call_stack.pop_back();
|
||||
throw;
|
||||
}
|
||||
|
||||
if ( f )
|
||||
f->SetCall(current_call);
|
||||
|
@ -4551,7 +4557,7 @@ Val* CallExpr::Eval(Frame* f) const
|
|||
// Don't Unref() the arguments, as Func::Call already did that.
|
||||
delete v;
|
||||
|
||||
calling_expr = 0;
|
||||
call_stack.pop_back();
|
||||
}
|
||||
else
|
||||
delete_vals(v);
|
||||
|
@ -5203,6 +5209,112 @@ bool RecordAssignExpr::DoUnserialize(UnserialInfo* info)
|
|||
return true;
|
||||
}
|
||||
|
||||
CastExpr::CastExpr(Expr* arg_op, BroType* t) : UnaryExpr(EXPR_CAST, arg_op)
|
||||
{
|
||||
auto stype = Op()->Type();
|
||||
|
||||
::Ref(t);
|
||||
SetType(t);
|
||||
|
||||
if ( ! can_cast_value_to_type(stype, t) )
|
||||
ExprError("cast not supported");
|
||||
}
|
||||
|
||||
Val* CastExpr::Eval(Frame* f) const
|
||||
{
|
||||
if ( IsError() )
|
||||
return 0;
|
||||
|
||||
Val* v = op->Eval(f);
|
||||
|
||||
if ( ! v )
|
||||
return 0;
|
||||
|
||||
Val* nv = cast_value_to_type(v, Type());
|
||||
|
||||
if ( nv )
|
||||
{
|
||||
Unref(v);
|
||||
return nv;
|
||||
}
|
||||
|
||||
ODesc d;
|
||||
d.Add("cannot cast value of type '");
|
||||
v->Type()->Describe(&d);
|
||||
d.Add("' to type '");
|
||||
Type()->Describe(&d);
|
||||
d.Add("'");
|
||||
Unref(v);
|
||||
reporter->ExprRuntimeError(this, "%s", d.Description());
|
||||
return 0; // not reached.
|
||||
}
|
||||
|
||||
void CastExpr::ExprDescribe(ODesc* d) const
|
||||
{
|
||||
Op()->Describe(d);
|
||||
d->Add(" as ");
|
||||
Type()->Describe(d);
|
||||
}
|
||||
|
||||
IMPLEMENT_SERIAL(CastExpr, SER_CAST_EXPR);
|
||||
|
||||
bool CastExpr::DoSerialize(SerialInfo* info) const
|
||||
{
|
||||
DO_SERIALIZE(SER_CAST_EXPR, UnaryExpr);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CastExpr::DoUnserialize(UnserialInfo* info)
|
||||
{
|
||||
DO_UNSERIALIZE(UnaryExpr);
|
||||
return true;
|
||||
}
|
||||
|
||||
IsExpr::IsExpr(Expr* arg_op, BroType* arg_t) : UnaryExpr(EXPR_IS, arg_op)
|
||||
{
|
||||
t = arg_t;
|
||||
::Ref(t);
|
||||
|
||||
SetType(base_type(TYPE_BOOL));
|
||||
}
|
||||
|
||||
IsExpr::~IsExpr()
|
||||
{
|
||||
Unref(t);
|
||||
}
|
||||
|
||||
Val* IsExpr::Fold(Val* v) const
|
||||
{
|
||||
if ( IsError() )
|
||||
return 0;
|
||||
|
||||
if ( can_cast_value_to_type(v, t) )
|
||||
return new Val(1, TYPE_BOOL);
|
||||
else
|
||||
return new Val(0, TYPE_BOOL);
|
||||
}
|
||||
|
||||
void IsExpr::ExprDescribe(ODesc* d) const
|
||||
{
|
||||
Op()->Describe(d);
|
||||
d->Add(" is ");
|
||||
t->Describe(d);
|
||||
}
|
||||
|
||||
IMPLEMENT_SERIAL(IsExpr, SER_IS_EXPR_ /* sic */);
|
||||
|
||||
bool IsExpr::DoSerialize(SerialInfo* info) const
|
||||
{
|
||||
DO_SERIALIZE(SER_IS_EXPR_, UnaryExpr);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool IsExpr::DoUnserialize(UnserialInfo* info)
|
||||
{
|
||||
DO_UNSERIALIZE(UnaryExpr);
|
||||
return true;
|
||||
}
|
||||
|
||||
Expr* get_assign_expr(Expr* op1, Expr* op2, int is_init)
|
||||
{
|
||||
if ( op1->Type()->Tag() == TYPE_RECORD &&
|
||||
|
@ -5212,7 +5324,6 @@ Expr* get_assign_expr(Expr* op1, Expr* op2, int is_init)
|
|||
return new AssignExpr(op1, op2, is_init);
|
||||
}
|
||||
|
||||
|
||||
int check_and_promote_expr(Expr*& e, BroType* t)
|
||||
{
|
||||
BroType* et = e->Type();
|
||||
|
|
35
src/Expr.h
35
src/Expr.h
|
@ -44,7 +44,9 @@ typedef enum {
|
|||
EXPR_VECTOR_COERCE,
|
||||
EXPR_SIZE,
|
||||
EXPR_FLATTEN,
|
||||
#define NUM_EXPRS (int(EXPR_FLATTEN) + 1)
|
||||
EXPR_CAST,
|
||||
EXPR_IS,
|
||||
#define NUM_EXPRS (int(EXPR_IS) + 1)
|
||||
} BroExprTag;
|
||||
|
||||
extern const char* expr_name(BroExprTag t);
|
||||
|
@ -1044,6 +1046,37 @@ protected:
|
|||
DECLARE_SERIAL(RecordAssignExpr);
|
||||
};
|
||||
|
||||
class CastExpr : public UnaryExpr {
|
||||
public:
|
||||
CastExpr(Expr* op, BroType* t);
|
||||
|
||||
protected:
|
||||
friend class Expr;
|
||||
CastExpr() { }
|
||||
|
||||
Val* Eval(Frame* f) const override;
|
||||
void ExprDescribe(ODesc* d) const override;
|
||||
|
||||
DECLARE_SERIAL(CastExpr);
|
||||
};
|
||||
|
||||
class IsExpr : public UnaryExpr {
|
||||
public:
|
||||
IsExpr(Expr* op, BroType* t);
|
||||
virtual ~IsExpr();
|
||||
|
||||
protected:
|
||||
friend class Expr;
|
||||
IsExpr() { }
|
||||
|
||||
Val* Fold(Val* v) const override;
|
||||
void ExprDescribe(ODesc* d) const override;
|
||||
DECLARE_SERIAL(IsExpr);
|
||||
|
||||
private:
|
||||
BroType* t;
|
||||
};
|
||||
|
||||
inline Val* Expr::ExprVal() const
|
||||
{
|
||||
if ( ! IsConst() )
|
||||
|
|
59
src/Func.cc
59
src/Func.cc
|
@ -50,7 +50,7 @@
|
|||
|
||||
extern RETSIGTYPE sig_handler(int signo);
|
||||
|
||||
const Expr* calling_expr = 0;
|
||||
vector<CallInfo> call_stack;
|
||||
bool did_builtin_init = false;
|
||||
|
||||
vector<Func*> Func::unique_ids;
|
||||
|
@ -637,10 +637,60 @@ bool BuiltinFunc::DoUnserialize(UnserialInfo* info)
|
|||
|
||||
void builtin_error(const char* msg, BroObj* arg)
|
||||
{
|
||||
if ( calling_expr )
|
||||
calling_expr->Error(msg, arg);
|
||||
else
|
||||
if ( call_stack.empty() )
|
||||
{
|
||||
reporter->Error(msg, arg);
|
||||
return;
|
||||
}
|
||||
|
||||
auto last_call = call_stack.back();
|
||||
|
||||
if ( call_stack.size() < 2 )
|
||||
{
|
||||
// Don't need to check for wrapper function like "<module>::__<func>"
|
||||
last_call.call->Error(msg, arg);
|
||||
return;
|
||||
}
|
||||
|
||||
auto starts_with_double_underscore = [](const std::string& name) -> bool
|
||||
{ return name.size() > 2 && name[0] == '_' && name[1] == '_'; };
|
||||
auto last_loc = last_call.call->GetLocationInfo();
|
||||
std::string last_func = last_call.func->Name();
|
||||
|
||||
auto pos = last_func.find_first_of("::");
|
||||
std::string wrapper_func;
|
||||
|
||||
if ( pos == std::string::npos )
|
||||
{
|
||||
if ( ! starts_with_double_underscore(last_func) )
|
||||
{
|
||||
last_call.call->Error(msg, arg);
|
||||
return;
|
||||
}
|
||||
|
||||
wrapper_func = last_func.substr(2);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto module_name = last_func.substr(0, pos);
|
||||
auto func_name = last_func.substr(pos + 2);
|
||||
|
||||
if ( ! starts_with_double_underscore(func_name) )
|
||||
{
|
||||
last_call.call->Error(msg, arg);
|
||||
return;
|
||||
}
|
||||
|
||||
wrapper_func = module_name + "::" + func_name.substr(2);
|
||||
}
|
||||
|
||||
auto parent_call = call_stack[call_stack.size() - 2];
|
||||
auto parent_func = parent_call.func->Name();
|
||||
|
||||
if ( wrapper_func == parent_func )
|
||||
parent_call.call->Error(msg, arg);
|
||||
else
|
||||
last_call.call->Error(msg, arg);
|
||||
}
|
||||
|
||||
#include "bro.bif.func_h"
|
||||
|
@ -671,6 +721,7 @@ void init_builtin_funcs()
|
|||
TimerStats = internal_type("TimerStats")->AsRecordType();
|
||||
FileAnalysisStats = internal_type("FileAnalysisStats")->AsRecordType();
|
||||
ThreadStats = internal_type("ThreadStats")->AsRecordType();
|
||||
BrokerStats = internal_type("BrokerStats")->AsRecordType();
|
||||
|
||||
var_sizes = internal_type("var_sizes")->AsTableType();
|
||||
|
||||
|
|
10
src/Func.h
10
src/Func.h
|
@ -140,10 +140,12 @@ extern void init_builtin_funcs_subdirs();
|
|||
|
||||
extern bool check_built_in_call(BuiltinFunc* f, CallExpr* call);
|
||||
|
||||
// This global is set prior to the interpreter making a function call.
|
||||
// It's there so that built-in functions can access the location information
|
||||
// associated with a call when reporting error messages.
|
||||
extern const Expr* calling_expr;
|
||||
struct CallInfo {
|
||||
const CallExpr* call;
|
||||
const Func* func;
|
||||
};
|
||||
|
||||
extern vector<CallInfo> call_stack;
|
||||
|
||||
// This is set to true after the built-in functions have been initialized.
|
||||
extern bool did_builtin_init;
|
||||
|
|
30
src/Net.cc
30
src/Net.cc
|
@ -33,10 +33,7 @@
|
|||
#include "iosource/PktSrc.h"
|
||||
#include "iosource/PktDumper.h"
|
||||
#include "plugin/Manager.h"
|
||||
|
||||
#ifdef ENABLE_BROKER
|
||||
#include "broker/Manager.h"
|
||||
#endif
|
||||
|
||||
extern "C" {
|
||||
#include "setsignal.h"
|
||||
|
@ -312,11 +309,7 @@ void net_run()
|
|||
}
|
||||
#endif
|
||||
current_iosrc = src;
|
||||
bool communication_enabled = using_communication;
|
||||
|
||||
#ifdef ENABLE_BROKER
|
||||
communication_enabled |= broker_mgr->Enabled();
|
||||
#endif
|
||||
auto communication_enabled = using_communication || broker_mgr->Active();
|
||||
|
||||
if ( src )
|
||||
src->Process(); // which will call net_packet_dispatch()
|
||||
|
@ -334,7 +327,8 @@ void net_run()
|
|||
}
|
||||
}
|
||||
|
||||
else if ( (have_pending_timers || communication_enabled) &&
|
||||
else if ( (have_pending_timers || communication_enabled ||
|
||||
BifConst::exit_only_after_terminate) &&
|
||||
! pseudo_realtime )
|
||||
{
|
||||
// Take advantage of the lull to get up to
|
||||
|
@ -387,6 +381,24 @@ void net_run()
|
|||
// Check whether we have timers scheduled for
|
||||
// the future on which we need to wait.
|
||||
have_pending_timers = timer_mgr->Size() > 0;
|
||||
|
||||
if ( pseudo_realtime && communication_enabled )
|
||||
{
|
||||
auto have_active_packet_source = false;
|
||||
|
||||
for ( auto& ps : iosource_mgr->GetPktSrcs() )
|
||||
{
|
||||
if ( ps->IsOpen() )
|
||||
{
|
||||
have_active_packet_source = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if ( ! have_active_packet_source )
|
||||
// Can turn off pseudo realtime now
|
||||
pseudo_realtime = 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Get the final statistics now, and not when net_finish() is
|
||||
|
|
|
@ -408,6 +408,14 @@ RE_Matcher::RE_Matcher(const char* pat)
|
|||
AddPat(pat);
|
||||
}
|
||||
|
||||
RE_Matcher::RE_Matcher(const char* exact_pat, const char* anywhere_pat)
|
||||
{
|
||||
re_anywhere = new Specific_RE_Matcher(MATCH_ANYWHERE);
|
||||
re_anywhere->SetPat(anywhere_pat);
|
||||
re_exact = new Specific_RE_Matcher(MATCH_EXACTLY);
|
||||
re_exact->SetPat(exact_pat);
|
||||
}
|
||||
|
||||
RE_Matcher::~RE_Matcher()
|
||||
{
|
||||
delete re_anywhere;
|
||||
|
|
3
src/RE.h
3
src/RE.h
|
@ -173,7 +173,8 @@ class RE_Matcher : SerialObj {
|
|||
public:
|
||||
RE_Matcher();
|
||||
explicit RE_Matcher(const char* pat);
|
||||
~RE_Matcher() override;
|
||||
RE_Matcher(const char* exact_pat, const char* anywhere_pat);
|
||||
virtual ~RE_Matcher() override;
|
||||
|
||||
void AddPat(const char* pat);
|
||||
|
||||
|
|
|
@ -113,13 +113,13 @@ TraversalCode Scope::Traverse(TraversalCallback* cb) const
|
|||
|
||||
|
||||
ID* lookup_ID(const char* name, const char* curr_module, bool no_global,
|
||||
bool same_module_only)
|
||||
bool same_module_only, bool check_export)
|
||||
{
|
||||
string fullname = make_full_var_name(curr_module, name);
|
||||
|
||||
string ID_module = extract_module_name(fullname.c_str());
|
||||
bool need_export = ID_module != GLOBAL_MODULE_NAME &&
|
||||
ID_module != curr_module;
|
||||
bool need_export = check_export && (ID_module != GLOBAL_MODULE_NAME &&
|
||||
ID_module != curr_module);
|
||||
|
||||
for ( int i = scopes.length() - 1; i >= 0; --i )
|
||||
{
|
||||
|
|
|
@ -64,7 +64,8 @@ extern bool in_debug;
|
|||
// If no_global is true, don't search in the default "global" namespace.
|
||||
// This passed ownership of a ref'ed ID to the caller.
|
||||
extern ID* lookup_ID(const char* name, const char* module,
|
||||
bool no_global = false, bool same_module_only=false);
|
||||
bool no_global = false, bool same_module_only = false,
|
||||
bool check_export = true);
|
||||
extern ID* install_ID(const char* name, const char* module_name,
|
||||
bool is_global, bool is_export);
|
||||
|
||||
|
|
|
@ -162,6 +162,8 @@ SERIAL_EXPR(SET_CONSTRUCTOR_EXPR, 41)
|
|||
SERIAL_EXPR(VECTOR_CONSTRUCTOR_EXPR, 42)
|
||||
SERIAL_EXPR(TABLE_COERCE_EXPR, 43)
|
||||
SERIAL_EXPR(VECTOR_COERCE_EXPR, 44)
|
||||
SERIAL_EXPR(CAST_EXPR, 45)
|
||||
SERIAL_EXPR(IS_EXPR_, 46) // Name conflict with internal SER_IS_EXPR constant.
|
||||
|
||||
#define SERIAL_STMT(name, val) SERIAL_CONST(name, val, STMT)
|
||||
SERIAL_STMT(STMT, 1)
|
||||
|
|
|
@ -18,7 +18,7 @@ SerializationFormat::~SerializationFormat()
|
|||
free(output);
|
||||
}
|
||||
|
||||
void SerializationFormat::StartRead(char* data, uint32 arg_len)
|
||||
void SerializationFormat::StartRead(const char* data, uint32 arg_len)
|
||||
{
|
||||
input = data;
|
||||
input_len = arg_len;
|
||||
|
|
|
@ -19,7 +19,7 @@ public:
|
|||
virtual ~SerializationFormat();
|
||||
|
||||
// Unserialization.
|
||||
virtual void StartRead(char* data, uint32 len);
|
||||
virtual void StartRead(const char* data, uint32 len);
|
||||
virtual void EndRead();
|
||||
|
||||
virtual bool Read(int* v, const char* tag) = 0;
|
||||
|
@ -87,7 +87,7 @@ protected:
|
|||
uint32 output_size;
|
||||
uint32 output_pos;
|
||||
|
||||
char* input;
|
||||
const char* input;
|
||||
uint32 input_len;
|
||||
uint32 input_pos;
|
||||
|
||||
|
|
31
src/Stats.cc
31
src/Stats.cc
|
@ -9,10 +9,7 @@
|
|||
#include "DNS_Mgr.h"
|
||||
#include "Trigger.h"
|
||||
#include "threading/Manager.h"
|
||||
|
||||
#ifdef ENABLE_BROKER
|
||||
#include "broker/Manager.h"
|
||||
#endif
|
||||
|
||||
uint64 killed_by_inactivity = 0;
|
||||
|
||||
|
@ -226,25 +223,19 @@ void ProfileLogger::Log()
|
|||
));
|
||||
}
|
||||
|
||||
#ifdef ENABLE_BROKER
|
||||
auto cs = broker_mgr->ConsumeStatistics();
|
||||
auto cs = broker_mgr->GetStatistics();
|
||||
|
||||
file->Write(fmt("%0.6f Comm: peers=%zu stores=%zu "
|
||||
"store_queries=%zu store_responses=%zu "
|
||||
"outgoing_conn_status=%zu incoming_conn_status=%zu "
|
||||
"reports=%zu\n",
|
||||
network_time, cs.outgoing_peer_count, cs.data_store_count,
|
||||
cs.pending_query_count, cs.response_count,
|
||||
cs.outgoing_conn_status_count, cs.incoming_conn_status_count,
|
||||
cs.report_count));
|
||||
|
||||
for ( const auto& s : cs.print_count )
|
||||
file->Write(fmt(" %-25s prints dequeued=%zu\n", s.first.data(), s.second));
|
||||
for ( const auto& s : cs.event_count )
|
||||
file->Write(fmt(" %-25s events dequeued=%zu\n", s.first.data(), s.second));
|
||||
for ( const auto& s : cs.log_count )
|
||||
file->Write(fmt(" %-25s logs dequeued=%zu\n", s.first.data(), s.second));
|
||||
#endif
|
||||
"pending_queries=%zu "
|
||||
"events_in=%zu events_out=%zu "
|
||||
"logs_in=%zu logs_out=%zu "
|
||||
"ids_in=%zu ids_out=%zu ",
|
||||
network_time, cs.num_peers, cs.num_stores,
|
||||
cs.num_pending_queries,
|
||||
cs.num_events_incoming, cs.num_events_outgoing,
|
||||
cs.num_logs_incoming, cs.num_logs_outgoing,
|
||||
cs.num_ids_incoming, cs.num_ids_outgoing
|
||||
));
|
||||
|
||||
// Script-level state.
|
||||
unsigned int size, mem = 0;
|
||||
|
|
210
src/Stmt.cc
210
src/Stmt.cc
|
@ -546,8 +546,8 @@ static BroStmtTag get_last_stmt_tag(const Stmt* stmt)
|
|||
return get_last_stmt_tag(stmts->Stmts()[len - 1]);
|
||||
}
|
||||
|
||||
Case::Case(ListExpr* c, Stmt* arg_s)
|
||||
: cases(c), s(arg_s)
|
||||
Case::Case(ListExpr* arg_expr_cases, id_list* arg_type_cases, Stmt* arg_s)
|
||||
: expr_cases(arg_expr_cases), type_cases(arg_type_cases), s(arg_s)
|
||||
{
|
||||
BroStmtTag t = get_last_stmt_tag(Body());
|
||||
|
||||
|
@ -557,13 +557,18 @@ Case::Case(ListExpr* c, Stmt* arg_s)
|
|||
|
||||
Case::~Case()
|
||||
{
|
||||
Unref(cases);
|
||||
Unref(expr_cases);
|
||||
Unref(s);
|
||||
|
||||
loop_over_list((*type_cases), i)
|
||||
Unref((*type_cases)[i]);
|
||||
|
||||
delete type_cases;
|
||||
}
|
||||
|
||||
void Case::Describe(ODesc* d) const
|
||||
{
|
||||
if ( ! Cases() )
|
||||
if ( ! (expr_cases || type_cases) )
|
||||
{
|
||||
if ( ! d->IsBinary() )
|
||||
d->Add("default:");
|
||||
|
@ -578,20 +583,49 @@ void Case::Describe(ODesc* d) const
|
|||
return;
|
||||
}
|
||||
|
||||
const expr_list& e = Cases()->Exprs();
|
||||
|
||||
if ( ! d->IsBinary() )
|
||||
d->Add("case");
|
||||
|
||||
if ( expr_cases )
|
||||
{
|
||||
const expr_list& e = expr_cases->Exprs();
|
||||
|
||||
d->AddCount(e.length());
|
||||
|
||||
loop_over_list(e, j)
|
||||
loop_over_list(e, i)
|
||||
{
|
||||
if ( j > 0 && ! d->IsReadable() )
|
||||
if ( i > 0 && d->IsReadable() )
|
||||
d->Add(",");
|
||||
|
||||
d->SP();
|
||||
e[j]->Describe(d);
|
||||
e[i]->Describe(d);
|
||||
}
|
||||
}
|
||||
|
||||
if ( type_cases )
|
||||
{
|
||||
const id_list& t = *type_cases;
|
||||
|
||||
d->AddCount(t.length());
|
||||
|
||||
loop_over_list(t, i)
|
||||
{
|
||||
if ( i > 0 && d->IsReadable() )
|
||||
d->Add(",");
|
||||
|
||||
d->SP();
|
||||
d->Add("type");
|
||||
d->SP();
|
||||
t[i]->Type()->Describe(d);
|
||||
|
||||
if ( t[i]->Name() )
|
||||
{
|
||||
d->SP();
|
||||
d->Add("as");
|
||||
d->SP();
|
||||
d->Add(t[i]->Name());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ( d->IsReadable() )
|
||||
|
@ -607,12 +641,17 @@ TraversalCode Case::Traverse(TraversalCallback* cb) const
|
|||
{
|
||||
TraversalCode tc;
|
||||
|
||||
if ( cases )
|
||||
if ( expr_cases )
|
||||
{
|
||||
tc = cases->Traverse(cb);
|
||||
tc = expr_cases->Traverse(cb);
|
||||
HANDLE_TC_STMT_PRE(tc);
|
||||
}
|
||||
|
||||
if ( type_cases )
|
||||
{
|
||||
// No traverse support for types.
|
||||
}
|
||||
|
||||
tc = s->Traverse(cb);
|
||||
HANDLE_TC_STMT_PRE(tc);
|
||||
|
||||
|
@ -634,17 +673,48 @@ IMPLEMENT_SERIAL(Case, SER_CASE);
|
|||
bool Case::DoSerialize(SerialInfo* info) const
|
||||
{
|
||||
DO_SERIALIZE(SER_CASE, BroObj);
|
||||
return cases->Serialize(info) && this->s->Serialize(info);
|
||||
|
||||
if ( ! expr_cases->Serialize(info) )
|
||||
return false;
|
||||
|
||||
id_list empty;
|
||||
id_list* types = (type_cases ? type_cases : &empty);
|
||||
|
||||
if ( ! SERIALIZE(types->length()) )
|
||||
return false;
|
||||
|
||||
loop_over_list((*types), i)
|
||||
{
|
||||
if ( ! (*types)[i]->Serialize(info) )
|
||||
return false;
|
||||
}
|
||||
|
||||
return this->s->Serialize(info);
|
||||
}
|
||||
|
||||
bool Case::DoUnserialize(UnserialInfo* info)
|
||||
{
|
||||
DO_UNSERIALIZE(BroObj);
|
||||
|
||||
cases = (ListExpr*) Expr::Unserialize(info, EXPR_LIST);
|
||||
if ( ! cases )
|
||||
expr_cases = (ListExpr*) Expr::Unserialize(info, EXPR_LIST);
|
||||
if ( ! expr_cases )
|
||||
return false;
|
||||
|
||||
int len;
|
||||
if ( ! UNSERIALIZE(&len) )
|
||||
return false;
|
||||
|
||||
type_cases = new id_list;
|
||||
|
||||
while ( len-- )
|
||||
{
|
||||
ID* id = ID::Unserialize(info);
|
||||
if ( ! id )
|
||||
return false;
|
||||
|
||||
type_cases->append(id);
|
||||
}
|
||||
|
||||
this->s = Stmt::Unserialize(info);
|
||||
return this->s != 0;
|
||||
}
|
||||
|
@ -661,7 +731,7 @@ void SwitchStmt::Init()
|
|||
comp_hash = new CompositeHash(t);
|
||||
Unref(t);
|
||||
|
||||
case_label_map.SetDeleteFunc(int_del_func);
|
||||
case_label_value_map.SetDeleteFunc(int_del_func);
|
||||
}
|
||||
|
||||
SwitchStmt::SwitchStmt(Expr* index, case_list* arg_cases) :
|
||||
|
@ -669,16 +739,22 @@ SwitchStmt::SwitchStmt(Expr* index, case_list* arg_cases) :
|
|||
{
|
||||
Init();
|
||||
|
||||
if ( ! is_atomic_type(e->Type()) )
|
||||
e->Error("switch expression must be of an atomic type");
|
||||
bool have_exprs = false;
|
||||
bool have_types = false;
|
||||
|
||||
loop_over_list(*cases, i)
|
||||
{
|
||||
Case* c = (*cases)[i];
|
||||
ListExpr* le = c->Cases();
|
||||
ListExpr* le = c->ExprCases();
|
||||
id_list* tl = c->TypeCases();
|
||||
|
||||
if ( le )
|
||||
{
|
||||
have_exprs = true;
|
||||
|
||||
if ( ! is_atomic_type(e->Type()) )
|
||||
e->Error("switch expression must be of an atomic type when cases are expressions");
|
||||
|
||||
if ( ! le->Type()->AsTypeList()->AllMatch(e->Type(), false) )
|
||||
{
|
||||
le->Error("case expression type differs from switch type", e);
|
||||
|
@ -736,12 +812,34 @@ SwitchStmt::SwitchStmt(Expr* index, case_list* arg_cases) :
|
|||
exprs[j]->Error("case label expression isn't constant");
|
||||
else
|
||||
{
|
||||
if ( ! AddCaseLabelMapping(exprs[j]->ExprVal(), i) )
|
||||
if ( ! AddCaseLabelValueMapping(exprs[j]->ExprVal(), i) )
|
||||
exprs[j]->Error("duplicate case label");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
else if ( tl )
|
||||
{
|
||||
have_types = true;
|
||||
|
||||
loop_over_list((*tl), j)
|
||||
{
|
||||
BroType* ct = (*tl)[j]->Type();
|
||||
|
||||
if ( ! can_cast_value_to_type(e->Type(), ct) )
|
||||
{
|
||||
c->Error("cannot cast switch expression to case type");
|
||||
continue;
|
||||
}
|
||||
|
||||
if ( ! AddCaseLabelTypeMapping((*tl)[j], i) )
|
||||
{
|
||||
c->Error("duplicate case label");
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
else
|
||||
{
|
||||
if ( default_case_idx != -1 )
|
||||
|
@ -750,6 +848,10 @@ SwitchStmt::SwitchStmt(Expr* index, case_list* arg_cases) :
|
|||
default_case_idx = i;
|
||||
}
|
||||
}
|
||||
|
||||
if ( have_exprs && have_types )
|
||||
Error("cannot mix cases with expressions and types");
|
||||
|
||||
}
|
||||
|
||||
SwitchStmt::~SwitchStmt()
|
||||
|
@ -761,7 +863,7 @@ SwitchStmt::~SwitchStmt()
|
|||
delete comp_hash;
|
||||
}
|
||||
|
||||
bool SwitchStmt::AddCaseLabelMapping(const Val* v, int idx)
|
||||
bool SwitchStmt::AddCaseLabelValueMapping(const Val* v, int idx)
|
||||
{
|
||||
HashKey* hk = comp_hash->ComputeHash(v, 1);
|
||||
|
||||
|
@ -772,7 +874,7 @@ bool SwitchStmt::AddCaseLabelMapping(const Val* v, int idx)
|
|||
type_name(v->Type()->Tag()), type_name(e->Type()->Tag()));
|
||||
}
|
||||
|
||||
int* label_idx = case_label_map.Lookup(hk);
|
||||
int* label_idx = case_label_value_map.Lookup(hk);
|
||||
|
||||
if ( label_idx )
|
||||
{
|
||||
|
@ -780,12 +882,32 @@ bool SwitchStmt::AddCaseLabelMapping(const Val* v, int idx)
|
|||
return false;
|
||||
}
|
||||
|
||||
case_label_map.Insert(hk, new int(idx));
|
||||
case_label_value_map.Insert(hk, new int(idx));
|
||||
delete hk;
|
||||
return true;
|
||||
}
|
||||
|
||||
int SwitchStmt::FindCaseLabelMatch(const Val* v) const
|
||||
bool SwitchStmt::AddCaseLabelTypeMapping(ID* t, int idx)
|
||||
{
|
||||
for ( auto i : case_label_type_list )
|
||||
{
|
||||
if ( same_type(i.first->Type(), t->Type()) )
|
||||
return false;
|
||||
}
|
||||
|
||||
auto e = std::make_pair(t, idx);
|
||||
case_label_type_list.push_back(e);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
std::pair<int, ID*> SwitchStmt::FindCaseLabelMatch(const Val* v) const
|
||||
{
|
||||
int label_idx = -1;
|
||||
ID* label_id = 0;
|
||||
|
||||
// Find matching expression cases.
|
||||
if ( case_label_value_map.Length() )
|
||||
{
|
||||
HashKey* hk = comp_hash->ComputeHash(v, 1);
|
||||
|
||||
|
@ -794,24 +916,42 @@ int SwitchStmt::FindCaseLabelMatch(const Val* v) const
|
|||
reporter->PushLocation(e->GetLocationInfo());
|
||||
reporter->Error("switch expression type mismatch (%s/%s)",
|
||||
type_name(v->Type()->Tag()), type_name(e->Type()->Tag()));
|
||||
return -1;
|
||||
return std::make_pair(-1, nullptr);
|
||||
}
|
||||
|
||||
int* label_idx = case_label_map.Lookup(hk);
|
||||
if ( auto i = case_label_value_map.Lookup(hk) )
|
||||
label_idx = *i;
|
||||
|
||||
delete hk;
|
||||
}
|
||||
|
||||
if ( ! label_idx )
|
||||
return default_case_idx;
|
||||
// Find matching type cases.
|
||||
for ( auto i : case_label_type_list )
|
||||
{
|
||||
auto id = i.first;
|
||||
auto type = id->Type();
|
||||
|
||||
if ( can_cast_value_to_type(v, type) )
|
||||
{
|
||||
label_idx = i.second;
|
||||
label_id = id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if ( label_idx < 0 )
|
||||
return std::make_pair(default_case_idx, nullptr);
|
||||
else
|
||||
return *label_idx;
|
||||
return std::make_pair(label_idx, label_id);
|
||||
}
|
||||
|
||||
Val* SwitchStmt::DoExec(Frame* f, Val* v, stmt_flow_type& flow) const
|
||||
{
|
||||
Val* rval = 0;
|
||||
|
||||
int matching_label_idx = FindCaseLabelMatch(v);
|
||||
auto m = FindCaseLabelMatch(v);
|
||||
int matching_label_idx = m.first;
|
||||
ID* matching_id = m.second;
|
||||
|
||||
if ( matching_label_idx == -1 )
|
||||
return 0;
|
||||
|
@ -820,6 +960,12 @@ Val* SwitchStmt::DoExec(Frame* f, Val* v, stmt_flow_type& flow) const
|
|||
{
|
||||
const Case* c = (*cases)[i];
|
||||
|
||||
if ( matching_id )
|
||||
{
|
||||
auto cv = cast_value_to_type(v, matching_id->Type());
|
||||
f->SetElement(matching_id->Offset(), cv);
|
||||
}
|
||||
|
||||
flow = FLOW_NEXT;
|
||||
rval = c->Body()->Exec(f, flow);
|
||||
|
||||
|
@ -841,7 +987,7 @@ int SwitchStmt::IsPure() const
|
|||
loop_over_list(*cases, i)
|
||||
{
|
||||
Case* c = (*cases)[i];
|
||||
if ( ! c->Cases()->IsPure() || ! c->Body()->IsPure() )
|
||||
if ( ! c->ExprCases()->IsPure() || ! c->Body()->IsPure() )
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -928,7 +1074,7 @@ bool SwitchStmt::DoUnserialize(UnserialInfo* info)
|
|||
|
||||
loop_over_list(*cases, i)
|
||||
{
|
||||
const ListExpr* le = (*cases)[i]->Cases();
|
||||
const ListExpr* le = (*cases)[i]->ExprCases();
|
||||
|
||||
if ( ! le )
|
||||
continue;
|
||||
|
@ -937,7 +1083,7 @@ bool SwitchStmt::DoUnserialize(UnserialInfo* info)
|
|||
|
||||
loop_over_list(exprs, j)
|
||||
{
|
||||
if ( ! AddCaseLabelMapping(exprs[j]->ExprVal(), i) )
|
||||
if ( ! AddCaseLabelValueMapping(exprs[j]->ExprVal(), i) )
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
35
src/Stmt.h
35
src/Stmt.h
|
@ -183,11 +183,14 @@ protected:
|
|||
|
||||
class Case : public BroObj {
|
||||
public:
|
||||
Case(ListExpr* c, Stmt* arg_s);
|
||||
Case(ListExpr* c, id_list* types, Stmt* arg_s);
|
||||
~Case() override;
|
||||
|
||||
const ListExpr* Cases() const { return cases; }
|
||||
ListExpr* Cases() { return cases; }
|
||||
const ListExpr* ExprCases() const { return expr_cases; }
|
||||
ListExpr* ExprCases() { return expr_cases; }
|
||||
|
||||
const id_list* TypeCases() const { return type_cases; }
|
||||
id_list* TypeCases() { return type_cases; }
|
||||
|
||||
const Stmt* Body() const { return s; }
|
||||
Stmt* Body() { return s; }
|
||||
|
@ -201,11 +204,12 @@ public:
|
|||
|
||||
protected:
|
||||
friend class Stmt;
|
||||
Case() { cases = 0; s = 0; }
|
||||
Case() { expr_cases = 0; type_cases = 0; s = 0; }
|
||||
|
||||
DECLARE_SERIAL(Case);
|
||||
|
||||
ListExpr* cases;
|
||||
ListExpr* expr_cases;
|
||||
id_list* type_cases;
|
||||
Stmt* s;
|
||||
};
|
||||
|
||||
|
@ -232,20 +236,27 @@ protected:
|
|||
// Initialize composite hash and case label map.
|
||||
void Init();
|
||||
|
||||
// Adds an entry in case_label_map for the given value to associate it
|
||||
// Adds an entry in case_label_value_map for the given value to associate it
|
||||
// with the given index in the cases list. If the entry already exists,
|
||||
// returns false, else returns true.
|
||||
bool AddCaseLabelMapping(const Val* v, int idx);
|
||||
bool AddCaseLabelValueMapping(const Val* v, int idx);
|
||||
|
||||
// Returns index of a case label that's equal to the value, or
|
||||
// default_case_idx if no case label matches (which may be -1 if there's
|
||||
// no default label).
|
||||
int FindCaseLabelMatch(const Val* v) const;
|
||||
// Adds an entry in case_label_type_map for the given type (w/ ID) to
|
||||
// associate it with the given index in the cases list. If an entry
|
||||
// for the type already exists, returns false; else returns true.
|
||||
bool AddCaseLabelTypeMapping(ID* t, int idx);
|
||||
|
||||
// Returns index of a case label that matches the value, or
|
||||
// default_case_idx if no case label matches (which may be -1 if
|
||||
// there's no default label). The second tuple element is the ID of
|
||||
// the matching type-based case if it defines one.
|
||||
std::pair<int, ID*> FindCaseLabelMatch(const Val* v) const;
|
||||
|
||||
case_list* cases;
|
||||
int default_case_idx;
|
||||
CompositeHash* comp_hash;
|
||||
PDict(int) case_label_map;
|
||||
PDict(int) case_label_value_map;
|
||||
std::vector<std::pair<ID*, int>> case_label_type_list;
|
||||
};
|
||||
|
||||
class AddStmt : public ExprStmt {
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#include "Timer.h"
|
||||
#include "Desc.h"
|
||||
#include "Serializer.h"
|
||||
#include "broker/Manager.h"
|
||||
|
||||
// Names of timers in same order than in TimerType.
|
||||
const char* TimerNames[] = {
|
||||
|
@ -103,6 +104,7 @@ int TimerMgr::Advance(double arg_t, int max_expire)
|
|||
last_timestamp = 0;
|
||||
num_expired = 0;
|
||||
last_advance = timer_mgr->Time();
|
||||
broker_mgr->AdvanceTime(arg_t);
|
||||
|
||||
return DoAdvance(t, max_expire);
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue