Merge remote-tracking branch 'origin/master' into topic/johanna/ocsp

commit dfc871f831
Johanna Amann, 2017-02-08 10:35:12 -08:00

1372 changed files with 158561 additions and 123481 deletions

.gitmodules

@@ -25,3 +25,6 @@
 [submodule "aux/broker"]
 	path = aux/broker
 	url = git://git.bro.org/broker
+[submodule "aux/netcontrol-connectors"]
+	path = aux/netcontrol-connectors
+	url = git://git.bro.org/bro-netcontrol
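A new submodule entry like this only takes effect once the submodule is
initialized; a minimal sketch of the usual workflow (the top-level clone
URL is an assumption based on the submodule URLs above):

	# Fetch the newly added submodule in an existing checkout:
	git submodule update --init aux/netcontrol-connectors

	# Or clone the tree with all submodules from the start:
	git clone --recursive git://git.bro.org/bro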

CHANGES

(file diff suppressed because it is too large)

@@ -88,7 +88,7 @@ endif ()
 include_directories(BEFORE
     ${PCAP_INCLUDE_DIR}
-    ${OpenSSL_INCLUDE_DIR}
+    ${OPENSSL_INCLUDE_DIR}
     ${BIND_INCLUDE_DIR}
     ${BinPAC_INCLUDE_DIR}
     ${ZLIB_INCLUDE_DIR}
@@ -141,7 +141,7 @@ endif ()
 set(brodeps
     ${BinPAC_LIBRARY}
     ${PCAP_LIBRARY}
-    ${OpenSSL_LIBRARIES}
+    ${OPENSSL_LIBRARIES}
     ${BIND_LIBRARY}
     ${ZLIB_LIBRARY}
     ${JEMALLOC_LIBRARIES}
@@ -170,9 +170,19 @@ include(RequireCXX11)
 # Tell the plugin code that we're building as part of the main tree.
 set(BRO_PLUGIN_INTERNAL_BUILD true CACHE INTERNAL "" FORCE)
 
+set(DEFAULT_BROPATH .:${BRO_SCRIPT_INSTALL_PATH}:${BRO_SCRIPT_INSTALL_PATH}/policy:${BRO_SCRIPT_INSTALL_PATH}/site)
+
+if ( NOT BINARY_PACKAGING_MODE )
+    set(BRO_DIST ${CMAKE_SOURCE_DIR})
+endif ()
+
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/bro-config.h.in
                ${CMAKE_CURRENT_BINARY_DIR}/bro-config.h)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/bro-config.in
+               ${CMAKE_CURRENT_BINARY_DIR}/bro-config @ONLY)
+install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/bro-config DESTINATION bin)
 
 include_directories(${CMAKE_CURRENT_BINARY_DIR})
 
 ########################################################################

@@ -1,4 +1,4 @@
-Copyright (c) 1995-2015, The Regents of the University of California
+Copyright (c) 1995-2016, The Regents of the University of California
 through the Lawrence Berkeley National Laboratory and the
 International Computer Science Institute. All rights reserved.

@@ -42,10 +42,6 @@ dist:
 	@$(HAVE_MODULES) && find $(VERSION_MIN) -name .git\* | xargs rm -rf || exit 0
 	@$(HAVE_MODULES) && tar -czf $(VERSION_MIN).tgz $(VERSION_MIN) && echo Package: $(VERSION_MIN).tgz && rm -rf $(VERSION_MIN) || exit 0
 
-bindist:
-	@( cd pkg && ( ./make-deb-packages || ./make-mac-packages || \
-	./make-rpm-packages ) )
-
 distclean:
 	rm -rf $(BUILD)
 	$(MAKE) -C testing $@
@@ -65,4 +61,4 @@ configured:
 	@test -d $(BUILD) || ( echo "Error: No build/ directory found. Did you run configure?" && exit 1 )
 	@test -e $(BUILD)/Makefile || ( echo "Error: No build/Makefile found. Did you run configure?" && exit 1 )
 
-.PHONY : all install clean doc docclean dist bindist distclean configured
+.PHONY : all install clean doc docclean dist distclean configured

NEWS

@@ -4,8 +4,8 @@ release. For an exhaustive list of changes, see the ``CHANGES`` file
 (note that submodules, such as BroControl and Broccoli, come with
 their own ``CHANGES``.)
 
-Bro 2.5 (in progress)
-=====================
+Bro 2.5
+=======
 
 New Dependencies
 ----------------
@@ -13,19 +13,92 @@ New Dependencies
 - Bro now requires a compiler with C++11 support for building the
   source code.
 
-- Bro now requires the C++ Actor Framework, CAF, which must be
-  installed first. See http://actor-framework.org.
-
 - Bro now requires Python instead of Perl to compile the source code.
 
-- The pcap buffer size can set through the new option Pcap::bufsize.
+- When enabling Broker (which is disabled by default), Bro now requires
+  version 0.14 of the C++ Actor Framework.
 
 New Functionality
 -----------------
 
+- SMB analyzer. This is the rewrite that has been in development for
+  several years. The scripts are currently not loaded by default and
+  must be loaded manually by loading policy/protocols/smb. The next
+  release will load the smb scripts by default.
+
+  - Implements SMB1+2.
+
+  - Fully integrated with the file analysis framework so that files
+    transferred over SMB can be analyzed.
+
+  - Includes GSSAPI and NTLM analyzers and reimplements the DCE-RPC
+    analyzer.
+
+  - New logs: smb_cmd.log, smb_files.log, smb_mapping.log, ntlm.log,
+    and dce_rpc.log.
+
+  - Not every possible SMB command or functionality is implemented, but
+    generally, file handling should work whenever files are transferred.
+    Please speak up on the mailing list if there is an obvious oversight.
+
+- Bro now includes the NetControl framework. The framework allows for
+  easy interaction of Bro with hardware and software switches,
+  firewalls, etc. New log files: netcontrol.log,
+  netcontrol_catch_release.log, netcontrol_drop.log, and
+  netcontrol_shunt.log.
+
+- Bro now includes the OpenFlow framework, which exposes the data
+  structures necessary to interface with OpenFlow-capable hardware.
+
+- Bro's Intelligence Framework was refactored and new functionality
+  has been added:
+
+  - The framework now supports the new indicator type Intel::SUBNET.
+    As subnets are matched against seen addresses, the new field
+    'matched' in intel.log was introduced to indicate which indicator
+    type(s) caused the hit.
+
+  - The new function remove() allows deleting intelligence items.
+
+  - The intel framework now supports expiration of intelligence items.
+    Expiration can be configured using the new Intel::item_expiration
+    constant and handled using the item_expired() hook. The new script
+    do_expire.bro removes expired items.
+
+  - The new hook extend_match() allows extending the framework. The new
+    policy script whitelist.bro uses the hook to implement whitelisting.
+
+  - Intel notices are now suppressible, and mails for intel notices now
+    list the identified services as well as the intel source.
+
+- There is a new entropy analyzer for files.
+
+- Bro now supports the remote framebuffer protocol (RFB) that is used
+  by VNC servers for remote graphical displays. New log file: rfb.log.
+
+- Bro now supports the Radiotap header for 802.11 frames.
+
+- Bro now has rudimentary IMAP and XMPP analyzers examining the initial
+  phases of the protocol. Right now these analyzers only identify
+  STARTTLS sessions, handing them over to TLS analysis. These analyzers
+  do not yet analyze any further IMAP/XMPP content.
+
+- New functionality has been added to the SSL/TLS analyzer:
+
+  - Bro now supports (draft) TLS 1.3.
+
+  - The new event ssl_extension_signature_algorithm() allows access to
+    the TLS signature_algorithms extension that lists client-supported
+    signature and hash algorithm pairs.
+
+  - The new event ssl_extension_key_share gives access to the supported
+    named groups in TLS 1.3.
+
+  - The new event ssl_application_data gives information about
+    application data that is exchanged before encryption fully starts.
+    This is used to detect when encryption starts in TLS 1.3.
+
 - Bro now tracks VLAN IDs. To record them inside the connection log,
   load protocols/conn/vlan-logging.bro.
 
+- A new dns_CAA_reply() event gives access to DNS Certification
+  Authority Authorization replies (see the sketch below).
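A minimal handler for the new event might look as follows; the event
signature shown here is an assumption modeled on Bro's other DNS reply
events, not taken from this diff:

	event dns_CAA_reply(c: connection, msg: dns_msg, ans: dns_answer,
	                    flags: count, tag: string, value: string)
		{
		# e.g., tag "issue" with value "letsencrypt.org"
		print fmt("CAA for %s: %s=%s", ans$query, tag, value);
		}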
 - A new per-packet event raw_packet() provides access to layer 2
   information. Use with care; generating events per packet is
   expensive.
 
@@ -35,33 +108,251 @@ New Functionality
   argument that will be used for decoding errors into weird.log
   (instead of reporter.log).
 
+- A new get_current_packet_header() bif returns the headers of the
+  current packet.
+
+- Three new built-in functions for handling set[subnet] and
+  table[subnet]:
+
+  - check_subnet(subnet, table) checks if a specific subnet is a member
+    of a set/table. This is different from the "in" operator, which
+    always performs a longest-prefix match.
+
+  - matching_subnets(subnet, table) returns all subnets of the set or
+    table that contain the given subnet.
+
+  - filter_subnet_table(subnet, table) works like matching_subnets, but
+    returns a table containing all matching entries.
+
+- Several built-in functions for handling IP addresses and subnets were
+  added (see the sketch below):
+
+  - is_v4_subnet(subnet) checks whether a subnet specification is IPv4.
+
+  - is_v6_subnet(subnet) checks whether a subnet specification is IPv6.
+
+  - addr_to_subnet(addr) converts an IP address to a /32 subnet.
+
+  - subnet_to_addr(subnet) returns the IP address part of a subnet.
+
+  - subnet_width(subnet) returns the width of a subnet.
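A small sketch of these helpers in use; the results shown in the
comments follow from the descriptions above rather than from this diff:

	event bro_init()
		{
		local nets: set[subnet] = { 10.0.0.0/8, 192.168.0.0/16 };

		# Exact membership test, unlike the longest-prefix match of "in":
		print check_subnet(10.0.0.0/8, nets);         # T
		print check_subnet(10.1.0.0/16, nets);        # F

		# Which configured subnets contain this one?
		print matching_subnets(192.168.1.0/24, nets); # 192.168.0.0/16

		print addr_to_subnet(192.168.1.1);            # 192.168.1.1/32
		print subnet_to_addr(10.0.0.0/8);             # 10.0.0.0
		print subnet_width(10.0.0.0/8);               # 8
		print is_v4_subnet(10.0.0.0/8);               # T
		}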
+- The IRC analyzer now recognizes StartTLS sessions and enables the SSL
+  analyzer for them.
+
+- The misc/stats.bro script is now loaded by default and logs more Bro
+  execution statistics to the stats.log file than it did previously. It
+  now also uses the standard Bro log format.
+
+- A set of new built-in functions for gathering execution statistics
+  (see the sketch below):
+  get_net_stats(), get_conn_stats(), get_proc_stats(),
+  get_event_stats(), get_reassembler_stats(), get_dns_stats(),
+  get_timer_stats(), get_file_analysis_stats(), get_thread_stats(),
+  get_gap_stats(), get_matcher_stats()
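For instance, a minimal sketch that dumps a few of these statistics
records at shutdown (the exact record fields are whatever the
corresponding bifs return and are not spelled out in this diff):

	event bro_done()
		{
		print get_net_stats();    # link-level packet counters
		print get_conn_stats();   # connection bookkeeping counters
		print get_timer_stats();  # timer counters
		}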
+- Two new functions, haversine_distance() and haversine_distance_ip(),
+  for calculating geographic distances. The latter function requires
+  that Bro be built with libgeoip.
+
+- Table expiration timeout expressions are evaluated dynamically as
+  timestamps are updated.
+
+- The pcap buffer size can be set through the new option Pcap::bufsize.
+
+- Input framework readers of the stream types Table and Event can now
+  define a custom event (specified by the new "error_ev" field) to
+  receive error messages emitted by the input stream. This can, e.g.,
+  be used to raise notices in case errors occur when reading an
+  important input source.
+
+- The logging framework now supports user-defined record separators,
+  renaming of column names, as well as extension data columns that can
+  be added to specific or all logfiles (e.g., to add new names).
+
+- The new "bro-config" script can be used to determine the Bro
+  installation paths.
+
+- New BroControl functionality in aux/broctl:
+
+  - There is a new node type "logger" that can be specified in
+    node.cfg (that file has a commented-out example). The purpose of
+    this new node type is to receive logs from all nodes in a cluster
+    in order to reduce the load on the manager node. However, if
+    there is no "logger" node, then the manager node will handle
+    logging as usual.
+
+  - The post-terminate script will send email if it fails to archive
+    any log files. These mails can be turned off by changing the
+    value of the new BroControl option MailArchiveLogFail.
+
+  - Added the ability for "broctl deploy" to reload the BroControl
+    configuration (both broctl.cfg and node.cfg). This happens
+    automatically if broctl detects any changes to those config files
+    since the last time the config was loaded. Note that this feature
+    is relevant only when using the BroControl shell interactively.
+
+  - The BroControl plugin API has a new function "broctl_config".
+    This gives plugin authors the ability to add their own script code
+    to the autogenerated broctl-config.bro script.
+
+  - There is a new BroControl plugin for custom load balancing. This
+    plugin can be used by setting "lb_method=custom" for your worker
+    nodes in node.cfg. To support packet source plugins, it allows
+    configuration of a prefix and suffix for the interface name.
+
 - New Bro plugins in aux/plugins:
 
   - af_packet: Native AF_PACKET support.
+  - kafka: A log writer interfacing to Kafka.
   - myricom: Native Myricom SNF v3 support.
   - pf_ring: Native PF_RING support.
+  - postgresql: A PostgreSQL reader/writer.
   - redis: An experimental log writer for Redis.
-  - tcprs: An TCP-level analyzer detecting retransmissions, reordering, and more.
+  - tcprs: A TCP-level analyzer detecting retransmissions, reordering, and more.
 
 Changed Functionality
 ---------------------
 
-- Some script-level identifier have changed their names:
-
-  snaplen -> Pcap::snaplen
-  precompile_pcap_filter() -> Pcap::precompile_pcap_filter()
-  install_pcap_filter() -> Pcap::install_pcap_filter()
-  pcap_error() -> Pcap::pcap_error()
+- Log changes:
+
+  - Connections
+    The 'history' field gains two new flags: '^' indicates that
+    Bro heuristically flipped the direction of the connection;
+    't/T' indicates the first TCP payload retransmission from the
+    originator or responder, respectively.
+
+  - Intelligence
+    New field 'matched' to indicate which indicator type(s) caused
+    the hit.
+
+  - DNS
+    New 'rtt' field to indicate the round-trip time between when a
+    request was sent and when a reply started.
+
+  - SMTP
+    New 'cc' field which includes the 'Cc' header from MIME
+    messages sent over SMTP.
+    Changes in the 'mailfrom' and 'rcptto' fields to remove some
+    non-address cruft that tends to be found. The main example is
+    the change from ``"<user@domain.com>"`` to ``"user@domain.com"``.
+
+  - HTTP
+    Removed the 'filename' field (which was seldom used).
+    New 'orig_filenames' and 'resp_filenames' fields, which each
+    contain a vector of filenames seen in entities transferred.
+
+  - stats.log
+    The following fields have been added: active_tcp_conns,
+    active_udp_conns, active_icmp_conns, tcp_conns, udp_conns,
+    icmp_conns, timers, active_timers, files, active_files,
+    dns_requests, active_dns_requests, reassem_tcp_size,
+    reassem_file_size, reassem_frag_size, reassem_unknown_size.
+    The following fields have been renamed: lag -> pkt_lag.
+    The following fields have been removed: pkts_recv.
+
+- The BrokerComm and BrokerStore namespaces were renamed to Broker.
+  The Broker "print()" function was renamed to Broker::send_print(),
+  and the "event()" function was renamed to Broker::send_event()
+  (see the sketch below).
+- The constant ``SSH::skip_processing_after_detection`` was removed.
+  Its functionality was replaced by the new constant
+  ``SSH::disable_analyzer_after_detection``.
+
+- The ``net_stats()`` and ``resource_usage()`` functions have been
+  removed; their functionality is now provided by the new execution
+  statistics functions (see above).
+
+- Some script-level identifiers have changed their names (see the
+  sketch below):
+
+  - snaplen -> Pcap::snaplen
+  - precompile_pcap_filter() -> Pcap::precompile_pcap_filter()
+  - install_pcap_filter() -> Pcap::install_pcap_filter()
+  - pcap_error() -> Pcap::error()
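For example, a script that used to tune the global snaplen now uses the
Pcap namespace (the value here is illustrative):

	# Bro 2.4:  redef snaplen = 9000;
	# Bro 2.5:
	redef Pcap::snaplen = 9000;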
+- TCP analysis was changed to process connections without the initial
+  SYN packet. In the past, connections without a full handshake were
+  treated as partial, meaning that most application-layer analyzers
+  would refuse to inspect the payload. Now, Bro will consider these
+  connections complete, and all analyzers will process them normally.
+
+- The ``policy/misc/capture-loss.bro`` script is now loaded by default.
+
+- The traceroute detection script package
+  ``policy/misc/detect-traceroute`` is no longer loaded by default.
+
+- Changed BroControl functionality in aux/broctl:
+
+  - The networks.cfg file now contains private IP space 172.16.0.0/12
+    by default.
+
+  - Upon startup, if broctl can't get IP addresses from the "ifconfig"
+    command for any reason, then broctl will now also try to use the
+    "ip" command.
+
+  - BroControl will now automatically search the Bro plugin directory
+    for BroControl plugins (in addition to all the other places where
+    BroControl searches). This enables automatic loading of
+    BroControl plugins that are provided by a Bro plugin.
+
+  - Changed the default value of the StatusCmdShowAll option so that
+    the "broctl status" command runs faster. This also means that
+    there is no longer a "Peers" column in the status output by
+    default.
+
+  - Users can now specify a more granular log expiration interval. The
+    BroControl option LogExpireInterval can be set to an arbitrary
+    time interval instead of just an integer number of days. The time
+    interval is specified as an integer followed by a time unit:
+    "day", "hr", or "min". For backward compatibility, an integer
+    value without a time unit is still interpreted as a number of
+    days.
+
+  - Changed the text of crash report emails. Now crash reports tell
+    the user to forward the mail to the Bro team only when a backtrace
+    is included in the crash report. If there is no backtrace, then
+    the crash report includes instructions on how to get backtraces
+    included in future crash reports.
+
+  - There is a new option SitePolicyScripts that replaces
+    SitePolicyStandalone (the old option is still available, but will
+    be removed in the next release).
+
+Removed Functionality
+---------------------
+
+- The app-stats scripts have been removed because they weren't being
+  maintained and were becoming inaccurate (as a result, the
+  app_stats.log is also gone). They were also prone to needing regular
+  updates as the internet changed, and will likely be more relevant if
+  maintained externally.
+
+- The event ack_above_hole() has been removed, as it was a subset of
+  content_gap() and led to plenty of noise.
+
+- The command line options ``--analyze``, ``--set-seed``, and
+  ``--md5-hashkey`` have been removed.
+
+- The packaging scripts pkg/make-\*-packages are gone. They aren't
+  used anymore for the binary Bro packages that the project
+  distributes, haven't been supported in a while, and have problems.
+
 Deprecated Functionality
 ------------------------
 
 - The built-in functions decode_base64_custom() and
   encode_base64_custom() are no longer needed and will be removed
   in the future. Their functionality is now provided directly by
   decode_base64() and encode_base64(), which take an optional
   parameter to change the Base64 alphabet.
 
 Bro 2.4
 =======
 
@@ -105,11 +396,11 @@ New Functionality
   sessions, supports v1, and introduces the intelligence type
   ``Intel::PUBKEY_HASH`` and location ``SSH::IN_SERVER_HOST_KEY``. The
   analyzer also generates a set of additional events
-  (``ssh_auth_successful``, ``ssh_auth_failed``, ``ssh_capabilities``,
-  ``ssh2_server_host_key``, ``ssh1_server_host_key``,
-  ``ssh_encrypted_packet``, ``ssh2_dh_server_params``,
-  ``ssh2_gss_error``, ``ssh2_ecc_key``). See next section for
-  incompatible SSH changes.
+  (``ssh_auth_successful``, ``ssh_auth_failed``, ``ssh_auth_attempted``,
+  ``ssh_auth_result``, ``ssh_capabilities``, ``ssh2_server_host_key``,
+  ``ssh1_server_host_key``, ``ssh_encrypted_packet``,
+  ``ssh2_dh_server_params``, ``ssh2_gss_error``, ``ssh2_ecc_key``). See
+  next section for incompatible SSH changes.
 
 - Bro's file analysis now supports reassembly of files that are not
   transferred/seen sequentially. The default file reassembly buffer

@@ -1 +1 @@
-2.4-217
+2.5-51

@@ -1 +1 @@
-Subproject commit 214294c502d377bb7bf511eac8c43608e54c875a
+Subproject commit 0f1ecfa97236635fb93e013404e6b30d6c506ddd

@@ -1 +1 @@
-Subproject commit 4e0d2bff4b2c287f66186c3654ef784bb0748d11
+Subproject commit b1e75f6a212250b1730a438f27fc778618b67ec3

@@ -1 +1 @@
-Subproject commit 959cc0a8181e7f4b07559a6aecca2a0d7d3d445c
+Subproject commit ed52e3414b31b05ec9abed627b4153c8e2243441

@@ -1 +1 @@
-Subproject commit 1299fab8f6e98c8b0b88d01c60bb6b21329e19e5
+Subproject commit 73dbc79ac24cdfef07d8574a4da5d43056ba5fa5

@@ -1 +1 @@
-Subproject commit 9a2e8ec7b365bde282edc7301c7936eed6b4fbbb
+Subproject commit 23def70c44128d19138029615dd154359286e111

@@ -1 +1 @@
-Subproject commit 71a1e3efc437aa9f981be71affa1c4615e8d98a5
+Subproject commit 9d5c7bcac9b04710931bc8a42b545f0691561b2f

@@ -0,0 +1 @@
+Subproject commit 9f3d6fce49cad3b45b5ddd0fe1f3c79186e1d2e7

@@ -1 +1 @@
-Subproject commit 35007df0974b566f75d7c82af5b4d5a022333d87
+Subproject commit 2322840bcdbd618ae7bd24e22d874fb30ab89bbb

@@ -14,12 +14,18 @@
 /* We are on a Linux system */
 #cmakedefine HAVE_LINUX
 
+/* We are on a Mac OS X (Darwin) system */
+#cmakedefine HAVE_DARWIN
+
 /* Define if you have the `mallinfo' function. */
 #cmakedefine HAVE_MALLINFO
 
 /* Define if you have the <memory.h> header file. */
 #cmakedefine HAVE_MEMORY_H
 
+/* Define if you have the <netinet/ether.h> header file */
+#cmakedefine HAVE_NETINET_ETHER_H
+
 /* Define if you have the <netinet/if_ether.h> header file. */
 #cmakedefine HAVE_NETINET_IF_ETHER_H

bro-config.in (new executable file)

@@ -0,0 +1,63 @@
+#!/bin/sh
+
+version=@VERSION@
+prefix=@CMAKE_INSTALL_PREFIX@
+script_dir=@BRO_SCRIPT_INSTALL_PATH@
+site_dir=@BRO_SCRIPT_INSTALL_PATH@/site
+plugin_dir=@BRO_PLUGIN_INSTALL_PATH@
+config_dir=@BRO_ETC_INSTALL_DIR@
+python_dir=@PY_MOD_INSTALL_DIR@
+bropath=@DEFAULT_BROPATH@
+bro_dist=@BRO_DIST@
+
+usage="\
+Usage: bro-config [--version] [--prefix] [--script_dir] [--site_dir] [--plugin_dir] [--config_dir] [--python_dir] [--bropath] [--bro_dist]"
+
+if [ $# -eq 0 ] ; then
+    echo "${usage}" 1>&2
+    exit 1
+fi
+
+while [ $# -ne 0 ]; do
+    case "$1" in
+        -*=*) optarg=`echo "$1" | sed 's/[-_a-zA-Z0-9]*=//'` ;;
+        *) optarg= ;;
+    esac
+
+    case $1 in
+        --version)
+            echo $version
+            ;;
+        --prefix)
+            echo $prefix
+            ;;
+        --script_dir)
+            echo $script_dir
+            ;;
+        --site_dir)
+            echo $site_dir
+            ;;
+        --plugin_dir)
+            echo $plugin_dir
+            ;;
+        --config_dir)
+            echo $config_dir
+            ;;
+        --python_dir)
+            echo $python_dir
+            ;;
+        --bropath)
+            echo $bropath
+            ;;
+        --bro_dist)
+            echo $bro_dist
+            ;;
+        *)
+            echo "${usage}" 1>&2
+            exit 1
+            ;;
+    esac
+    shift
+done
+
+exit 0
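Once installed alongside bro, the script simply prints the requested
path; for example (output shown for a hypothetical /usr/local/bro
install):

	$ bro-config --version
	2.5-51
	$ bro-config --prefix
	/usr/local/bro
	$ bro-config --site_dir
	/usr/local/bro/share/bro/site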

cmake

@@ -1 +1 @@
-Subproject commit 843cdf6a91f06e5407bffbc79a343bff3cf4c81f
+Subproject commit d29fbf6152e54fbb536910af02a80874b1917311

configure

@@ -41,7 +41,8 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
   --enable-perftools-debug  use Google's perftools for debugging
   --enable-jemalloc         link against jemalloc
   --enable-ruby             build ruby bindings for broccoli (deprecated)
-  --disable-broker          disable use of the Broker communication library
+  --enable-broker           enable use of the Broker communication library
+                            (requires C++ Actor Framework)
   --disable-broccoli        don't build or install the Broccoli library
   --disable-broctl          don't install Broctl
   --disable-auxtools        don't build or install auxiliary tools
@@ -57,10 +58,10 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
   --with-flex=PATH          path to flex executable
   --with-bison=PATH         path to bison executable
   --with-python=PATH        path to Python executable
-  --with-libcaf=PATH        path to C++ Actor Framework installation
-                            (a required Broker dependency)
 
 Optional Packages in Non-Standard Locations:
+  --with-caf=PATH           path to C++ Actor Framework installation
+                            (a required Broker dependency)
   --with-geoip=PATH         path to the libGeoIP install root
   --with-perftools=PATH     path to Google Perftools install root
   --with-jemalloc=PATH      path to jemalloc install root
@@ -121,13 +122,12 @@ append_cache_entry BRO_ROOT_DIR PATH $prefix
 append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/broctl
 append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro
 append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc
-append_cache_entry BROKER_PYTHON_HOME PATH $prefix
 append_cache_entry BROKER_PYTHON_BINDINGS BOOL false
 append_cache_entry ENABLE_DEBUG BOOL false
 append_cache_entry ENABLE_PERFTOOLS BOOL false
 append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false
 append_cache_entry ENABLE_JEMALLOC BOOL false
-append_cache_entry ENABLE_BROKER BOOL true
+append_cache_entry ENABLE_BROKER BOOL false
 append_cache_entry BinPAC_SKIP_INSTALL BOOL true
 append_cache_entry BUILD_SHARED_LIBS BOOL true
 append_cache_entry INSTALL_AUX_TOOLS BOOL true
@@ -161,10 +161,6 @@ while [ $# -ne 0 ]; do
         append_cache_entry CMAKE_INSTALL_PREFIX PATH $optarg
         append_cache_entry BRO_ROOT_DIR PATH $optarg
         append_cache_entry PY_MOD_INSTALL_DIR PATH $optarg/lib/broctl
-        if [ -z "$user_disabled_broker" ]; then
-            append_cache_entry BROKER_PYTHON_HOME PATH $optarg
-        fi
         ;;
     --scriptdir=*)
         append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg
@@ -199,10 +195,10 @@ while [ $# -ne 0 ]; do
     --enable-jemalloc)
         append_cache_entry ENABLE_JEMALLOC BOOL true
         ;;
+    --enable-broker)
+        append_cache_entry ENABLE_BROKER BOOL true
+        ;;
     --disable-broker)
-        append_cache_entry ENABLE_BROKER BOOL false
-        remove_cache_entry BROKER_PYTHON_HOME
-        user_disabled_broker="true"
         ;;
     --disable-broccoli)
         append_cache_entry INSTALL_BROCCOLI BOOL false
@@ -226,7 +222,7 @@ while [ $# -ne 0 ]; do
         append_cache_entry DISABLE_RUBY_BINDINGS BOOL false
         ;;
     --with-openssl=*)
-        append_cache_entry OpenSSL_ROOT_DIR PATH $optarg
+        append_cache_entry OPENSSL_ROOT_DIR PATH $optarg
         ;;
     --with-bind=*)
         append_cache_entry BIND_ROOT_DIR PATH $optarg
@@ -276,8 +272,12 @@ while [ $# -ne 0 ]; do
     --with-swig=*)
         append_cache_entry SWIG_EXECUTABLE PATH $optarg
         ;;
+    --with-caf=*)
+        append_cache_entry CAF_ROOT_DIR PATH $optarg
+        ;;
     --with-libcaf=*)
-        append_cache_entry LIBCAF_ROOT_DIR PATH $optarg
+        echo "warning: --with-libcaf deprecated, use --with-caf instead"
+        append_cache_entry CAF_ROOT_DIR PATH $optarg
         ;;
     --with-rocksdb=*)
         append_cache_entry ROCKSDB_ROOT_DIR PATH $optarg
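With Broker now disabled by default, a build that wants it might be
configured like this (the paths are placeholders):

	./configure --prefix=/usr/local/bro --enable-broker --with-caf=/opt/caf
	make && make install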

@@ -1,5 +1,5 @@
 This work is licensed under the Creative Commons
-Attribution-NonCommercial 3.0 Unported License. To view a copy of this
-license, visit http://creativecommons.org/licenses/by-nc/3.0/ or send
+Attribution 4.0 International License. To view a copy of this
+license, visit https://creativecommons.org/licenses/by/4.0/ or send
 a letter to Creative Commons, 444 Castro Street, Suite 900, Mountain
 View, California, 94041, USA.

@@ -39,9 +39,11 @@ Manager
 *******
 
 The manager is a Bro process that has two primary jobs. It receives log
 messages and notices from the rest of the nodes in the cluster using the Bro
-communications protocol. The result is a single log instead of many
-discrete logs that you have to combine in some manner with post-processing.
-The manager also takes the opportunity to de-duplicate notices, and it has the
+communications protocol (note that if you are using a logger, then the
+logger receives all logs instead of the manager). The result
+is a single log instead of many discrete logs that you have to
+combine in some manner with post-processing. The manager also takes
+the opportunity to de-duplicate notices, and it has the
 ability to do so since it's acting as the choke point for notices and how
 notices might be processed into actions (e.g., emailing, paging, or blocking).
 
@@ -51,6 +53,20 @@ connections to the rest of the cluster. Once the workers are started and
 connect to the manager, logs and notices will start arriving to the manager
 process from the workers.
 
+Logger
+******
+
+The logger is an optional Bro process that receives log messages from
+the rest of the nodes in the cluster using the Bro communications
+protocol. The purpose of having a logger receive logs instead of the
+manager is to reduce the load on the manager. If no logger is needed,
+then the manager will receive logs instead.
+
+The logger process is started first by BroControl and only opens its
+designated port and waits for connections; it doesn't initiate any
+connections to the rest of the cluster. Once the rest of the cluster
+is started and connects to the logger, logs will start arriving to the
+logger process.
+
 Proxy
 *****
 
 The proxy is a Bro process that manages synchronized state. Variables can
@@ -96,13 +112,13 @@ logging is done remotely to the manager, and normally very little is written
 to disk.
 
 The rule of thumb we have followed recently is to allocate approximately 1
-core for every 80Mbps of traffic that is being analyzed. However, this
+core for every 250Mbps of traffic that is being analyzed. However, this
 estimate could be extremely traffic mix-specific. It has generally worked
 for mixed traffic with many users and servers. For example, if your traffic
 peaks around 2Gbps (combined) and you want to handle traffic at peak load,
-you may want to have 26 cores available (2048 / 80 == 25.6). If the 80Mbps
-estimate works for your traffic, this could be handled by 3 physical hosts
-dedicated to being workers with each one containing dual 6-core processors.
+you may want to have 8 cores available (2048 / 250 == 8.2). If the 250Mbps
+estimate works for your traffic, this could be handled by 2 physical hosts
+dedicated to being workers with each one containing a quad-core processor.
 
 Once a flow-based load balancer is put into place this model is extremely
 easy to scale. It is recommended that you estimate the amount of

@@ -0,0 +1 @@
+../../../../aux/plugins/af_packet/README

@@ -1 +0,0 @@
-../../../../aux/plugins/dataseries/README

@@ -0,0 +1 @@
+../../../../aux/plugins/kafka/README

@@ -0,0 +1 @@
+../../../../aux/plugins/myricom/README

@@ -0,0 +1 @@
+../../../../aux/plugins/postgresql/README

@@ -0,0 +1 @@
+../../../../aux/plugins/tcprs/README

@@ -66,7 +66,7 @@ master_doc = 'index'
 # General information about the project.
 project = u'Bro'
-copyright = u'2013, The Bro Project'
+copyright = u'2016, The Bro Project'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the

@@ -44,7 +44,10 @@ workers can consume a lot of CPU resources. The maximum recommended
 number of workers to run on a machine should be one or two less than
 the number of CPU cores available on that machine. Using a load-balancing
 method (such as PF_RING) along with CPU pinning can decrease the load on
-the worker machines.
+the worker machines. Also, in order to reduce the load on the manager
+process, it is recommended to have a logger in your configuration. If a
+logger is defined in your cluster configuration, then it will receive logs
+instead of the manager process.
 
 Basic Cluster Configuration
 
@@ -61,13 +64,17 @@ a Bro cluster (do this as the Bro user on the manager host only):
   :doc:`BroControl <../components/broctl/README>` documentation.
 
 - Edit the BroControl node configuration file, ``<prefix>/etc/node.cfg``
-  to define where manager, proxies, and workers are to run. For a cluster
-  configuration, you must comment-out (or remove) the standalone node
+  to define where logger, manager, proxies, and workers are to run. For a
+  cluster configuration, you must comment-out (or remove) the standalone node
   in that file, and either uncomment or add node entries for each node
-  in your cluster (manager, proxy, and workers). For example, if you wanted
-  to run four Bro nodes (two workers, one proxy, and a manager) on a cluster
-  consisting of three machines, your cluster configuration would look like
-  this::
+  in your cluster (logger, manager, proxy, and workers). For example, if you
+  wanted to run five Bro nodes (two workers, one proxy, a logger, and a
+  manager) on a cluster consisting of three machines, your cluster
+  configuration would look like this::
+
+    [logger]
+    type=logger
+    host=10.0.0.10
 
     [manager]
     type=manager
@@ -94,28 +101,13 @@ a Bro cluster (do this as the Bro user on the manager host only):
   file lists all of the networks which the cluster should consider as local
   to the monitored environment.
 
-- Install workers and proxies using BroControl::
+- Install Bro on all machines in the cluster using BroControl::
 
     > broctl install
 
-- Some tasks need to be run on a regular basis. On the manager node,
-  insert a line like this into the crontab of the user running the
-  cluster::
-
-    0-59/5 * * * * <prefix>/bin/broctl cron
-
-  (Note: if you are editing the system crontab instead of a user's own
-  crontab, then you need to also specify the user which the command
-  will be run as. The username must be placed after the time fields
-  and before the broctl command.)
-
-  Note that on some systems (FreeBSD in particular), the default PATH
-  for cron jobs does not include the directories where bash and python
-  are installed (the symptoms of this problem would be that "broctl cron"
-  works when run directly by the user, but does not work from a cron job).
-  To solve this problem, you would either need to create symlinks
-  to bash and python in a directory that is in the default PATH for
-  cron jobs, or specify a new PATH in the crontab.
+- See the :doc:`BroControl <../components/broctl/README>` documentation
+  for information on setting up a cron job on the manager host that can
+  monitor the cluster.
 
 PF_RING Cluster Configuration
 
@@ -174,7 +166,7 @@ Installing PF_RING
 5. Configure BroControl to use PF_RING (explained below).
 
 6. Run "broctl install" on the manager. This command will install Bro and
-   all required scripts to the other machines in your cluster.
+   required scripts to all machines in your cluster.
 
 Using PF_RING
 ^^^^^^^^^^^^^

@@ -14,7 +14,7 @@ from sphinx.locale import l_, _
 from sphinx.directives import ObjectDescription
 from sphinx.roles import XRefRole
 from sphinx.util.nodes import make_refnode
-import string
+from sphinx import version_info
 
 from docutils import nodes
 from docutils.parsers.rst import Directive
@@ -29,9 +29,17 @@ class SeeDirective(Directive):
     def run(self):
         n = see('')
-        n.refs = string.split(string.join(self.content))
+        n.refs = " ".join(self.content).split()
         return [n]
 
+# Wrapper for creating a tuple for index nodes, staying backwards
+# compatible to Sphinx < 1.4:
+def make_index_tuple(indextype, indexentry, targetname, targetname2):
+    if version_info >= (1, 4, 0, '', 0):
+        return (indextype, indexentry, targetname, targetname2, None)
+    else:
+        return (indextype, indexentry, targetname, targetname2)
+
 def process_see_nodes(app, doctree, fromdocname):
     for node in doctree.traverse(see):
         content = []
@@ -95,8 +103,9 @@ class BroGeneric(ObjectDescription):
 
         indextext = self.get_index_text(self.objtype, name)
         if indextext:
-            self.indexnode['entries'].append(('single', indextext,
-                                              targetname, targetname))
+            self.indexnode['entries'].append(make_index_tuple('single',
+                                              indextext, targetname,
+                                              targetname))
 
     def get_index_text(self, objectname, name):
         return _('%s (%s)') % (name, self.objtype)
@@ -120,9 +129,9 @@ class BroNamespace(BroGeneric):
         self.update_type_map(name)
 
         indextext = self.get_index_text(self.objtype, name)
-        self.indexnode['entries'].append(('single', indextext,
+        self.indexnode['entries'].append(make_index_tuple('single', indextext,
                                           targetname, targetname))
-        self.indexnode['entries'].append(('single',
+        self.indexnode['entries'].append(make_index_tuple('single',
                                           "namespaces; %s" % (sig),
                                           targetname, targetname))
@@ -148,7 +157,7 @@ class BroEnum(BroGeneric):
         self.update_type_map(name)
 
         indextext = self.get_index_text(self.objtype, name)
-        #self.indexnode['entries'].append(('single', indextext,
+        #self.indexnode['entries'].append(make_index_tuple('single', indextext,
         #                                 targetname, targetname))
         m = sig.split()
@@ -162,7 +171,7 @@ class BroEnum(BroGeneric):
             self.env.domaindata['bro']['notices'] = []
         self.env.domaindata['bro']['notices'].append(
             (m[0], self.env.docname, targetname))
-        self.indexnode['entries'].append(('single',
+        self.indexnode['entries'].append(make_index_tuple('single',
                                           "%s (enum values); %s" % (m[1], m[0]),
                                           targetname, targetname))
@@ -204,7 +213,7 @@ class BroNotices(Index):
             entries = content.setdefault(modname, [])
             entries.append([n[0], 0, n[1], n[2], '', '', ''])
 
-        content = sorted(content.iteritems())
+        content = sorted(content.items())
 
         return content, False
 
@@ -280,5 +289,5 @@ class BroDomain(Domain):
                 'unknown target for ":bro:%s:`%s`"' % (typ, target))
 
     def get_objects(self):
-        for (typ, name), docname in self.data['objects'].iteritems():
+        for (typ, name), docname in self.data['objects'].items():
             yield name, name, typ, docname, typ + '-' + name, 1

@@ -17,20 +17,20 @@ Connecting to Peers
 ===================
 
 Communication via Broker must first be turned on via
-:bro:see:`BrokerComm::enable`.
+:bro:see:`Broker::enable`.
 
-Bro can accept incoming connections by calling :bro:see:`BrokerComm::listen`
+Bro can accept incoming connections by calling :bro:see:`Broker::listen`
 and then monitor connection status updates via the
-:bro:see:`BrokerComm::incoming_connection_established` and
-:bro:see:`BrokerComm::incoming_connection_broken` events.
+:bro:see:`Broker::incoming_connection_established` and
+:bro:see:`Broker::incoming_connection_broken` events.
 
 .. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-listener.bro
 
-Bro can initiate outgoing connections by calling :bro:see:`BrokerComm::connect`
+Bro can initiate outgoing connections by calling :bro:see:`Broker::connect`
 and then monitor connection status updates via the
-:bro:see:`BrokerComm::outgoing_connection_established`,
-:bro:see:`BrokerComm::outgoing_connection_broken`, and
-:bro:see:`BrokerComm::outgoing_connection_incompatible` events.
+:bro:see:`Broker::outgoing_connection_established`,
+:bro:see:`Broker::outgoing_connection_broken`, and
+:bro:see:`Broker::outgoing_connection_incompatible` events.
 
 .. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-connector.bro
 
@@ -38,14 +38,14 @@ Remote Printing
 ===============
 
 To receive remote print messages, first use the
-:bro:see:`BrokerComm::subscribe_to_prints` function to advertise to peers a
+:bro:see:`Broker::subscribe_to_prints` function to advertise to peers a
 topic prefix of interest and then create an event handler for
-:bro:see:`BrokerComm::print_handler` to handle any print messages that are
+:bro:see:`Broker::print_handler` to handle any print messages that are
 received.
 
 .. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-listener.bro
 
-To send remote print messages, just call :bro:see:`BrokerComm::print`.
+To send remote print messages, just call :bro:see:`Broker::send_print`.
 
 .. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-connector.bro
 
@@ -69,14 +69,14 @@ Remote Events
 =============
 
 Receiving remote events is similar to remote prints. Just use the
-:bro:see:`BrokerComm::subscribe_to_events` function and possibly define any
+:bro:see:`Broker::subscribe_to_events` function and possibly define any
 new events along with handlers that peers may want to send.
 
 .. btest-include:: ${DOC_ROOT}/frameworks/broker/events-listener.bro
 
 There are two different ways to send events. The first is to call the
-:bro:see:`BrokerComm::event` function directly. The second option is to call
-the :bro:see:`BrokerComm::auto_event` function where you specify a
+:bro:see:`Broker::send_event` function directly. The second option is to call
+the :bro:see:`Broker::auto_event` function where you specify a
 particular event that will be automatically sent to peers whenever the
 event is called locally via the normal event invocation syntax.
 
@@ -104,14 +104,14 @@ Remote Logging
 
 .. btest-include:: ${DOC_ROOT}/frameworks/broker/testlog.bro
 
-Use the :bro:see:`BrokerComm::subscribe_to_logs` function to advertise interest
+Use the :bro:see:`Broker::subscribe_to_logs` function to advertise interest
 in logs written by peers. The topic names that Bro uses are implicitly of the
 form "bro/log/<stream-name>".
 
 .. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-listener.bro
 
 To send remote logs either redef :bro:see:`Log::enable_remote_logging` or
-use the :bro:see:`BrokerComm::enable_remote_logs` function. The former
+use the :bro:see:`Broker::enable_remote_logs` function. The former
 allows any log stream to be sent to peers while the latter enables remote
 logging for particular streams.
 
@@ -137,24 +137,24 @@ Tuning Access Control
 
 By default, endpoints do not restrict the message topics that they send
 to peers and do not restrict what message topics and data store
 identifiers get advertised to peers. These are the default
-:bro:see:`BrokerComm::EndpointFlags` supplied to :bro:see:`BrokerComm::enable`.
+:bro:see:`Broker::EndpointFlags` supplied to :bro:see:`Broker::enable`.
 
 If not using the ``auto_publish`` flag, one can use the
-:bro:see:`BrokerComm::publish_topic` and :bro:see:`BrokerComm::unpublish_topic`
+:bro:see:`Broker::publish_topic` and :bro:see:`Broker::unpublish_topic`
 functions to manipulate the set of message topics (must match exactly)
 that are allowed to be sent to peer endpoints. These settings take
 precedence over the per-message ``peers`` flag supplied to functions
-that take a :bro:see:`BrokerComm::SendFlags` such as :bro:see:`BrokerComm::print`,
-:bro:see:`BrokerComm::event`, :bro:see:`BrokerComm::auto_event` or
-:bro:see:`BrokerComm::enable_remote_logs`.
+that take a :bro:see:`Broker::SendFlags` such as :bro:see:`Broker::send_print`,
+:bro:see:`Broker::send_event`, :bro:see:`Broker::auto_event` or
+:bro:see:`Broker::enable_remote_logs`.
 
 If not using the ``auto_advertise`` flag, one can use the
-:bro:see:`BrokerComm::advertise_topic` and
-:bro:see:`BrokerComm::unadvertise_topic` functions
+:bro:see:`Broker::advertise_topic` and
+:bro:see:`Broker::unadvertise_topic` functions
 to manipulate the set of topic prefixes that are allowed to be
 advertised to peers. If an endpoint does not advertise a topic prefix, then
 the only way peers can send messages to it is via the ``unsolicited``
-flag of :bro:see:`BrokerComm::SendFlags` and choosing a topic with a matching
+flag of :bro:see:`Broker::SendFlags` and choosing a topic with a matching
 prefix (i.e. the full topic may be longer than the receiver's prefix; just
 the prefix needs to match).
 
@@ -192,8 +192,8 @@ last modification time.
 
 .. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-connector.bro
 
 In the above example, if a local copy of the store contents isn't
-needed, just replace the :bro:see:`BrokerStore::create_clone` call with
-:bro:see:`BrokerStore::create_frontend`. Queries will then be made against
+needed, just replace the :bro:see:`Broker::create_clone` call with
+:bro:see:`Broker::create_frontend`. Queries will then be made against
 the remote master store instead of the local clone.
 
 Note that all data store queries must be made within Bro's asynchronous

@@ -1,18 +1,18 @@
 const broker_port: port = 9999/tcp &redef;
 redef exit_only_after_terminate = T;
-redef BrokerComm::endpoint_name = "connector";
+redef Broker::endpoint_name = "connector";
 
 event bro_init()
 	{
-	BrokerComm::enable();
-	BrokerComm::connect("127.0.0.1", broker_port, 1sec);
+	Broker::enable();
+	Broker::connect("127.0.0.1", broker_port, 1sec);
 	}
 
-event BrokerComm::outgoing_connection_established(peer_address: string,
+event Broker::outgoing_connection_established(peer_address: string,
                                               peer_port: port,
                                               peer_name: string)
 	{
-	print "BrokerComm::outgoing_connection_established",
+	print "Broker::outgoing_connection_established",
 	      peer_address, peer_port, peer_name;
 	terminate();
 	}

@@ -1,20 +1,20 @@
 const broker_port: port = 9999/tcp &redef;
 redef exit_only_after_terminate = T;
-redef BrokerComm::endpoint_name = "listener";
+redef Broker::endpoint_name = "listener";
 
 event bro_init()
 	{
-	BrokerComm::enable();
-	BrokerComm::listen(broker_port, "127.0.0.1");
+	Broker::enable();
+	Broker::listen(broker_port, "127.0.0.1");
 	}
 
-event BrokerComm::incoming_connection_established(peer_name: string)
+event Broker::incoming_connection_established(peer_name: string)
 	{
-	print "BrokerComm::incoming_connection_established", peer_name;
+	print "Broker::incoming_connection_established", peer_name;
 	}
 
-event BrokerComm::incoming_connection_broken(peer_name: string)
+event Broker::incoming_connection_broken(peer_name: string)
 	{
-	print "BrokerComm::incoming_connection_broken", peer_name;
+	print "Broker::incoming_connection_broken", peer_name;
 	terminate();
 	}

@@ -1,30 +1,30 @@
 const broker_port: port = 9999/tcp &redef;
 redef exit_only_after_terminate = T;
-redef BrokerComm::endpoint_name = "connector";
+redef Broker::endpoint_name = "connector";
 
 global my_event: event(msg: string, c: count);
 global my_auto_event: event(msg: string, c: count);
 
 event bro_init()
 	{
-	BrokerComm::enable();
-	BrokerComm::connect("127.0.0.1", broker_port, 1sec);
-	BrokerComm::auto_event("bro/event/my_auto_event", my_auto_event);
+	Broker::enable();
+	Broker::connect("127.0.0.1", broker_port, 1sec);
+	Broker::auto_event("bro/event/my_auto_event", my_auto_event);
 	}
 
-event BrokerComm::outgoing_connection_established(peer_address: string,
+event Broker::outgoing_connection_established(peer_address: string,
                                               peer_port: port,
                                               peer_name: string)
 	{
-	print "BrokerComm::outgoing_connection_established",
+	print "Broker::outgoing_connection_established",
 	      peer_address, peer_port, peer_name;
-	BrokerComm::event("bro/event/my_event", BrokerComm::event_args(my_event, "hi", 0));
+	Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "hi", 0));
 	event my_auto_event("stuff", 88);
-	BrokerComm::event("bro/event/my_event", BrokerComm::event_args(my_event, "...", 1));
+	Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "...", 1));
 	event my_auto_event("more stuff", 51);
-	BrokerComm::event("bro/event/my_event", BrokerComm::event_args(my_event, "bye", 2));
+	Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "bye", 2));
 	}
 
-event BrokerComm::outgoing_connection_broken(peer_address: string,
+event Broker::outgoing_connection_broken(peer_address: string,
                                           peer_port: port)
 	{
 	terminate();

@@ -1,20 +1,20 @@
 const broker_port: port = 9999/tcp &redef;
 redef exit_only_after_terminate = T;
-redef BrokerComm::endpoint_name = "listener";
+redef Broker::endpoint_name = "listener";
 
 global msg_count = 0;
 global my_event: event(msg: string, c: count);
 global my_auto_event: event(msg: string, c: count);
 
 event bro_init()
 	{
-	BrokerComm::enable();
-	BrokerComm::subscribe_to_events("bro/event/");
-	BrokerComm::listen(broker_port, "127.0.0.1");
+	Broker::enable();
+	Broker::subscribe_to_events("bro/event/");
+	Broker::listen(broker_port, "127.0.0.1");
 	}
 
-event BrokerComm::incoming_connection_established(peer_name: string)
+event Broker::incoming_connection_established(peer_name: string)
 	{
-	print "BrokerComm::incoming_connection_established", peer_name;
+	print "Broker::incoming_connection_established", peer_name;
 	}
 
 event my_event(msg: string, c: count)

@@ -2,16 +2,16 @@
 const broker_port: port = 9999/tcp &redef;
 redef exit_only_after_terminate = T;
-redef BrokerComm::endpoint_name = "connector";
+redef Broker::endpoint_name = "connector";
 redef Log::enable_local_logging = F;
 redef Log::enable_remote_logging = F;
 global n = 0;
 
 event bro_init()
 	{
-	BrokerComm::enable();
-	BrokerComm::enable_remote_logs(Test::LOG);
-	BrokerComm::connect("127.0.0.1", broker_port, 1sec);
+	Broker::enable();
+	Broker::enable_remote_logs(Test::LOG);
+	Broker::connect("127.0.0.1", broker_port, 1sec);
 	}
 
 event do_write()
@@ -24,16 +24,16 @@ event do_write()
 	event do_write();
 	}
 
-event BrokerComm::outgoing_connection_established(peer_address: string,
+event Broker::outgoing_connection_established(peer_address: string,
                                               peer_port: port,
                                               peer_name: string)
 	{
-	print "BrokerComm::outgoing_connection_established",
+	print "Broker::outgoing_connection_established",
 	      peer_address, peer_port, peer_name;
 	event do_write();
 	}
 
-event BrokerComm::outgoing_connection_broken(peer_address: string,
+event Broker::outgoing_connection_broken(peer_address: string,
                                           peer_port: port)
 	{
 	terminate();


@ -2,18 +2,18 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "listener"; redef Broker::endpoint_name = "listener";
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::subscribe_to_logs("bro/log/Test::LOG"); Broker::subscribe_to_logs("bro/log/Test::LOG");
BrokerComm::listen(broker_port, "127.0.0.1"); Broker::listen(broker_port, "127.0.0.1");
} }
event BrokerComm::incoming_connection_established(peer_name: string) event Broker::incoming_connection_established(peer_name: string)
{ {
print "BrokerComm::incoming_connection_established", peer_name; print "Broker::incoming_connection_established", peer_name;
} }
event Test::log_test(rec: Test::Info) event Test::log_test(rec: Test::Info)


@ -1,25 +1,25 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "connector"; redef Broker::endpoint_name = "connector";
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::connect("127.0.0.1", broker_port, 1sec); Broker::connect("127.0.0.1", broker_port, 1sec);
} }
event BrokerComm::outgoing_connection_established(peer_address: string, event Broker::outgoing_connection_established(peer_address: string,
peer_port: port, peer_port: port,
peer_name: string) peer_name: string)
{ {
print "BrokerComm::outgoing_connection_established", print "Broker::outgoing_connection_established",
peer_address, peer_port, peer_name; peer_address, peer_port, peer_name;
BrokerComm::print("bro/print/hi", "hello"); Broker::send_print("bro/print/hi", "hello");
BrokerComm::print("bro/print/stuff", "..."); Broker::send_print("bro/print/stuff", "...");
BrokerComm::print("bro/print/bye", "goodbye"); Broker::send_print("bro/print/bye", "goodbye");
} }
event BrokerComm::outgoing_connection_broken(peer_address: string, event Broker::outgoing_connection_broken(peer_address: string,
peer_port: port) peer_port: port)
{ {
terminate(); terminate();


@ -1,21 +1,21 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "listener"; redef Broker::endpoint_name = "listener";
global msg_count = 0; global msg_count = 0;
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::subscribe_to_prints("bro/print/"); Broker::subscribe_to_prints("bro/print/");
BrokerComm::listen(broker_port, "127.0.0.1"); Broker::listen(broker_port, "127.0.0.1");
} }
event BrokerComm::incoming_connection_established(peer_name: string) event Broker::incoming_connection_established(peer_name: string)
{ {
print "BrokerComm::incoming_connection_established", peer_name; print "Broker::incoming_connection_established", peer_name;
} }
event BrokerComm::print_handler(msg: string) event Broker::print_handler(msg: string)
{ {
++msg_count; ++msg_count;
print "got print message", msg; print "got print message", msg;


@ -1,42 +1,42 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
global h: opaque of BrokerStore::Handle; global h: opaque of Broker::Handle;
function dv(d: BrokerComm::Data): BrokerComm::DataVector function dv(d: Broker::Data): Broker::DataVector
{ {
local rval: BrokerComm::DataVector; local rval: Broker::DataVector;
rval[0] = d; rval[0] = d;
return rval; return rval;
} }
global ready: event(); global ready: event();
event BrokerComm::outgoing_connection_broken(peer_address: string, event Broker::outgoing_connection_broken(peer_address: string,
peer_port: port) peer_port: port)
{ {
terminate(); terminate();
} }
event BrokerComm::outgoing_connection_established(peer_address: string, event Broker::outgoing_connection_established(peer_address: string,
peer_port: port, peer_port: port,
peer_name: string) peer_name: string)
{ {
local myset: set[string] = {"a", "b", "c"}; local myset: set[string] = {"a", "b", "c"};
local myvec: vector of string = {"alpha", "beta", "gamma"}; local myvec: vector of string = {"alpha", "beta", "gamma"};
h = BrokerStore::create_master("mystore"); h = Broker::create_master("mystore");
BrokerStore::insert(h, BrokerComm::data("one"), BrokerComm::data(110)); Broker::insert(h, Broker::data("one"), Broker::data(110));
BrokerStore::insert(h, BrokerComm::data("two"), BrokerComm::data(223)); Broker::insert(h, Broker::data("two"), Broker::data(223));
BrokerStore::insert(h, BrokerComm::data("myset"), BrokerComm::data(myset)); Broker::insert(h, Broker::data("myset"), Broker::data(myset));
BrokerStore::insert(h, BrokerComm::data("myvec"), BrokerComm::data(myvec)); Broker::insert(h, Broker::data("myvec"), Broker::data(myvec));
BrokerStore::increment(h, BrokerComm::data("one")); Broker::increment(h, Broker::data("one"));
BrokerStore::decrement(h, BrokerComm::data("two")); Broker::decrement(h, Broker::data("two"));
BrokerStore::add_to_set(h, BrokerComm::data("myset"), BrokerComm::data("d")); Broker::add_to_set(h, Broker::data("myset"), Broker::data("d"));
BrokerStore::remove_from_set(h, BrokerComm::data("myset"), BrokerComm::data("b")); Broker::remove_from_set(h, Broker::data("myset"), Broker::data("b"));
BrokerStore::push_left(h, BrokerComm::data("myvec"), dv(BrokerComm::data("delta"))); Broker::push_left(h, Broker::data("myvec"), dv(Broker::data("delta")));
BrokerStore::push_right(h, BrokerComm::data("myvec"), dv(BrokerComm::data("omega"))); Broker::push_right(h, Broker::data("myvec"), dv(Broker::data("omega")));
when ( local res = BrokerStore::size(h) ) when ( local res = Broker::size(h) )
{ {
print "master size", res; print "master size", res;
event ready(); event ready();
@ -47,7 +47,7 @@ event BrokerComm::outgoing_connection_established(peer_address: string,
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::connect("127.0.0.1", broker_port, 1secs); Broker::connect("127.0.0.1", broker_port, 1secs);
BrokerComm::auto_event("bro/event/ready", ready); Broker::auto_event("bro/event/ready", ready);
} }


@ -1,13 +1,13 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
global h: opaque of BrokerStore::Handle; global h: opaque of Broker::Handle;
global expected_key_count = 4; global expected_key_count = 4;
global key_count = 0; global key_count = 0;
function do_lookup(key: string) function do_lookup(key: string)
{ {
when ( local res = BrokerStore::lookup(h, BrokerComm::data(key)) ) when ( local res = Broker::lookup(h, Broker::data(key)) )
{ {
++key_count; ++key_count;
print "lookup", key, res; print "lookup", key, res;
@ -21,15 +21,15 @@ function do_lookup(key: string)
event ready() event ready()
{ {
h = BrokerStore::create_clone("mystore"); h = Broker::create_clone("mystore");
when ( local res = BrokerStore::keys(h) ) when ( local res = Broker::keys(h) )
{ {
print "clone keys", res; print "clone keys", res;
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 0))); do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 0)));
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 1))); do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 1)));
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 2))); do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 2)));
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 3))); do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 3)));
} }
timeout 10sec timeout 10sec
{ print "timeout"; } { print "timeout"; }
@ -37,7 +37,7 @@ event ready()
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::subscribe_to_events("bro/event/ready"); Broker::subscribe_to_events("bro/event/ready");
BrokerComm::listen(broker_port, "127.0.0.1"); Broker::listen(broker_port, "127.0.0.1");
} }


@ -13,6 +13,6 @@ export {
event bro_init() &priority=5 event bro_init() &priority=5
{ {
BrokerComm::enable(); Broker::enable();
Log::create_stream(Test::LOG, [$columns=Test::Info, $ev=log_test, $path="test"]); Log::create_stream(Test::LOG, [$columns=Test::Info, $ev=log_test, $path="test"]);
} }


@ -11,6 +11,7 @@ Frameworks
input input
intel intel
logging logging
netcontrol
notice notice
signatures signatures
sumstats sumstats


@ -7,7 +7,7 @@ Input Framework
.. rst-class:: opening .. rst-class:: opening
Bro now features a flexible input framework that allows users Bro features a flexible input framework that allows users
to import data into Bro. Data is either read into Bro tables or to import data into Bro. Data is either read into Bro tables or
converted to events which can then be handled by scripts. converted to events which can then be handled by scripts.
This document gives an overview of how to use the input framework This document gives an overview of how to use the input framework


@ -537,6 +537,5 @@ Additional writers are available as external plugins:
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1
../components/bro-plugins/dataseries/README ../components/bro-plugins/README
../components/bro-plugins/elasticsearch/README


@ -0,0 +1,10 @@
event NetControl::init()
{
local debug_plugin = NetControl::create_debug(T);
NetControl::activate(debug_plugin, 0);
}
event connection_established(c: connection)
{
NetControl::drop_connection(c$id, 20 secs);
}


@ -0,0 +1,10 @@
event NetControl::init()
{
local skeleton_plugin = NetControl::create_skeleton("");
NetControl::activate(skeleton_plugin, 0);
}
event connection_established(c: connection)
{
NetControl::drop_connection(c$id, 20 secs);
}


@ -0,0 +1,16 @@
@load protocols/ssh/detect-bruteforcing
redef SSH::password_guesses_limit=10;
event NetControl::init()
{
local debug_plugin = NetControl::create_debug(T);
NetControl::activate(debug_plugin, 0);
}
hook Notice::policy(n: Notice::Info)
{
if ( n$note == SSH::Password_Guessing )
NetControl::drop_address(n$src, 60min);
}


@ -0,0 +1,16 @@
@load protocols/ssh/detect-bruteforcing
redef SSH::password_guesses_limit=10;
event NetControl::init()
{
local debug_plugin = NetControl::create_debug(T);
NetControl::activate(debug_plugin, 0);
}
hook Notice::policy(n: Notice::Info)
{
if ( n$note == SSH::Password_Guessing )
add n$actions[Notice::ACTION_DROP];
}


@ -0,0 +1,26 @@
function our_drop_connection(c: conn_id, t: interval)
{
# As a first step, create the NetControl::Entity that we want to block
local e = NetControl::Entity($ty=NetControl::CONNECTION, $conn=c);
# Then, use the entity to create the rule to drop the entity in the forward path
local r = NetControl::Rule($ty=NetControl::DROP,
$target=NetControl::FORWARD, $entity=e, $expire=t);
# Add the rule
local id = NetControl::add_rule(r);
if ( id == "" )
print "Error while dropping";
}
event NetControl::init()
{
local debug_plugin = NetControl::create_debug(T);
NetControl::activate(debug_plugin, 0);
}
event connection_established(c: connection)
{
our_drop_connection(c$id, 20 secs);
}


@ -0,0 +1,22 @@
hook NetControl::rule_policy(r: NetControl::Rule)
{
if ( r$ty == NetControl::DROP &&
r$entity$ty == NetControl::CONNECTION &&
r$entity$conn$orig_h in 192.168.0.0/16 )
{
print "Ignored connection from", r$entity$conn$orig_h;
break;
}
}
event NetControl::init()
{
local debug_plugin = NetControl::create_debug(T);
NetControl::activate(debug_plugin, 0);
}
event connection_established(c: connection)
{
NetControl::drop_connection(c$id, 20 secs);
}


@ -0,0 +1,17 @@
event NetControl::init()
{
local netcontrol_debug = NetControl::create_debug(T);
NetControl::activate(netcontrol_debug, 0);
}
event connection_established(c: connection)
{
if ( |NetControl::find_rules_addr(c$id$orig_h)| > 0 )
{
print "Rule already exists";
return;
}
NetControl::drop_connection(c$id, 20 secs);
print "Rule added";
}


@ -0,0 +1,10 @@
event NetControl::init()
{
local debug_plugin = NetControl::create_debug(T);
NetControl::activate(debug_plugin, 0);
}
event connection_established(c: connection)
{
NetControl::drop_address_catch_release(c$id$orig_h);
}


@ -0,0 +1,29 @@
function our_openflow_check(p: NetControl::PluginState, r: NetControl::Rule): bool
{
if ( r$ty == NetControl::DROP &&
r$entity$ty == NetControl::ADDRESS &&
subnet_width(r$entity$ip) == 32 &&
subnet_to_addr(r$entity$ip) in 192.168.17.0/24 )
return F;
return T;
}
event NetControl::init()
{
# Add debug plugin with low priority
local debug_plugin = NetControl::create_debug(T);
NetControl::activate(debug_plugin, 0);
# Instantiate OpenFlow debug plugin with higher priority
local of_controller = OpenFlow::log_new(42);
local netcontrol_of = NetControl::create_openflow(of_controller, [$check_pred=our_openflow_check]);
NetControl::activate(netcontrol_of, 10);
}
event NetControl::init_done()
{
NetControl::drop_address(10.0.0.1, 1min);
NetControl::drop_address(192.168.17.2, 1min);
NetControl::drop_address(192.168.18.2, 1min);
}


@ -0,0 +1,39 @@
module NetControl;
export {
## Instantiates the plugin.
global create_skeleton: function(argument: string) : PluginState;
}
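# Returns the name of the plugin, used in logs and output.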
function skeleton_name(p: PluginState) : string
{
return "NetControl skeleton plugin";
}
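# Called when NetControl hands a rule to this backend. Raising
# NetControl::rule_added tells the framework the rule was applied successfully.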
function skeleton_add_rule_fun(p: PluginState, r: Rule) : bool
{
print "add", r;
event NetControl::rule_added(r, p);
return T;
}
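# Called when NetControl asks this backend to remove a rule; raising
# NetControl::rule_removed confirms the removal.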
function skeleton_remove_rule_fun(p: PluginState, r: Rule, reason: string &default="") : bool
{
print "remove", r;
event NetControl::rule_removed(r, p);
return T;
}
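# The Plugin record wires the callbacks together; with can_expire=F, the
# NetControl framework takes care of rule expiration itself.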
global skeleton_plugin = Plugin(
$name = skeleton_name,
$can_expire = F,
$add_rule = skeleton_add_rule_fun,
$remove_rule = skeleton_remove_rule_fun
);
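# Instantiates the plugin; the string argument is unused in this skeleton.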
function create_skeleton(argument: string) : PluginState
{
local p = PluginState($plugin=skeleton_plugin);
return p;
}

Three binary image files added (101 KiB, 84 KiB, and 90 KiB), presumably the netcontrol-architecture.png, netcontrol-rules.png, and netcontrol-openflow.png figures referenced in the NetControl documentation below.


@ -0,0 +1,629 @@
.. _framework-netcontrol:
====================
NetControl Framework
====================
.. rst-class:: opening
Using the NetControl framework, Bro can connect with network devices
such as switches or software and hardware firewalls. The
NetControl framework provides a flexible, unified interface for active
response and hides the complexity of heterogeneous network equipment
behind a simple task-oriented API, which is easily usable via Bro
scripts. This document gives an overview of how to use the NetControl
framework in different scenarios; to get a better understanding of how
it can be used in practice, it might be worthwhile to take a look at
the unit tests.
.. contents::
NetControl Architecture
=======================
.. figure:: netcontrol-architecture.png
:width: 600
:align: center
:alt: NetControl framework architecture
:target: ../_images/netcontrol-architecture.png
NetControl architecture (click to enlarge).
The basic architecture of the NetControl framework is shown in the figure above.
Conceptually, the NetControl framework sits between the user-provided scripts
(which use the Bro event engine) and the network device (which can be either a
hardware or a software device) that is used to implement the commands.
The NetControl framework supports a number of high-level calls, like the
:bro:see:`NetControl::drop_address` function, as well as a lower-level rule
syntax. After a rule has been added to the NetControl framework, NetControl
sends the rule to one or several of its *backends*. Each backend is responsible
for communicating with a single hardware or software device. The NetControl
framework tracks rules throughout their entire lifecycle and reports their
status (such as success, failure, or timeout) back to the user scripts.
The backends are implemented as Bro scripts using a plugin-based API; an example
for this is :doc:`/scripts/base/frameworks/netcontrol/plugins/broker.bro`. This
document will show how to write plugins in
:ref:`framework-netcontrol-plugins`.
NetControl API
==============
High-level NetControl API
-------------------------
In this section, we will introduce the high-level NetControl API. As mentioned
above, NetControl uses *backends* to communicate with the external devices that
will implement the rules. You will need at least one active backend before you
can use NetControl. For our examples, we will just use the debug plugin to
create a backend. This plugin outputs all actions that are taken to the standard
output.
Backends should be initialized in the :bro:see:`NetControl::init` event, calling
the :bro:see:`NetControl::activate` function after the plugin instance has been
initialized. The debug plugin can be initialized as follows:
.. code:: bro
event NetControl::init()
{
local debug_plugin = NetControl::create_debug(T);
NetControl::activate(debug_plugin, 0);
}
After at least one backend has been added to the NetControl framework, the
framework can be used and will send newly added rules to the activated backends.
The NetControl framework contains several high level functions that allow users
to drop connections of certain addresses and networks, shunt network traffic,
etc. The following table shows and describes all of the currently available
high-level functions.
.. list-table::
:widths: 32 40
:header-rows: 1
* - Function
- Description
* - :bro:see:`NetControl::drop_address`
- Calling this function causes NetControl to block all packets involving
an IP address from being forwarded.
* - :bro:see:`NetControl::drop_connection`
- Calling this function stops all packets of a specific connection
(identified by its 5-tuple) from being forwarded.
* - :bro:see:`NetControl::drop_address_catch_release`
- Calling this function causes all packets of a specific source IP to be
blocked. This function uses catch-and-release functionality and the IP
address is only dropped for a short amount of time to conserve rule
space in the network hardware. It is immediately re-dropped when it is
seen again in traffic. See :ref:`framework-netcontrol-catchrelease` for
more information.
* - :bro:see:`NetControl::shunt_flow`
- Calling this function causes NetControl to stop forwarding a
uni-directional flow of packets to Bro. This allows Bro to conserve
resources by shunting flows that have been identified as being benign.
* - :bro:see:`NetControl::redirect_flow`
- Calling this function causes NetControl to redirect a uni-directional
flow to another port of the networking hardware.
* - :bro:see:`NetControl::quarantine_host`
- Calling this function allows Bro to quarantine a host by redirecting its DNS
traffic to a special DNS server, which resolves all queries as pointing to
itself. The quarantined host is then only allowed to communicate with that
special server, which can serve a warning message detailing the next
steps for the user.
* - :bro:see:`NetControl::whitelist_address`
- Calling this function causes NetControl to push a whitelist entry for an
IP address to the networking hardware.
* - :bro:see:`NetControl::whitelist_subnet`
- Calling this function causes NetControl to push a whitelist entry for a
subnet to the networking hardware.
After adding a backend, all of these functions can immediately be used and will
start sending the rules to the added backend(s). To give a very simple example,
the following script will simply block the traffic of all connections that it
sees being established:
.. btest-include:: ${DOC_ROOT}/frameworks/netcontrol-1-drop-with-debug.bro
Running this script on a file containing one connection will cause the debug
plugin to print one line to the standard output, which contains information
about the rule that was added. It will also cause the creation of `netcontrol.log`,
which contains information about all actions that are taken by NetControl:
.. btest:: netcontrol-1-drop-with-debug.bro
@TEST-EXEC: btest-rst-cmd bro -C -r ${TRACES}/tls/ecdhe.pcap ${DOC_ROOT}/frameworks/netcontrol-1-drop-with-debug.bro
@TEST-EXEC: btest-rst-cmd cat netcontrol.log
In our case, `netcontrol.log` contains several :bro:see:`NetControl::MESSAGE`
entries, which show that the debug plugin has been initialized and added.
Afterwards, there are two :bro:see:`NetControl::RULE` entries; the first shows
that the addition of a rule has been requested (state is
:bro:see:`NetControl::REQUESTED`). The following line shows that the rule was
successfully added (the state is :bro:see:`NetControl::SUCCEEDED`). The
remainder of the log line gives more information about the added rule, which in
our case applies to a specific 5-tuple.
In addition to netcontrol.log, the drop commands also create a second log
called `netcontrol_drop.log`. This log file is much more succinct and
only contains information that is specific to drops that are enacted by
NetControl:
.. btest:: netcontrol-1-drop-with-debug.bro
@TEST-EXEC: btest-rst-cmd cat netcontrol_drop.log
While this example of blocking all connections is usually not very useful, the
high-level API gives an easy way to take action, for example when a host is
identified doing some harmful activity. To give a more realistic example, the
following code automatically blocks a recognized SSH guesser:
.. btest-include:: ${DOC_ROOT}/frameworks/netcontrol-2-ssh-guesser.bro
.. btest:: netcontrol-2-ssh-guesser.bro
@TEST-EXEC: btest-rst-cmd bro -C -r ${TRACES}/ssh/sshguess.pcap ${DOC_ROOT}/frameworks/netcontrol-2-ssh-guesser.bro
@TEST-EXEC: btest-rst-cmd cat netcontrol.log
Note that in this case, instead of calling NetControl directly, we can also use
the :bro:see:`Notice::ACTION_DROP` action of the notice framework:
.. btest-include:: ${DOC_ROOT}/frameworks/netcontrol-3-ssh-guesser.bro
.. btest:: netcontrol-3-ssh-guesser.bro
@TEST-EXEC: btest-rst-cmd bro -C -r ${TRACES}/ssh/sshguess.pcap ${DOC_ROOT}/frameworks/netcontrol-3-ssh-guesser.bro
@TEST-EXEC: btest-rst-cmd cat netcontrol.log
Using the :bro:see:`Notice::ACTION_DROP` action of the notice framework will
also cause the `dropped` column in `notice.log` to be set to true each time that
the NetControl framework enacts a block:
.. btest:: netcontrol-3-ssh-guesser.bro
@TEST-EXEC: btest-rst-cmd cat notice.log
Rule API
--------
As already mentioned in the last section, in addition to the high-level API, the
NetControl framework also supports a rule-based API which allows greater
flexibility when adding rules. In fact, all the high-level functions are
implemented using this lower-level rule API; the high-level functions simply
convert their arguments into the lower-level rules and then add the rules
directly to the NetControl framework (by calling :bro:see:`NetControl::add_rule`).
The following figure shows the main components of NetControl rules:
.. figure:: netcontrol-rules.png
:width: 600
:align: center
:alt: NetControl rule overview
:target: ../_images/netcontrol-rules.png
NetControl Rule overview (click to enlarge).
The types that are used to make up a rule are defined in
:doc:`/scripts/base/frameworks/netcontrol/types.bro`.
Rules are defined as a :bro:see:`NetControl::Rule` record. Rules have a *type*,
which specifies what kind of action is taken. The possible actions are to
**drop** packets, to **modify** them, to **redirect** or to **whitelist** them.
The *target* of a rule specifies whether the rule is applied in the *forward path*,
and affects packets as they are forwarded through the network, or if it affects
the *monitor path* and only affects the packets that are sent to Bro, but not
the packets that traverse the network. The *entity* specifies the address,
connection, etc. that the rule applies to. In addition, each rule has a
*timeout* (which can be left empty) and a *priority* (with higher priority rules
overriding lower priority ones). Furthermore, a *location* string with more
textual information about each rule can be provided.
There are a couple more fields that are only needed for some rule types. For
example, when you insert a redirect rule, you have to specify the port that
packets should be redirected to. All these fields are shown in the
:bro:see:`NetControl::Rule` documentation.
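To make this concrete, here is a sketch of such a redirect rule (we assume, per
the :bro:see:`NetControl::Rule` documentation, that ``out_port`` is the field
naming the destination port; the helper name and the port number are our own):

.. code:: bro

    # Sketch: redirect all packets of a connection to another switch port
    # on the forward path.
    function our_redirect_connection(c: conn_id, out_port: count, t: interval): string
        {
        local e = NetControl::Entity($ty=NetControl::CONNECTION, $conn=c);
        local r = NetControl::Rule($ty=NetControl::REDIRECT,
            $target=NetControl::FORWARD, $entity=e, $expire=t, $out_port=out_port);
        return NetControl::add_rule(r);
        }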
To give an example of how to construct your own rule, we are going to write
our own version of the :bro:see:`NetControl::drop_connection` function. The only
difference between our function and the one provided by NetControl is that
the NetControl function has additional functionality, e.g. for logging.
Once again, we are going to test our function with a simple example that
drops all connections on the network:
.. btest-include:: ${DOC_ROOT}/frameworks/netcontrol-4-drop.bro
.. btest:: netcontrol-4-drop.bro
@TEST-EXEC: btest-rst-cmd bro -C -r ${TRACES}/tls/ecdhe.pcap ${DOC_ROOT}/frameworks/netcontrol-4-drop.bro
@TEST-EXEC: btest-rst-cmd cat netcontrol.log
The last example shows that :bro:see:`NetControl::add_rule` returns a string
identifier that is unique for each rule (uniqueness is not preserved across
restarts of Bro). This rule id can be used to later remove rules manually using
:bro:see:`NetControl::remove_rule`.
Similar to :bro:see:`NetControl::add_rule`, all the high-level functions also
return their rule IDs, which can be removed in the same way.
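As a sketch of how manual removal can look in practice (the helper event
``our_remove`` and the 10-second delay are our own choices, not part of
NetControl):

.. code:: bro

    global our_remove: event(id: string);

    # Remove the previously added rule again, identified by its rule id.
    event our_remove(id: string)
        {
        NetControl::remove_rule(id);
        }

    event connection_established(c: connection)
        {
        # drop_connection returns the id of the rule it added ("" on error).
        local id = NetControl::drop_connection(c$id, 20 secs);
        if ( id != "" )
            schedule 10 secs { our_remove(id) };
        }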
Interacting with Rules
----------------------
The NetControl framework offers a number of different ways to interact with
rules. Before a rule is applied by the framework, a number of different hooks
allow you to either modify or discard rules before they are added. Furthermore,
a number of events can be used to track the lifecycle of a rule while it is
being managed by the NetControl framework. It is also possible to query and
access the current set of active rules.
Rule Policy
***********
The hook :bro:see:`NetControl::rule_policy` provides the mechanism for modifying
or discarding a rule before it is sent onwards to the backends. Hooks can be
thought of as multi-bodied functions and using them looks very similar to
handling events. In contrast to events, they are processed immediately. Like
events, hooks can have priorities to sort the order in which they are applied.
Hooks can use the ``break`` keyword to show that processing should be aborted;
if any :bro:see:`NetControl::rule_policy` hook uses ``break``, the rule will be
discarded before further processing.
Here is a simple example which tells Bro to discard all rules for connections
originating from the 192.168.* network:
.. btest-include:: ${DOC_ROOT}/frameworks/netcontrol-5-hook.bro
.. btest:: netcontrol-5-hook.bro
@TEST-EXEC: btest-rst-cmd bro -C -r ${TRACES}/tls/ecdhe.pcap ${DOC_ROOT}/frameworks/netcontrol-5-hook.bro
NetControl Events
*****************
In addition to the hooks, the NetControl framework offers a variety of events
that are raised by the framework to allow users to track rules, as well as the
state of the framework.
We already encountered and used one event of the NetControl framework,
:bro:see:`NetControl::init`, which is used to initialize the framework. After
the framework has finished initialization and is ready to accept rules, the
:bro:see:`NetControl::init_done` event will be raised.
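For example, a script that wants to install a static block right at startup
should wait for this event; a sketch (the address and duration are placeholders):

.. code:: bro

    event NetControl::init_done()
        {
        # The framework is ready; rules added now will reach the backends.
        NetControl::drop_address(203.0.113.1, 10min);
        }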
When rules are added to the framework, the following events will be called in
this order:
.. list-table::
:widths: 20 80
:header-rows: 1
* - Event
- Description
* - :bro:see:`NetControl::rule_new`
- Signals that a new rule is created by the NetControl framework due to
:bro:see:`NetControl::add_rule`. At this point, the rule has not
yet been added to any backend.
* - :bro:see:`NetControl::rule_added`
- Signals that a new rule has successfully been added by a backend.
* - :bro:see:`NetControl::rule_exists`
- This event is raised instead of :bro:see:`NetControl::rule_added` when a
backend reports that the rule already existed.
* - :bro:see:`NetControl::rule_timeout`
- Signals that a rule timeout was reached. If the hardware does not support
automatic timeouts, the NetControl framework will automatically call
:bro:see:`NetControl::remove_rule`.
* - :bro:see:`NetControl::rule_removed`
- Signals that a rule has successfully been removed by a backend.
* - :bro:see:`NetControl::rule_destroyed`
- This event is the counterpart to :bro:see:`NetControl::rule_added`, and
reports that a rule is no longer being tracked by the NetControl framework.
This happens, for example, when a rule was removed from all backends.
* - :bro:see:`NetControl::rule_error`
- This event is raised whenever an error occurs during any rule operation.
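To track this lifecycle from a user script, these events can be handled
directly. A minimal sketch (the printed output is our own; we assume the
events carry the rule, the plugin state, and an optional message string):

.. code:: bro

    event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string)
        {
        print "rule added", r$id;
        }

    event NetControl::rule_error(r: NetControl::Rule, p: NetControl::PluginState, msg: string)
        {
        print "rule error", r$id, msg;
        }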
Finding active rules
********************
The NetControl framework provides two functions for finding currently active
rules: :bro:see:`NetControl::find_rules_addr` finds all rules that affect a
certain IP address and :bro:see:`NetControl::find_rules_subnet` finds all rules
that affect a specified subnet.
Consider, for example, the case where a Bro instance monitors the traffic at the
border, before any firewall or switch rules are applied. In this case, Bro will
still be able to see connection attempts from already blocked IP addresses, and
:bro:see:`NetControl::find_rules_addr` can be used to check whether an
address was already blocked in the past.
Here is a simple example, which uses a trace that contains two connections from
the same IP address. After the first connection has been dropped, the script
recognizes during the second connection that the address is already blocked.
.. btest-include:: ${DOC_ROOT}/frameworks/netcontrol-6-find.bro
.. btest:: netcontrol-6-find.bro
@TEST-EXEC: btest-rst-cmd bro -C -r ${TRACES}/tls/google-duplicate.trace ${DOC_ROOT}/frameworks/netcontrol-6-find.bro
Notice that the functions return vectors because several rules that affect one
IP address can exist simultaneously; for example, there could be
rules with different priorities, or rules for a subnet that the IP address is
part of.
.. _framework-netcontrol-catchrelease:
Catch and Release
-----------------
We already mentioned earlier that in addition to the
:bro:see:`NetControl::drop_connection` and :bro:see:`NetControl::drop_address`
functions, which drop a connection or address for a specified amount of time,
NetControl also comes with a blocking function that uses an approach called
*catch and release*.
Catch and release is a blocking scheme that conserves valuable rule space in
your hardware. Instead of using long-lasting blocks, catch and release first
only installs blocks for a short amount of time (typically a few minutes). After
these minutes pass, the block is lifted, but the IP address is added to a
watchlist, and it will immediately be re-blocked (for a longer
amount of time) if it is seen again in any traffic, regardless of whether the
new traffic triggers an alert or not.
This makes catch and release blocks similar to normal, longer duration blocks,
while only requiring a small amount of space for the currently active rules. IP
addresses that are only seen once for a short time are blocked for a few
minutes, monitored for a while, and then forgotten. IP addresses that keep
appearing will get re-blocked for longer amounts of time.
In contrast to the other high-level functions that we documented so far, the
catch and release functionality is much more complex and adds a number of
different specialized functions to NetControl. The documentation for catch and
release is contained in the file
:doc:`/scripts/base/frameworks/netcontrol/catch-and-release.bro`.
Using catch and release in your scripts is easy; just use
:bro:see:`NetControl::drop_address_catch_release` as in this example:
.. btest-include:: ${DOC_ROOT}/frameworks/netcontrol-7-catch-release.bro
.. btest:: netcontrol-7-catch-release.bro
@TEST-EXEC: btest-rst-cmd bro -C -r ${TRACES}/tls/ecdhe.pcap ${DOC_ROOT}/frameworks/netcontrol-7-catch-release.bro
Note that you do not have to provide the block time for catch and release;
instead, catch and release uses the time intervals specified in
:bro:see:`NetControl::catch_release_intervals` (by default 10 minutes, 1 hour,
24 hours, 7 days). That means when an address is first blocked, it is blocked
for 10 minutes and monitored for 1 hour. If the address reappears after the
first 10 minutes, it is blocked for 1 hour and then monitored for 24 hours, etc.
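These intervals are redefinable, so they can be adapted to your environment;
a sketch (the chosen values are arbitrary):

.. code:: bro

    # Shorten the catch-and-release block/watch durations.
    redef NetControl::catch_release_intervals = vector(5min, 30min, 12hr, 3days);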
Catch and release adds its own log file (netcontrol_catch_release.log) in
addition to the already existing ones:
.. btest:: netcontrol-7-catch-release.bro
@TEST-EXEC: btest-rst-cmd cat netcontrol_catch_release.log
In addition to the blocking function, catch and release comes with the
:bro:see:`NetControl::get_catch_release_info` function to
check if an address is already blocked by catch and release (and get information
about the block). The :bro:see:`NetControl::unblock_address_catch_release`
function can be used to unblock addresses from catch and release.
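A sketch of both calls (we assume the signatures from catch-and-release.bro:
both take an address, and unblocking accepts an optional reason string):

.. code:: bro

    event connection_established(c: connection)
        {
        # Query the catch-and-release state for the originator...
        local bi = NetControl::get_catch_release_info(c$id$orig_h);
        print "catch-and-release state", bi;
        # ... and lift the block again.
        NetControl::unblock_address_catch_release(c$id$orig_h, "manual unblock");
        }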
.. note::
Since catch and release does its own connection tracking in addition to the
tracking used by the NetControl framework, it is not sufficient to remove
rules that were added by catch and release using :bro:see:`NetControl::remove_rule`.
You have to use :bro:see:`NetControl::unblock_address_catch_release` in this
case.
.. _framework-netcontrol-plugins:
NetControl Plugins
==================
Using the existing plugins
--------------------------
In the API part of the documentation, we exclusively used the debug plugin,
which simply outputs its actions to the screen. In addition to this debugging
plugin, Bro ships with a small number of plugins that can be used to interface
the NetControl framework with your networking hardware and software.
The plugins that currently ship with NetControl are:
.. list-table::
:widths: 15 55
:header-rows: 1
* - Plugin name
- Description
* - OpenFlow plugin
- This is the most fully featured plugin which allows the NetControl
framework to be interfaced with OpenFlow switches. The source of this
plugin is contained in :doc:`/scripts/base/frameworks/netcontrol/plugins/openflow.bro`.
* - Broker plugin
- This plugin provides a generic way to send NetControl commands using the
new Bro communication library (Broker). External programs can receive
the rules and take action; we provide an example script that calls
command-line programs triggered by NetControl. The source of this
plugin is contained in :doc:`/scripts/base/frameworks/netcontrol/plugins/broker.bro`.
* - acld plugin
- This plugin adds support for the acld daemon, which can interface with
several switches and routers. The current version of acld is available
from the `LBL ftp server <ftp://ftp.ee.lbl.gov/acld.tar.gz>`_. The source of this
plugin is contained in :doc:`/scripts/base/frameworks/netcontrol/plugins/acld.bro`.
* - PacketFilter plugin
- This plugin uses the Bro process-level packet filter (see
:bro:see:`install_src_net_filter` and
:bro:see:`install_dst_net_filter`). Since the functionality of the
PacketFilter is limited, this plugin is mostly for demonstration purposes. The source of this
plugin is contained in :doc:`/scripts/base/frameworks/netcontrol/plugins/packetfilter.bro`.
* - Debug plugin
- The debug plugin simply outputs its actions to the standard output. The source of this
plugin is contained in :doc:`/scripts/base/frameworks/netcontrol/plugins/debug.bro`.
Activating plugins
******************
In the API reference part of this document, we already used the debug plugin. To
use the plugin, we first had to instantiate it by calling
:bro:see:`NetControl::create_debug` and then add it to NetControl by
calling :bro:see:`NetControl::activate`.
As we already hinted before, NetControl supports having several plugins that are
active at the same time. The second argument to the `NetControl::activate`
function is the priority of the backend that was just added. Each rule is sent
to the plugins in order, from highest priority to lowest priority. Each backend
can then choose whether it accepts the rule and pushes it out to the hardware
that it manages, or whether it rejects the rule; in the latter case, the NetControl
framework will try to apply the rule to the backend with the next lower
priority. If no backend accepts a rule, the rule insertion is marked as failed.
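As a minimal sketch of these mechanics, two debug backends can be activated
with different priorities; rules are offered to the priority-10 instance first
(since the debug plugin accepts every rule, the second backend never sees one,
so this only illustrates the ordering):

.. code:: bro

    event NetControl::init()
        {
        local first = NetControl::create_debug(T);
        local second = NetControl::create_debug(T);
        NetControl::activate(first, 10);  # asked first
        NetControl::activate(second, 0);  # fallback
        }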
The choice of whether a rule is accepted or rejected rests completely with each plugin.
The debug plugin we used so far just accepts all rules. However, for other
plugins you can specify what rules they will accept. Consider, for example, a
network with two OpenFlow switches. The first switch forwards packets from the
network to the external world, the second switch sits in front of your Bro
cluster to provide packet shunting. In this case, you can add two OpenFlow
backends to NetControl. When you create the instances using
:bro:see:`NetControl::create_openflow`, you set the `monitor` and `forward`
attributes of the configuration in :bro:see:`NetControl::OfConfig`
appropriately. Afterwards, one of the backends will only accept rules for the
monitor path; the other backend will only accept rules for the forward path.
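A sketch of this two-switch setup, using the OpenFlow log controller in place
of real switches and assuming the ``monitor``/``forward`` flags of
:bro:see:`NetControl::OfConfig`:

.. code:: bro

    event NetControl::init()
        {
        # Border switch: implements rules on the forward path only.
        local of_border = OpenFlow::log_new(1);
        NetControl::activate(NetControl::create_openflow(of_border,
            [$monitor=F, $forward=T]), 0);

        # Switch in front of the Bro cluster: monitor path only.
        local of_cluster = OpenFlow::log_new(2);
        NetControl::activate(NetControl::create_openflow(of_cluster,
            [$monitor=T, $forward=F]), 0);
        }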
Commonly, plugins also support predicate functions that allow the user to
specify restrictions on the rules that they will accept. This can, for example, be
used if you have a network where certain switches are responsible for specific
subnets. The predicate can examine the subnet of the rule and only accept the
rule if the rule matches the subnet that the specific switch is responsible for.
To give an example, the following script adds two backends to NetControl. One
backend is the NetControl debug backend, which just outputs the rules to the
console. The second backend is an OpenFlow backend, which uses the OpenFlow
debug mode that outputs the OpenFlow rules to openflow.log. The OpenFlow
backend uses a predicate function to only accept rules with a source address in
the 192.168.17.0/24 network; all other rules will be passed on to the debug
plugin. We manually block a few addresses in the
:bro:see:`NetControl::init_done` event to verify the correct functionality.
.. btest-include:: ${DOC_ROOT}/frameworks/netcontrol-8-multiple.bro
.. btest:: netcontrol-8-multiple.bro
@TEST-EXEC: btest-rst-cmd bro ${DOC_ROOT}/frameworks/netcontrol-8-multiple.bro
As you can see, only the single block affecting the 192.168.17.0/24 network is
output to the command line. The other two lines are handled by the OpenFlow
plugin. We can verify this by looking at netcontrol.log. The plugin column shows
which plugin handled a rule and reveals that two rules were handled by OpenFlow:
.. btest:: netcontrol-8-multiple.bro
@TEST-EXEC: btest-rst-cmd cat netcontrol.log
Furthermore, openflow.log also shows the two added rules, converted to OpenFlow
flow mods:
.. btest:: netcontrol-8-multiple.bro
@TEST-EXEC: btest-rst-cmd cat openflow.log
.. note::
You might have asked yourself what happens when you add two or more backends
with the same priority. In this case, the rule is sent to all of these backends
simultaneously. This can be useful, for example when you have redundant
switches that should keep the same rule state.
Interfacing with external hardware
**********************************
Now that we know which plugins exist, and how they can be added to NetControl,
it is time to discuss how we can interface Bro with actual hardware. The typical
way to accomplish this is to use the Bro communication library (Broker), which
can be used to exchange Bro events with external programs and scripts. The
NetControl plugins can use Broker to send events to external programs, which can
then take action depending on these events.
The following figure shows this architecture with the example of the OpenFlow
plugin. The OpenFlow plugin uses Broker to send events to an external Python
script, which uses the `Ryu SDN controller <https://osrg.github.io/ryu/>`_ to
communicate with the switch.
.. figure:: netcontrol-openflow.png
:width: 600
:align: center
:alt: NetControl and OpenFlow architecture.
:target: ../_images/netcontrol-openflow.png
NetControl and OpenFlow architecture (click to enlarge).
The Python scripts that are used to interface with the available NetControl
plugins are contained in the `bro-netcontrol` repository (`github link <https://github.com/bro/bro-netcontrol>`_).
The repository contains scripts for the OpenFlow as well as the acld plugin.
Furthermore, it contains a script for the broker plugin which can be used to
call configurable command-line programs.
The repository also contains documentation on how to install these connectors.
The `netcontrol` directory contains an API that allows you to write your own
connectors to the broker plugin.
.. note::
Note that the API of the Broker communication library is not finalized yet.
You might have to update such scripts for use with future Bro versions.
Writing plugins
---------------
In addition to using the plugins that are part of NetControl, you can write your
own plugins to interface with hard- or software that we currently do not support
out of the box.
Creating your own plugin is easy; besides a bit of boilerplate, you only need to
create two functions: one that is called when a rule is added, and one that is
called when a rule is removed. The following script creates a minimal plugin
that just outputs a rule when it is added or removed. Note that you have to
raise the :bro:see:`NetControl::rule_added` and
:bro:see:`NetControl::rule_removed` events in your plugin to let NetControl know
when a rule has been added or removed successfully.
.. btest-include:: ${DOC_ROOT}/frameworks/netcontrol-9-skeleton.bro
This example is already fully functional and we can use it with a script similar
to our very first example:
.. btest-include:: ${DOC_ROOT}/frameworks/netcontrol-10-use-skeleton.bro
.. btest:: netcontrol-9-skeleton.bro
@TEST-EXEC: btest-rst-cmd bro -C -r ${TRACES}/tls/ecdhe.pcap ${DOC_ROOT}/frameworks/netcontrol-9-skeleton.bro ${DOC_ROOT}/frameworks/netcontrol-10-use-skeleton.bro
If you want to write your own plugins, it will be worthwhile to look at the
plugins that ship with the NetControl framework to see how they define the
predicates and interact with Broker.


@ -83,9 +83,9 @@ The hook :bro:see:`Notice::policy` provides the mechanism for applying
actions and generally modifying the notice before it's sent onward to actions and generally modifying the notice before it's sent onward to
the action plugins. Hooks can be thought of as multi-bodied functions the action plugins. Hooks can be thought of as multi-bodied functions
and using them looks very similar to handling events. The difference and using them looks very similar to handling events. The difference
is that they don't go through the event queue like events. Users should is that they don't go through the event queue like events. Users can
directly make modifications to the :bro:see:`Notice::Info` record alter notice processing by directly modifying fields in the
given as the argument to the hook. :bro:see:`Notice::Info` record given as the argument to the hook.
Here's a simple example which tells Bro to send an email for all notices of Here's a simple example which tells Bro to send an email for all notices of
type :bro:see:`SSH::Password_Guessing` if the guesser attempted to log in to type :bro:see:`SSH::Password_Guessing` if the guesser attempted to log in to


@ -10,40 +10,53 @@ there's two suggested approaches: either install Bro using the same
installation prefix directory as before, or pick a new prefix and copy installation prefix directory as before, or pick a new prefix and copy
local customizations over. local customizations over.
Regardless of which approach you choose, if you are using BroControl, then
before doing the upgrade you should stop all running Bro processes with the
"broctl stop" command. After the upgrade is complete then you will need
to run "broctl deploy".
In the following we summarize general guidelines for upgrading, see In the following we summarize general guidelines for upgrading, see
the :ref:`release-notes` for version-specific information. the :ref:`release-notes` for version-specific information.
Reusing Previous Install Prefix Reusing Previous Install Prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you choose to configure and install Bro with the same prefix If you choose to configure and install Bro with the same prefix
directory as before, local customization and configuration to files in directory as before, first stop all running Bro instances in your
``$prefix/share/bro/site`` and ``$prefix/etc`` won't be overwritten cluster (if using BroControl, issue the "broctl stop" command on the
(``$prefix`` indicating the root of where Bro was installed). Also, logs manager host). Next, make a backup of the Bro install prefix directory.
generated at run-time won't be touched by the upgrade. Backing up local
changes before upgrading is still recommended.
After upgrading, remember to check ``$prefix/share/bro/site`` and During the upgrade, any file in the install prefix may be
``$prefix/etc`` for ``.example`` files, which indicate that the overwritten or removed, except for local customization of
distribution's version of the file differs from the local one, and therefore, files in the ``$prefix/share/bro/site`` and ``$prefix/etc``
may include local changes. Review the differences and make adjustments directories (``$prefix`` indicating the root
as necessary. Use the new version for differences that aren't a result of of where Bro was installed). Also, logs generated at run-time
a local change. won't be touched by the upgrade.
After upgrading, remember to check the ``$prefix/share/bro/site`` and
``$prefix/etc`` directories for files with a file extension of ``.example``,
which indicate that the distribution's version of the file differs from the
local one, and therefore, may include local changes. Review the
differences and make adjustments as necessary. Use the new version
for differences that aren't a result of a local change.
Finally, if using BroControl, then issue the "broctl deploy" command. This
command will check for any policy script errors, install the new version
of Bro to all machines in your cluster, and then it will start Bro.
Using a New Install Prefix Using a New Install Prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~
To install the newer version in a different prefix directory than before, To install the newer version in a different prefix directory than before,
copy local customization and configuration files from ``$prefix/share/bro/site`` first stop all running Bro instances in your cluster (if using BroControl,
and ``$prefix/etc`` to the new location (``$prefix`` indicating the root of then issue a "broctl stop" command on the manager host). Next,
where Bro was originally installed). Review the files for differences install the new version of Bro in a new directory.
Next, copy local customization and configuration files
from the ``$prefix/share/bro/site`` and ``$prefix/etc`` directories to the
new location (``$prefix`` indicating the root of where Bro was originally
installed). Review the files for differences
before copying and make adjustments as necessary (use the new version for before copying and make adjustments as necessary (use the new version for
differences that aren't a result of a local change). Of particular note, differences that aren't a result of a local change). Of particular note,
the copied version of ``$prefix/etc/broctl.cfg`` is likely to need changes the copied version of ``$prefix/etc/broctl.cfg`` is likely to need changes
to any settings that specify a pathname. to any settings that specify a pathname.
Finally, if using BroControl, then issue the "broctl deploy" command. This
command will check for any policy script errors, install the new version
of Bro to all machines in your cluster, and then it will start Bro.


@ -31,8 +31,7 @@ before you begin:
* BIND8 library * BIND8 library
* Libz * Libz
* Bash (for BroControl) * Bash (for BroControl)
* Python (for BroControl) * Python 2.6 or greater (for BroControl)
* C++ Actor Framework (CAF) version 0.14 (http://actor-framework.org)
To build Bro from source, the following additional dependencies are required: To build Bro from source, the following additional dependencies are required:
@ -47,8 +46,6 @@ To build Bro from source, the following additional dependencies are required:
* zlib headers * zlib headers
* Python * Python
To install CAF, first download the source code of the required version from: https://github.com/actor-framework/actor-framework/releases
To install the required dependencies, you can use: To install the required dependencies, you can use:
* RPM/RedHat-based Linux: * RPM/RedHat-based Linux:
@ -72,21 +69,40 @@ To install the required dependencies, you can use:
sudo pkg install bash cmake swig bison python py27-sqlite3 sudo pkg install bash cmake swig bison python py27-sqlite3
Note that in older versions of FreeBSD, you might have to use the For older versions of FreeBSD (especially FreeBSD 9.x), the system compiler
"pkg_add -r" command instead of "pkg install". is not new enough to compile Bro. For these systems, you will have to install
a newer compiler using pkg; the ``clang34`` package should work.
You will also have to define several environment variables on these older
systems to use the new compiler and headers similar to this before calling
configure:
.. console::
export CC=clang34
export CXX=clang++34
export CXXFLAGS="-stdlib=libc++ -I${LOCALBASE}/include/c++/v1 -L${LOCALBASE}/lib"
export LDFLAGS="-pthread"
* Mac OS X: * Mac OS X:
Compiling source code on Macs requires first installing Xcode_ (in older Compiling source code on Macs requires first installing either Xcode_
versions of Xcode, you would then need to go through its or the "Command Line Tools" (which is a much smaller download). To check
"Preferences..." -> "Downloads" menus to install the "Command Line Tools" if either is installed, run the ``xcode-select -p`` command. If you see
component). an error message, then neither is installed and you can then run
``xcode-select --install`` which will prompt you to either get Xcode (by
clicking "Get Xcode") or to install the command line tools (by
clicking "Install").
OS X comes with all required dependencies except for CMake_, SWIG_, and CAF. OS X comes with all required dependencies except for CMake_, SWIG_,
Distributions of these dependencies can likely be obtained from your and OpenSSL (OpenSSL headers were removed in OS X 10.11, therefore OpenSSL
preferred Mac OS X package management system (e.g. Homebrew_, MacPorts_, must be installed manually for OS X versions 10.11 or newer).
or Fink_). Specifically for Homebrew, the ``cmake``, ``swig``, Distributions of these dependencies can
and ``caf`` packages provide the required dependencies. likely be obtained from your preferred Mac OS X package management
system (e.g. Homebrew_, MacPorts_, or Fink_). Specifically for
Homebrew, the ``cmake``, ``swig``, and ``openssl`` packages
provide the required dependencies. For MacPorts, the ``cmake``, ``swig``,
``swig-python``, and ``openssl`` packages provide the required dependencies.
Optional Dependencies Optional Dependencies
@ -95,6 +111,7 @@ Optional Dependencies
Bro can make use of some optional libraries and tools if they are found at Bro can make use of some optional libraries and tools if they are found at
build time: build time:
* C++ Actor Framework (CAF) version 0.14 (http://actor-framework.org)
* LibGeoIP (for geolocating IP addresses) * LibGeoIP (for geolocating IP addresses)
* sendmail (enables Bro and BroControl to send mail) * sendmail (enables Bro and BroControl to send mail)
* curl (used by a Bro script that implements active HTTP) * curl (used by a Bro script that implements active HTTP)
@ -126,14 +143,8 @@ platforms for binary releases and for installation instructions.
Linux based binary installations are usually performed by adding Linux based binary installations are usually performed by adding
information about the Bro packages to the respective system packaging information about the Bro packages to the respective system packaging
tool. Then the usual system utilities such as ``apt``, ``yum`` tool. Then the usual system utilities such as ``apt``, ``dnf``, ``yum``,
or ``zypper`` are used to perform the installation. By default, or ``zypper`` are used to perform the installation.
installations of binary packages will go into ``/opt/bro``.
* MacOS Disk Image with Installer
Just open the ``Bro-*.dmg`` and then run the ``.pkg`` installer.
Everything installed by the package will go into ``/opt/bro``.
The primary install prefix for binary packages is ``/opt/bro``. The primary install prefix for binary packages is ``/opt/bro``.


@ -197,7 +197,7 @@ file:
Often times log files from multiple sources are stored in UTC time to Often times log files from multiple sources are stored in UTC time to
allow easy correlation. Converting the timestamp from a log file to allow easy correlation. Converting the timestamp from a log file to
UTC can be accomplished with the ``-u`` option: UTC can be accomplished with the ``-u`` option:
.. btest:: using_bro .. btest:: using_bro
@ -227,7 +227,7 @@ trip. A common progression of review includes correlating a session
across multiple log files. As a connection is processed by Bro, a across multiple log files. As a connection is processed by Bro, a
unique identifier is assigned to each session. This unique identifier unique identifier is assigned to each session. This unique identifier
is generally included in any log file entry associated with that is generally included in any log file entry associated with that
connection and can be used to cross-reference different log files. connection and can be used to cross-reference different log files.
A simple example would be to cross-reference a UID seen in a A simple example would be to cross-reference a UID seen in a
``conn.log`` file. Here, we're looking for the connection with the ``conn.log`` file. Here, we're looking for the connection with the
@ -244,7 +244,7 @@ crossreference that with the UIDs in the ``http.log`` file.
.. btest:: using_bro .. btest:: using_bro
@TEST-EXEC: btest-rst-cmd "cat http.log | bro-cut uid id.resp_h method status_code host uri | grep VW0XPVINV8a" @TEST-EXEC: btest-rst-cmd "cat http.log | bro-cut uid id.resp_h method status_code host uri | grep UM0KZ3MLUfNB0cl11"
As you can see there are two HTTP ``GET`` requests within the As you can see there are two HTTP ``GET`` requests within the
session that Bro identified and logged. Given that HTTP is a stream session that Bro identified and logged. Given that HTTP is a stream


@ -78,15 +78,6 @@ You can leave it running for now, but to stop this Bro instance you would do:
[BroControl] > stop [BroControl] > stop
We also recommend to insert the following entry into the crontab of the user
running BroControl::
0-59/5 * * * * $PREFIX/bin/broctl cron
This will perform a number of regular housekeeping tasks, including
verifying that the process is still running (and restarting if not in
case of any abnormal termination).
Browsing Log Files Browsing Log Files
------------------ ------------------
@ -232,23 +223,25 @@ That's exactly what we want to do for the first notice. Add to ``local.bro``:
inside the module. inside the module.
Then go into the BroControl shell to check whether the configuration change Then go into the BroControl shell to check whether the configuration change
is valid before installing it and then restarting the Bro instance: is valid before installing it and then restarting the Bro instance. The
"deploy" command does all of this automatically:
.. console:: .. console::
[BroControl] > check [BroControl] > deploy
bro scripts are ok. checking configurations ...
[BroControl] > install installing ...
removing old policies in /usr/local/bro/spool/policy/site ... done. removing old policies in /usr/local/bro/spool/installed-scripts-do-not-touch/site ...
removing old policies in /usr/local/bro/spool/policy/auto ... done. removing old policies in /usr/local/bro/spool/installed-scripts-do-not-touch/auto ...
creating policy directories ... done. creating policy directories ...
installing site policies ... done. installing site policies ...
generating standalone-layout.bro ... done. generating standalone-layout.bro ...
generating local-networks.bro ... done. generating local-networks.bro ...
generating broctl-config.bro ... done. generating broctl-config.bro ...
updating nodes ... done. generating broctl-config.sh ...
[BroControl] > restart stopping ...
stopping bro ... stopping bro ...
starting ...
starting bro ... starting bro ...
Now that the SSL notice is ignored, let's look at how to send an email Now that the SSL notice is ignored, let's look at how to send an email
@ -281,8 +274,8 @@ connection field is in the set of watched servers.
order to avoid ambiguity with the built-in address type's use of '.' order to avoid ambiguity with the built-in address type's use of '.'
in IPv4 dotted decimal representations. in IPv4 dotted decimal representations.
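For illustration, here is a minimal sketch of such a check (the ``watched_servers`` set and its addresses are placeholders, not part of the shipped scripts)::

    const watched_servers: set[addr] = { 192.168.1.100, 192.168.1.101 } &redef;

    hook Notice::policy(n: Notice::Info)
        {
        if ( n?$conn && n$conn$id$resp_h in watched_servers )
            add n$actions[Notice::ACTION_EMAIL];
        }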
Remember, to finalize that configuration change perform the ``check``, Remember, to finalize that configuration change perform the ``deploy``
``install``, ``restart`` commands in that order inside the BroControl shell. command inside the BroControl shell.
Next Steps Next Steps
---------- ----------

View file

@ -14,6 +14,8 @@ Network Protocols
+============================+=======================================+=================================+ +============================+=======================================+=================================+
| conn.log | TCP/UDP/ICMP connections | :bro:type:`Conn::Info` | | conn.log | TCP/UDP/ICMP connections | :bro:type:`Conn::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| dce_rpc.log | Distributed Computing Environment/RPC | :bro:type:`DCE_RPC::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| dhcp.log | DHCP leases | :bro:type:`DHCP::Info` | | dhcp.log | DHCP leases | :bro:type:`DHCP::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| dnp3.log | DNP3 requests and replies | :bro:type:`DNP3::Info` | | dnp3.log | DNP3 requests and replies | :bro:type:`DNP3::Info` |
@ -35,12 +37,22 @@ Network Protocols
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| mysql.log | MySQL | :bro:type:`MySQL::Info` | | mysql.log | MySQL | :bro:type:`MySQL::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| ntlm.log | NT LAN Manager (NTLM) | :bro:type:`NTLM::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| radius.log | RADIUS authentication attempts | :bro:type:`RADIUS::Info` | | radius.log | RADIUS authentication attempts | :bro:type:`RADIUS::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| rdp.log | RDP | :bro:type:`RDP::Info` | | rdp.log | RDP | :bro:type:`RDP::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| rfb.log | Remote Framebuffer (RFB) | :bro:type:`RFB::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| sip.log | SIP | :bro:type:`SIP::Info` | | sip.log | SIP | :bro:type:`SIP::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| smb_cmd.log | SMB commands | :bro:type:`SMB::CmdInfo` |
+----------------------------+---------------------------------------+---------------------------------+
| smb_files.log | SMB files | :bro:type:`SMB::FileInfo` |
+----------------------------+---------------------------------------+---------------------------------+
| smb_mapping.log | SMB trees | :bro:type:`SMB::TreeInfo` |
+----------------------------+---------------------------------------+---------------------------------+
| smtp.log | SMTP transactions | :bro:type:`SMTP::Info` | | smtp.log | SMTP transactions | :bro:type:`SMTP::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| snmp.log | SNMP messages | :bro:type:`SNMP::Info` | | snmp.log | SNMP messages | :bro:type:`SNMP::Info` |
@ -69,6 +81,23 @@ Files
| x509.log | X.509 certificate info | :bro:type:`X509::Info` | | x509.log | X.509 certificate info | :bro:type:`X509::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
NetControl
----------
+------------------------------+---------------------------------------+------------------------------------------+
| Log File | Description | Field Descriptions |
+==============================+=======================================+==========================================+
| netcontrol.log | NetControl actions | :bro:type:`NetControl::Info` |
+------------------------------+---------------------------------------+------------------------------------------+
| netcontrol_drop.log | NetControl drop actions | :bro:type:`NetControl::DropInfo` |
+------------------------------+---------------------------------------+------------------------------------------+
| netcontrol_shunt.log | NetControl shunt actions | :bro:type:`NetControl::ShuntInfo` |
+------------------------------+---------------------------------------+------------------------------------------+
| netcontrol_catch_release.log | NetControl catch and release actions | :bro:type:`NetControl::CatchReleaseInfo` |
+------------------------------+---------------------------------------+------------------------------------------+
| openflow.log | OpenFlow debug log | :bro:type:`OpenFlow::Info` |
+------------------------------+---------------------------------------+------------------------------------------+
Detection Detection
--------- ---------
@ -93,8 +122,6 @@ Network Observations
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| Log File | Description | Field Descriptions | | Log File | Description | Field Descriptions |
+============================+=======================================+=================================+ +============================+=======================================+=================================+
| app_stats.log | Web app usage statistics | :bro:type:`AppStats::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| known_certs.log | SSL certificates | :bro:type:`Known::CertsInfo` | | known_certs.log | SSL certificates | :bro:type:`Known::CertsInfo` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| known_devices.log | MAC addresses of devices on the | :bro:type:`Known::DevicesInfo` | | known_devices.log | MAC addresses of devices on the | :bro:type:`Known::DevicesInfo` |

View file

@ -277,16 +277,25 @@ Here are the statements that the Bro scripting language supports.
.. bro:keyword:: delete .. bro:keyword:: delete
The "delete" statement is used to remove an element from a The "delete" statement is used to remove an element from a
:bro:type:`set` or :bro:type:`table`. Nothing happens if the :bro:type:`set` or :bro:type:`table`, or to remove a value from
specified element does not exist in the set or table. a :bro:type:`record` field that has the :bro:attr:`&optional` attribute.
When attempting to remove an element from a set or table,
nothing happens if the specified index does not exist.
When attempting to remove a value from an "&optional" record field,
nothing happens if that field doesn't have a value.
Example:: Example::
local myset = set("this", "test"); local myset = set("this", "test");
local mytable = table(["key1"] = 80/tcp, ["key2"] = 53/udp); local mytable = table(["key1"] = 80/tcp, ["key2"] = 53/udp);
local myrec = MyRecordType($a = 1, $b = 2);
delete myset["test"]; delete myset["test"];
delete mytable["key1"]; delete mytable["key1"];
# In this example, "b" must have the "&optional" attribute
delete myrec$b;
.. bro:keyword:: event .. bro:keyword:: event
The "event" statement immediately queues invocation of an event handler. The "event" statement immediately queues invocation of an event handler.
@ -306,30 +315,33 @@ Here are the statements that the Bro scripting language supports.
.. bro:keyword:: for .. bro:keyword:: for
A "for" loop iterates over each element in a string, set, vector, or A "for" loop iterates over each element in a string, set, vector, or
table and executes a statement for each iteration. Currently, table and executes a statement for each iteration (note that the order
modifying a container's membership while iterating over it may in which the loop iterates over the elements in a set or a table is
result in undefined behavior, so avoid adding or removing elements nondeterministic). However, no loop iterations occur if the string,
inside the loop. set, vector, or table is empty.
For each iteration of the loop, a loop variable will be assigned to an For each iteration of the loop, a loop variable will be assigned to an
element if the expression evaluates to a string or set, or an index if element if the expression evaluates to a string or set, or an index if
the expression evaluates to a vector or table. Then the statement the expression evaluates to a vector or table. Then the statement
is executed. However, the statement will not be executed if the expression is executed.
evaluates to an object with no elements.
If the expression is a table or a set with more than one index, then the If the expression is a table or a set with more than one index, then the
loop variable must be specified as a comma-separated list of different loop variable must be specified as a comma-separated list of different
loop variables (one for each index), enclosed in brackets. loop variables (one for each index), enclosed in brackets.
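For example, a short sketch of iterating over a table with two indices (the names and values are illustrative)::

    local mytable = table(["one", 1] = 1.1, ["two", 2] = 2.2);

    for ( [s, n] in mytable )
        print s, n;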
A :bro:keyword:`break` statement can be used at any time to immediately
terminate the "for" loop, and a :bro:keyword:`next` statement can be
used to skip to the next loop iteration.
Note that the loop variable in a "for" statement is not allowed to be Note that the loop variable in a "for" statement is not allowed to be
a global variable, and it does not need to be declared prior to the "for" a global variable, and it does not need to be declared prior to the "for"
statement. The type will be inferred from the elements of the statement. The type will be inferred from the elements of the
expression. expression.
Currently, modifying a container's membership while iterating over it may
result in undefined behavior, so do not add or remove elements
inside the loop.
A :bro:keyword:`break` statement will immediately terminate the "for"
loop, and a :bro:keyword:`next` statement will skip to the next loop
iteration.
Example:: Example::
local myset = set(80/tcp, 81/tcp); local myset = set(80/tcp, 81/tcp);
@ -532,8 +544,6 @@ Here are the statements that the Bro scripting language supports.
end with either a :bro:keyword:`break`, :bro:keyword:`fallthrough`, or end with either a :bro:keyword:`break`, :bro:keyword:`fallthrough`, or
:bro:keyword:`return` statement (although "return" is allowed only :bro:keyword:`return` statement (although "return" is allowed only
if the "switch" statement is inside a function, hook, or event handler). if the "switch" statement is inside a function, hook, or event handler).
If a "case" (or "default") block contain more than one statement, then
there is no need to wrap them in braces.
Note that the braces in a "switch" statement are always required (these Note that the braces in a "switch" statement are always required (these
do not indicate the presence of a `compound statement`_), and that no do not indicate the presence of a `compound statement`_), and that no
@ -604,12 +614,9 @@ Here are the statements that the Bro scripting language supports.
if ( skip_ahead() ) if ( skip_ahead() )
next; next;
[...]
if ( finish_up ) if ( finish_up )
break; break;
[...]
} }
.. _compound statement: .. _compound statement:

View file

@ -150,18 +150,17 @@ Here is a more detailed description of each type:
.. bro:type:: string .. bro:type:: string
A type used to hold character-string values which represent text, although A type used to hold bytes which represent text and also can hold
strings in a Bro script can actually contain any arbitrary binary data. arbitrary binary data.
String constants are created by enclosing text within a pair of double String constants are created by enclosing text within a pair of double
quotes ("). A string constant cannot span multiple lines in a Bro script. quotes ("). A string constant cannot span multiple lines in a Bro script.
The backslash character (\\) introduces escape sequences. The The backslash character (\\) introduces escape sequences. Bro recognizes
following escape sequences are recognized: ``\n``, ``\t``, ``\v``, ``\b``, the following escape sequences: ``\\``, ``\n``, ``\t``, ``\v``, ``\b``,
``\r``, ``\f``, ``\a``, ``\ooo`` (where each 'o' is an octal digit), ``\r``, ``\f``, ``\a``, ``\ooo`` (where each 'o' is an octal digit),
``\xhh`` (where each 'h' is a hexadecimal digit). For escape sequences ``\xhh`` (where each 'h' is a hexadecimal digit). If Bro does not
that don't match any of these, Bro will just remove the backslash (so recognize an escape sequence, Bro will ignore the backslash
to represent a literal backslash in a string constant, you just use ("\\g" becomes "g").
two consecutive backslashes).
Strings support concatenation (``+``), and assignment (``=``, ``+=``). Strings support concatenation (``+``), and assignment (``=``, ``+=``).
Strings also support the comparison operators (``==``, ``!=``, ``<``, Strings also support the comparison operators (``==``, ``!=``, ``<``,
@ -181,11 +180,14 @@ Here is a more detailed description of each type:
second-to-last character, etc. Here are a few examples:: second-to-last character, etc. Here are a few examples::
local orig = "0123456789"; local orig = "0123456789";
local second_char = orig[1]; local second_char = orig[1]; # "1"
local last_char = orig[-1]; local last_char = orig[-1]; # "9"
local first_two_chars = orig[:2]; local first_two_chars = orig[:2]; # "01"
local last_two_chars = orig[8:]; local last_two_chars = orig[8:]; # "89"
local no_first_and_last = orig[1:9]; local no_first_and_last = orig[1:9]; # "12345678"
local no_first = orig[1:]; # "123456789"
local no_last = orig[:-1]; # "012345678"
local copy_orig = orig[:]; # "0123456789"
Note that the subscript operator cannot be used to modify a string (i.e., Note that the subscript operator cannot be used to modify a string (i.e.,
it cannot be on the left side of an assignment operator). it cannot be on the left side of an assignment operator).

View file

@ -0,0 +1,25 @@
module Conn;
export {
## The record type which contains column fields of the connection log.
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
proto: transport_proto &log;
service: string &log &optional;
duration: interval &log &optional;
orig_bytes: count &log &optional;
resp_bytes: count &log &optional;
conn_state: string &log &optional;
local_orig: bool &log &optional;
local_resp: bool &log &optional;
missed_bytes: count &log &default=0;
history: string &log &optional;
orig_pkts: count &log &optional;
orig_ip_bytes: count &log &optional;
resp_pkts: count &log &optional;
resp_ip_bytes: count &log &optional;
tunnel_parents: set[string] &log;
};
}
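As a rough sketch of how these fields surface in a script (assuming the optional ``conn`` field has been attached to the connection record, as the conn scripts do)::

    event connection_state_remove(c: connection)
        {
        if ( c?$conn && c$conn?$duration )
            print c$conn$uid, c$conn$duration;
        }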

View file

@ -0,0 +1,7 @@
module HTTP;
export {
## This setting controls whether passwords used in Basic-Auth are
## captured or not.
const default_capture_password = F &redef;
}
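Since the constant is declared with ``&redef``, a site can flip it globally, e.g. from ``local.bro``::

    redef HTTP::default_capture_password = T;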

View file

@ -362,8 +362,7 @@ decrypted from HTTP streams is stored in
:bro:see:`HTTP::default_capture_password` as shown in the stripped down :bro:see:`HTTP::default_capture_password` as shown in the stripped down
excerpt from :doc:`/scripts/base/protocols/http/main.bro` below. excerpt from :doc:`/scripts/base/protocols/http/main.bro` below.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/base/protocols/http/main.bro .. btest-include:: ${DOC_ROOT}/scripting/http_main.bro
:lines: 9-11,20-22,125
Because the constant was declared with the ``&redef`` attribute, if we Because the constant was declared with the ``&redef`` attribute, if we
needed to turn this option on globally, we could do so by adding the needed to turn this option on globally, we could do so by adding the
@ -776,7 +775,7 @@ string against which it will be tested to be on the right.
In the sample above, two local variables are declared to hold our In the sample above, two local variables are declared to hold our
sample sentence and regular expression. Our regular expression in sample sentence and regular expression. Our regular expression in
this case will return true if the string contains either the word this case will return true if the string contains either the word
``quick`` or the word ``fox``. The ``if`` statement in the script uses ``quick`` or the word ``lazy``. The ``if`` statement in the script uses
embedded matching and the ``in`` operator to check for the existence embedded matching and the ``in`` operator to check for the existence
of the pattern within the string. If the statement resolves to true, of the pattern within the string. If the statement resolves to true,
:bro:id:`split` is called to break the string into separate pieces. :bro:id:`split` is called to break the string into separate pieces.
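A sketch of that flow (the variable names are illustrative)::

    local sample = "The quick brown fox jumps over the lazy dog.";
    local pat = /quick|lazy/;

    if ( pat in sample )
        print split(sample, pat);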
@ -825,8 +824,7 @@ example of the ``record`` data type in the earlier sections, the
:bro:type:`Conn::Info`, which corresponds to the fields logged into :bro:type:`Conn::Info`, which corresponds to the fields logged into
``conn.log``, is shown by the excerpt below. ``conn.log``, is shown by the excerpt below.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/base/protocols/conn/main.bro .. btest-include:: ${DOC_ROOT}/scripting/data_type_record.bro
:lines: 10-12,16-17,19,21,23,25,28,31,35,38,57,63,69,75,98,101,105,108,112,116-117,122
Looking at the structure of the definition, a new collection of data Looking at the structure of the definition, a new collection of data
types is being defined as a type called ``Info``. Since this type types is being defined as a type called ``Info``. Since this type
@ -1118,7 +1116,7 @@ Policy, but the script attempts to supply as much information as
possible while staying concise. possible while staying concise.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ssh/interesting-hostnames.bro .. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ssh/interesting-hostnames.bro
:lines: 1-46 :lines: 1-52
While much of the script relates to the actual detection, the parts While much of the script relates to the actual detection, the parts
specific to the Notice Framework are actually quite interesting in specific to the Notice Framework are actually quite interesting in
@ -1126,8 +1124,11 @@ themselves. The script's ``export`` block adds the value
``SSH::Interesting_Hostname_Login`` to the enumerable constant ``SSH::Interesting_Hostname_Login`` to the enumerable constant
``Notice::Type`` to indicate to the Bro core that a new type of notice ``Notice::Type`` to indicate to the Bro core that a new type of notice
is being defined. The script then calls ``NOTICE`` and defines the is being defined. The script then calls ``NOTICE`` and defines the
``$note``, ``$msg``, ``$sub`` and ``$conn`` fields of the ``$note``, ``$msg``, ``$sub``, ``$id``, and ``$uid`` fields of the
:bro:type:`Notice::Info` record. There are two ternary if :bro:type:`Notice::Info` record. (More commonly, one would set
``$conn`` instead; however, this script avoids using the connection
record inside the when-statement for performance reasons.)
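In outline, such a call looks roughly like this (a sketch, not the script verbatim; the event chosen and the message text are illustrative)::

    event ssh_auth_successful(c: connection, auth_method_none: bool)
        {
        NOTICE([$note=SSH::Interesting_Hostname_Login,
                $msg=fmt("possible sensitive login to %s", c$id$resp_h),
                $sub="hostname-of-interest",
                $id=c$id, $uid=c$uid]);
        }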
There are two ternary if
statements that modify the ``$msg`` text depending on whether the statements that modify the ``$msg`` text depending on whether the
host is a local address and whether it is the client or the server. host is a local address and whether it is the client or the server.
This use of :bro:id:`fmt` and ternary operators is a concise way to This use of :bro:id:`fmt` and ternary operators is a concise way to

View file

@ -66,9 +66,6 @@ print version and exit
\fB\-x\fR,\ \-\-print\-state <file.bst> \fB\-x\fR,\ \-\-print\-state <file.bst>
print contents of state file print contents of state file
.TP .TP
\fB\-z\fR,\ \-\-analyze <analysis>
run the specified policy file analysis
.TP
\fB\-C\fR,\ \-\-no\-checksums \fB\-C\fR,\ \-\-no\-checksums
ignore checksums ignore checksums
.TP .TP
@ -78,12 +75,6 @@ force DNS
\fB\-I\fR,\ \-\-print\-id <ID name> \fB\-I\fR,\ \-\-print\-id <ID name>
print out given ID print out given ID
.TP .TP
\fB\-J\fR,\ \-\-set\-seed <seed>
set the random number seed
.TP
\fB\-K\fR,\ \-\-md5\-hashkey <hashkey>
set key for MD5\-keyed hashing
.TP
\fB\-N\fR,\ \-\-print\-plugins \fB\-N\fR,\ \-\-print\-plugins
print available plugins and exit (\fB\-NN\fR for verbose) print available plugins and exit (\fB\-NN\fR for verbose)
.TP .TP

View file

@ -1,46 +0,0 @@
#!/bin/sh
# This script generates binary DEB packages.
# They can be found in ../build/ after running.
# The DEB CPack generator depends on `dpkg-shlibdeps` to automatically
# determine what dependencies to set for the packages
type dpkg-shlibdeps > /dev/null 2>&1 || {
echo "\
Creating DEB packages requires the "dpkg-shlibs" command, usually provided by
the 'dpkg-dev' package, please install it first.
" >&2;
exit 1;
}
prefix=/opt/bro
localstatedir=/var/opt/bro
# During the packaging process, `dpkg-shlibs` will fail if used on a library
# that links to other internal/project libraries unless an RPATH is used or
# we set LD_LIBRARY_PATH such that it can find the internal/project library
# in the temporary packaging tree.
export LD_LIBRARY_PATH=./${prefix}/lib
cd ..
# Minimum Bro
./configure --prefix=${prefix} --disable-broccoli --disable-broctl \
--pkg-name-prefix=Bro-minimal --binary-package
( cd build && make package )
# Full Bro package
./configure --prefix=${prefix} --localstatedir=${localstatedir} --pkg-name-prefix=Bro --binary-package
( cd build && make package )
# Broccoli
cd aux/broccoli
./configure --prefix=${prefix} --binary-package
( cd build && make package && mv *.deb ../../../build/ )
cd ../..
# Broctl
cd aux/broctl
./configure --prefix=${prefix} --localstatedir=${localstatedir} --binary-package
( cd build && make package && mv *.deb ../../../build/ )
cd ../..

View file

@ -1,57 +0,0 @@
#!/bin/sh
# This script creates binary packages for Mac OS X.
# They can be found in ../build/ after running.
type sw_vers > /dev/null 2>&1 || {
echo "Unable to get Mac OS X version" >&2;
exit 1;
}
# Get the OS X minor version
# 5 = Leopard, 6 = Snow Leopard, 7 = Lion ...
osx_ver=`sw_vers | sed -n 's/ProductVersion://p' | cut -d . -f 2`
if [ ${osx_ver} -lt 5 ]; then
echo "Packages for OS X < 10.5 are not supported" >&2
exit 1
elif [ ${osx_ver} -eq 5 ]; then
# On OS X 10.5, the x86_64 version of libresolv is broken,
# so we build for i386 as the easiest solution
arch=i386
else
# Currently it's just easiest to build the 10.5 package on
# on 10.5, but if it weren't for the libresolv issue, we could
# potentially build packages for older OS X version by using the
# --osx-sysroot and --osx-min-version options
arch=x86_64
fi
prefix=/opt/bro
cd ..
# Minimum Bro
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
--disable-broccoli --disable-broctl --pkg-name-prefix=Bro-minimal \
--binary-package
( cd build && make package )
# Full Bro package
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
--pkg-name-prefix=Bro --binary-package
( cd build && make package )
# Broccoli
cd aux/broccoli
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
--binary-package
( cd build && make package && mv *.dmg ../../../build/ )
cd ../..
# Broctl
cd aux/broctl
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
--binary-package
( cd build && make package && mv *.dmg ../../../build/ )
cd ../..

View file

@ -1,39 +0,0 @@
#!/bin/sh
# This script generates binary RPM packages.
# They can be found in ../build/ after running.
# The RPM CPack generator depends on `rpmbuild` to create packages
type rpmbuild > /dev/null 2>&1 || {
echo "\
Creating RPM packages requires the "rpmbuild" command, usually provided by
the 'rpm-build' package, please install it first.
" >&2;
exit 1;
}
prefix=/opt/bro
localstatedir=/var/opt/bro
cd ..
# Minimum Bro
./configure --prefix=${prefix} --disable-broccoli --disable-broctl \
--pkg-name-prefix=Bro-minimal --binary-package
( cd build && make package )
# Full Bro package
./configure --prefix=${prefix} --localstatedir=${localstatedir} --pkg-name-prefix=Bro --binary-package
( cd build && make package )
# Broccoli
cd aux/broccoli
./configure --prefix=${prefix} --binary-package
( cd build && make package && mv *.rpm ../../../build/ )
cd ../..
# Broctl
cd aux/broctl
./configure --prefix=${prefix} --localstatedir=${localstatedir} --binary-package
( cd build && make package && mv *.rpm ../../../build/ )
cd ../..

View file

@ -18,6 +18,10 @@ InstallPackageConfigFile(
${CMAKE_CURRENT_SOURCE_DIR}/site/local-manager.bro ${CMAKE_CURRENT_SOURCE_DIR}/site/local-manager.bro
${BRO_SCRIPT_INSTALL_PATH}/site ${BRO_SCRIPT_INSTALL_PATH}/site
local-manager.bro) local-manager.bro)
InstallPackageConfigFile(
${CMAKE_CURRENT_SOURCE_DIR}/site/local-logger.bro
${BRO_SCRIPT_INSTALL_PATH}/site
local-logger.bro)
InstallPackageConfigFile( InstallPackageConfigFile(
${CMAKE_CURRENT_SOURCE_DIR}/site/local-proxy.bro ${CMAKE_CURRENT_SOURCE_DIR}/site/local-proxy.bro
${BRO_SCRIPT_INSTALL_PATH}/site ${BRO_SCRIPT_INSTALL_PATH}/site

View file

@ -1 +1 @@
Support for extracing files with the file analysis framework. Support for extracting files with the file analysis framework.

View file

@ -14,6 +14,13 @@ export {
redef record Files::Info += { redef record Files::Info += {
## Local filename of extracted file. ## Local filename of extracted file.
extracted: string &optional &log; extracted: string &optional &log;
## Set to true if the file being extracted was cut off
## so the whole file was not extracted to disk.
extracted_cutoff: bool &optional &log;
## The number of bytes extracted to disk.
extracted_size: count &optional &log;
}; };
redef record Files::AnalyzerArgs += { redef record Files::AnalyzerArgs += {
@ -58,9 +65,16 @@ function on_add(f: fa_file, args: Files::AnalyzerArgs)
f$info$extracted = args$extract_filename; f$info$extracted = args$extract_filename;
args$extract_filename = build_path_compressed(prefix, args$extract_filename); args$extract_filename = build_path_compressed(prefix, args$extract_filename);
f$info$extracted_cutoff = F;
mkdir(prefix); mkdir(prefix);
} }
event file_extraction_limit(f: fa_file, args: Files::AnalyzerArgs, limit: count, len: count) &priority=10
{
f$info$extracted_cutoff = T;
f$info$extracted_size = limit;
}
event bro_init() &priority=10 event bro_init() &priority=10
{ {
Files::register_analyzer_add_callback(Files::ANALYZER_EXTRACT, on_add); Files::register_analyzer_add_callback(Files::ANALYZER_EXTRACT, on_add);
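To see the new fields populated, one could cap the extraction size, e.g. via the analyzer's existing ``FileExtract::default_limit`` tunable::

    # Cut extracted files off after 1 MB so that extracted_cutoff
    # and extracted_size get set (the limit value is an example).
    redef FileExtract::default_limit = 1048576;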

View file

@ -6,6 +6,7 @@ module X509;
export { export {
redef enum Log::ID += { LOG }; redef enum Log::ID += { LOG };
## The record type which contains the fields of the X.509 log.
type Info: record { type Info: record {
## Current timestamp. ## Current timestamp.
ts: time &log; ts: time &log;

View file

@ -1 +1,2 @@
@load ./main @load ./main
@load ./store

View file

@ -1,11 +1,21 @@
##! Various data structure definitions for use with Bro's communication system. ##! Various data structure definitions for use with Bro's communication system.
module BrokerComm; module Log;
export {
type Log::ID: enum {
## Dummy place-holder.
UNKNOWN
};
}
module Broker;
export { export {
## A name used to identify this endpoint to peers. ## A name used to identify this endpoint to peers.
## .. bro:see:: BrokerComm::connect BrokerComm::listen ##
## .. bro:see:: Broker::connect Broker::listen
const endpoint_name = "" &redef; const endpoint_name = "" &redef;
## Change communication behavior. ## Change communication behavior.
@ -32,11 +42,11 @@ export {
## Opaque communication data. ## Opaque communication data.
type Data: record { type Data: record {
d: opaque of BrokerComm::Data &optional; d: opaque of Broker::Data &optional;
}; };
## Opaque communication data. ## Opaque communication data.
type DataVector: vector of BrokerComm::Data; type DataVector: vector of Broker::Data;
## Opaque event communication data. ## Opaque event communication data.
type EventArgs: record { type EventArgs: record {
@ -49,55 +59,318 @@ export {
## Opaque communication data used as a convenient way to wrap key-value ## Opaque communication data used as a convenient way to wrap key-value
## pairs that comprise table entries. ## pairs that comprise table entries.
type TableItem : record { type TableItem : record {
key: BrokerComm::Data; key: Broker::Data;
val: BrokerComm::Data; val: Broker::Data;
}; };
## Enable use of communication.
##
## flags: used to tune the local Broker endpoint behavior.
##
## Returns: true if communication is successfully initialized.
global enable: function(flags: EndpointFlags &default = EndpointFlags()): bool;
## Changes endpoint flags originally supplied to :bro:see:`Broker::enable`.
##
## flags: the new endpoint behavior flags to use.
##
## Returns: true if flags were changed.
global set_endpoint_flags: function(flags: EndpointFlags &default = EndpointFlags()): bool;
## Allow sending messages to peers if associated with the given topic.
## This has no effect if auto publication behavior is enabled via the flags
## supplied to :bro:see:`Broker::enable` or :bro:see:`Broker::set_endpoint_flags`.
##
## topic: a topic to allow messages to be published under.
##
## Returns: true if successful.
global publish_topic: function(topic: string): bool;
## Disallow sending messages to peers if associated with the given topic.
## This has no effect if auto publication behavior is enabled via the flags
## supplied to :bro:see:`Broker::enable` or :bro:see:`Broker::set_endpoint_flags`.
##
## topic: a topic to disallow messages to be published under.
##
## Returns: true if successful.
global unpublish_topic: function(topic: string): bool;
## Listen for remote connections.
##
## p: the TCP port to listen on.
##
## a: an address string on which to accept connections, e.g.
## "127.0.0.1". An empty string refers to @p INADDR_ANY.
##
## reuse: equivalent to behavior of SO_REUSEADDR.
##
## Returns: true if the local endpoint is now listening for connections.
##
## .. bro:see:: Broker::incoming_connection_established
global listen: function(p: port, a: string &default = "", reuse: bool &default = T): bool;
## Initiate a remote connection.
##
## a: an address to connect to, e.g. "localhost" or "127.0.0.1".
##
## p: the TCP port on which the remote side is listening.
##
## retry: an interval at which to retry establishing the
## connection with the remote peer if it cannot be made initially, or
## if it ever becomes disconnected.
##
## Returns: true if it's possible to try connecting with the peer and
## it's a new peer. The actual connection may not be established
## until a later point in time.
##
## .. bro:see:: Broker::outgoing_connection_established
global connect: function(a: string, p: port, retry: interval): bool;
## Remove a remote connection.
##
## a: the address used in previous successful call to :bro:see:`Broker::connect`.
##
## p: the port used in previous successful call to :bro:see:`Broker::connect`.
##
## Returns: true if the arguments match a previously successful call to
## :bro:see:`Broker::connect`.
global disconnect: function(a: string, p: port): bool;
## Print a simple message to any interested peers. The receiver can use
## :bro:see:`Broker::print_handler` to handle messages.
##
## topic: a topic associated with the printed message.
##
## msg: the print message to send to peers.
##
## flags: tune the behavior of how the message is sent.
##
## Returns: true if the message is sent.
global send_print: function(topic: string, msg: string, flags: SendFlags &default = SendFlags()): bool;
## Register interest in all peer print messages that use a certain topic
## prefix. Use :bro:see:`Broker::print_handler` to handle received
## messages.
##
## topic_prefix: a prefix to match against remote message topics.
## e.g. an empty prefix matches everything and "a" matches
## "alice" and "amy" but not "bob".
##
## Returns: true if it's a new print subscription and it is now registered.
global subscribe_to_prints: function(topic_prefix: string): bool;
## Unregister interest in all peer print messages that use a topic prefix.
##
## topic_prefix: a prefix previously supplied to a successful call to
## :bro:see:`Broker::subscribe_to_prints`.
##
## Returns: true if interest in the topic prefix is no longer advertised.
global unsubscribe_to_prints: function(topic_prefix: string): bool;
## Send an event to any interested peers.
##
## topic: a topic associated with the event message.
##
## args: event arguments as made by :bro:see:`Broker::event_args`.
##
## flags: tune the behavior of how the message is sent.
##
## Returns: true if the message is sent.
global send_event: function(topic: string, args: EventArgs, flags: SendFlags &default = SendFlags()): bool;
## Automatically send an event to any interested peers whenever it is
## locally dispatched (e.g. using "event my_event(...);" in a script).
##
## topic: a topic string associated with the event message.
## Peers advertise interest by registering a subscription to some
## prefix of this topic name.
##
## ev: a Bro event value.
##
## flags: tune the behavior of how the message is sent.
##
## Returns: true if automatic event sending is now enabled.
global auto_event: function(topic: string, ev: any, flags: SendFlags &default = SendFlags()): bool;
## Stop automatically sending an event to peers upon local dispatch.
##
## topic: a topic originally given to :bro:see:`Broker::auto_event`.
##
## ev: an event originally given to :bro:see:`Broker::auto_event`.
##
## Returns: true if automatic events will not occur for the topic/event
## pair.
global auto_event_stop: function(topic: string, ev: any): bool;
## Register interest in all peer event messages that use a certain topic
## prefix.
##
## topic_prefix: a prefix to match against remote message topics.
## e.g. an empty prefix matches everything and "a" matches
## "alice" and "amy" but not "bob".
##
## Returns: true if it's a new event subscription and it is now registered.
global subscribe_to_events: function(topic_prefix: string): bool;
## Unregister interest in all peer event messages that use a topic prefix.
##
## topic_prefix: a prefix previously supplied to a successful call to
## :bro:see:`Broker::subscribe_to_events`.
##
## Returns: true if interest in the topic prefix is no longer advertised.
global unsubscribe_to_events: function(topic_prefix: string): bool;
## Enable remote logs for a given log stream.
##
## id: the log stream to enable remote logs for.
##
## flags: tune the behavior of how log entry messages are sent.
##
## Returns: true if remote logs are enabled for the stream.
global enable_remote_logs: function(id: Log::ID, flags: SendFlags &default = SendFlags()): bool;
## Disable remote logs for a given log stream.
##
## id: the log stream to disable remote logs for.
##
## Returns: true if remote logs are disabled for the stream.
global disable_remote_logs: function(id: Log::ID): bool;
## Check if remote logs are enabled for a given log stream.
##
## id: the log stream to check.
##
## Returns: true if remote logs are enabled for the given stream.
global remote_logs_enabled: function(id: Log::ID): bool;
## Register interest in all peer log messages that use a certain topic
## prefix. Logs are implicitly sent with topic "bro/log/<stream-name>" and
## the receiving side processes them through the logging framework as usual.
##
## topic_prefix: a prefix to match against remote message topics.
## e.g. an empty prefix matches everything and "a" matches
## "alice" and "amy" but not "bob".
##
## Returns: true if it's a new log subscription and it is now registered.
global subscribe_to_logs: function(topic_prefix: string): bool;
## Unregister interest in all peer log messages that use a topic prefix.
## Logs are implicitly sent with topic "bro/log/<stream-name>" and the
## receiving side processes them through the logging framework as usual.
##
## topic_prefix: a prefix previously supplied to a successful call to
## :bro:see:`Broker::subscribe_to_logs`.
##
## Returns: true if interest in the topic prefix is no longer advertised.
global unsubscribe_to_logs: function(topic_prefix: string): bool;
} }
module BrokerStore; @load base/bif/comm.bif
@load base/bif/messaging.bif
export { module Broker;
## Whether a data store query could be completed or not. @ifdef ( Broker::__enable )
type QueryStatus: enum {
SUCCESS,
FAILURE,
};
## An expiry time for a key-value pair inserted in to a data store. function enable(flags: EndpointFlags &default = EndpointFlags()) : bool
type ExpiryTime: record { {
## Absolute point in time at which to expire the entry. return __enable(flags);
absolute: time &optional; }
## A point in time relative to the last modification time at which
## to expire the entry. New modifications will delay the expiration.
since_last_modification: interval &optional;
};
## The result of a data store query. function set_endpoint_flags(flags: EndpointFlags &default = EndpointFlags()): bool
type QueryResult: record { {
## Whether the query completed or not. return __set_endpoint_flags(flags);
status: BrokerStore::QueryStatus; }
## The result of the query. Certain queries may use a particular
## data type (e.g. querying store size always returns a count, but
## a lookup may return various data types).
result: BrokerComm::Data;
};
## Options to tune the SQLite storage backend. function publish_topic(topic: string): bool
type SQLiteOptions: record { {
## File system path of the database. return __publish_topic(topic);
path: string &default = "store.sqlite"; }
};
## Options to tune the RocksDB storage backend. function unpublish_topic(topic: string): bool
type RocksDBOptions: record { {
## File system path of the database. return __unpublish_topic(topic);
path: string &default = "store.rocksdb"; }
};
## Options to tune the particular storage backends. function listen(p: port, a: string &default = "", reuse: bool &default = T): bool
type BackendOptions: record { {
sqlite: SQLiteOptions &default = SQLiteOptions(); return __listen(p, a, reuse);
rocksdb: RocksDBOptions &default = RocksDBOptions(); }
};
} function connect(a: string, p: port, retry: interval): bool
{
return __connect(a, p, retry);
}
function disconnect(a: string, p: port): bool
{
return __disconnect(a, p);
}
function send_print(topic: string, msg: string, flags: SendFlags &default = SendFlags()): bool
{
return __send_print(topic, msg, flags);
}
function subscribe_to_prints(topic_prefix: string): bool
{
return __subscribe_to_prints(topic_prefix);
}
function unsubscribe_to_prints(topic_prefix: string): bool
{
return __unsubscribe_to_prints(topic_prefix);
}
function send_event(topic: string, args: EventArgs, flags: SendFlags &default = SendFlags()): bool
{
return __event(topic, args, flags);
}
function auto_event(topic: string, ev: any, flags: SendFlags &default = SendFlags()): bool
{
return __auto_event(topic, ev, flags);
}
function auto_event_stop(topic: string, ev: any): bool
{
return __auto_event_stop(topic, ev);
}
function subscribe_to_events(topic_prefix: string): bool
{
return __subscribe_to_events(topic_prefix);
}
function unsubscribe_to_events(topic_prefix: string): bool
{
return __unsubscribe_to_events(topic_prefix);
}
function enable_remote_logs(id: Log::ID, flags: SendFlags &default = SendFlags()): bool
{
return __enable_remote_logs(id, flags);
}
function disable_remote_logs(id: Log::ID): bool
{
return __disable_remote_logs(id);
}
function remote_logs_enabled(id: Log::ID): bool
{
return __remote_logs_enabled(id);
}
function subscribe_to_logs(topic_prefix: string): bool
{
return __subscribe_to_logs(topic_prefix);
}
function unsubscribe_to_logs(topic_prefix: string): bool
{
return __unsubscribe_to_logs(topic_prefix);
}
@endif
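Taken together, a minimal sketch of bringing the renamed API up on one node (assuming Bro was built with Broker support; the port and topic prefix are arbitrary)::

    event bro_init()
        {
        Broker::enable();
        Broker::subscribe_to_prints("bro/print/");
        Broker::listen(9999/tcp, "127.0.0.1");
        }

    event Broker::print_handler(msg: string)
        {
        print "got message", msg;
        }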

File diff suppressed because it is too large

View file

@ -28,6 +28,14 @@ redef Communication::listen_port = Cluster::nodes[Cluster::node]$p;
@if ( Cluster::local_node_type() == Cluster::MANAGER ) @if ( Cluster::local_node_type() == Cluster::MANAGER )
@load ./nodes/manager @load ./nodes/manager
# If no logger is defined, then the manager receives logs.
@if ( Cluster::manager_is_logger )
@load ./nodes/logger
@endif
@endif
@if ( Cluster::local_node_type() == Cluster::LOGGER )
@load ./nodes/logger
@endif @endif
@if ( Cluster::local_node_type() == Cluster::PROXY ) @if ( Cluster::local_node_type() == Cluster::PROXY )

View file

@ -31,7 +31,9 @@ export {
## A node type which is allowed to view/manipulate the configuration ## A node type which is allowed to view/manipulate the configuration
## of other nodes in the cluster. ## of other nodes in the cluster.
CONTROL, CONTROL,
## A node type responsible for log and policy management. ## A node type responsible for log management.
LOGGER,
## A node type responsible for policy management.
MANAGER, MANAGER,
## A node type for relaying worker node communication and synchronizing ## A node type for relaying worker node communication and synchronizing
## worker node state. ## worker node state.
@ -50,12 +52,21 @@ export {
## Events raised by a manager and handled by proxies. ## Events raised by a manager and handled by proxies.
const manager2proxy_events = /EMPTY/ &redef; const manager2proxy_events = /EMPTY/ &redef;
## Events raised by a manager and handled by loggers.
const manager2logger_events = /EMPTY/ &redef;
## Events raised by proxies and handled by loggers.
const proxy2logger_events = /EMPTY/ &redef;
## Events raised by proxies and handled by a manager. ## Events raised by proxies and handled by a manager.
const proxy2manager_events = /EMPTY/ &redef; const proxy2manager_events = /EMPTY/ &redef;
## Events raised by proxies and handled by workers. ## Events raised by proxies and handled by workers.
const proxy2worker_events = /EMPTY/ &redef; const proxy2worker_events = /EMPTY/ &redef;
## Events raised by workers and handled by loggers.
const worker2logger_events = /EMPTY/ &redef;
## Events raised by workers and handled by a manager. ## Events raised by workers and handled by a manager.
const worker2manager_events = /(TimeMachine::command|Drop::.*)/ &redef; const worker2manager_events = /(TimeMachine::command|Drop::.*)/ &redef;
@ -68,7 +79,7 @@ export {
## Events raised by TimeMachine instances and handled by workers. ## Events raised by TimeMachine instances and handled by workers.
const tm2worker_events = /EMPTY/ &redef; const tm2worker_events = /EMPTY/ &redef;
## Events sent by the control host (i.e. BroControl) when dynamically ## Events sent by the control host (i.e., BroControl) when dynamically
## connecting to a running instance to update settings or request data. ## connecting to a running instance to update settings or request data.
const control_events = Control::controller_events &redef; const control_events = Control::controller_events &redef;
@ -86,6 +97,8 @@ export {
p: port; p: port;
## Identifier for the interface a worker is sniffing. ## Identifier for the interface a worker is sniffing.
interface: string &optional; interface: string &optional;
## Name of the logger node this node uses. For managers, proxies, and workers.
logger: string &optional;
## Name of the manager node this node uses. For workers and proxies. ## Name of the manager node this node uses. For workers and proxies.
manager: string &optional; manager: string &optional;
## Name of the proxy node this node uses. For workers and managers. ## Name of the proxy node this node uses. For workers and managers.
@ -123,6 +136,12 @@ export {
## Note that BroControl handles all of this automatically. ## Note that BroControl handles all of this automatically.
const nodes: table[string] of Node = {} &redef; const nodes: table[string] of Node = {} &redef;
## Indicates whether or not the manager will act as the logger and receive
## logs. This value should be set in the cluster-layout.bro script (the
## value should be true only if no logger is specified in Cluster::nodes).
## Note that BroControl handles this automatically.
const manager_is_logger = T &redef;
## This is usually supplied on the command line for each instance ## This is usually supplied on the command line for each instance
## of the cluster that is started up. ## of the cluster that is started up.
const node = getenv("CLUSTER_NODE") &redef; const node = getenv("CLUSTER_NODE") &redef;
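For reference, a hand-written ``cluster-layout.bro`` using a separate logger might look like the sketch below (BroControl normally generates this file; the names, addresses, and ports are placeholders)::

    redef Cluster::nodes = {
        ["logger-1"] = [$node_type=Cluster::LOGGER, $ip=127.0.0.1, $p=47761/tcp],
        ["manager"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=47762/tcp,
                       $logger="logger-1"],
        ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=47763/tcp,
                        $interface="eth0", $logger="logger-1", $manager="manager"],
    };

    # A logger is present, so the manager should not receive logs.
    redef Cluster::manager_is_logger = F;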

View file

@ -0,0 +1,29 @@
##! This is the core Bro script to support the notion of a cluster logger.
##!
##! The logger is passive (other Bro instances connect to us), and once
##! connected the logger receives logs from other Bro instances.
##! This script will be automatically loaded if necessary based on the
##! type of node being started.
##! This is where the cluster logger sets its specific settings for other
##! frameworks and in the core.
@prefixes += cluster-logger
## Turn on local logging.
redef Log::enable_local_logging = T;
## Turn off remote logging since this is the logger and should only log here.
redef Log::enable_remote_logging = F;
## Log rotation interval.
redef Log::default_rotation_interval = 1 hrs;
## Alarm summary mail interval.
redef Log::default_mail_alarms_interval = 24 hrs;
## Use the cluster's archive logging script.
redef Log::default_rotation_postprocessor_cmd = "archive-log";
## We're processing essentially *only* remote events.
redef max_remote_events_processed = 10000;

View file

@ -10,17 +10,17 @@
@prefixes += cluster-manager @prefixes += cluster-manager
## Turn off remote logging since this is the manager and should only log here. ## Don't do any local logging since the logger handles writing logs.
redef Log::enable_remote_logging = F; redef Log::enable_local_logging = F;
## Turn on remote logging since the logger handles writing logs.
redef Log::enable_remote_logging = T;
## Log rotation interval. ## Log rotation interval.
redef Log::default_rotation_interval = 1 hrs; redef Log::default_rotation_interval = 24 hrs;
## Alarm summary mail interval. ## Use the cluster's delete-log script.
redef Log::default_mail_alarms_interval = 24 hrs; redef Log::default_rotation_postprocessor_cmd = "delete-log";
## Use the cluster's archive logging script.
redef Log::default_rotation_postprocessor_cmd = "archive-log";
## We're processing essentially *only* remote events. ## We're processing essentially *only* remote events.
redef max_remote_events_processed = 10000; redef max_remote_events_processed = 10000;

View file

@ -1,6 +1,6 @@
##! Redefines some options common to all worker nodes within a Bro cluster. ##! Redefines some options common to all worker nodes within a Bro cluster.
##! In particular, worker nodes do not produce logs locally, instead they ##! In particular, worker nodes do not produce logs locally, instead they
##! send them off to a manager node for processing. ##! send them off to a logger node for processing.
@prefixes += cluster-worker @prefixes += cluster-worker

View file

@ -23,17 +23,40 @@ event bro_init() &priority=9
$connect=F, $class="control", $connect=F, $class="control",
$events=control_events]; $events=control_events];
if ( me$node_type == MANAGER ) if ( me$node_type == LOGGER )
{ {
if ( n$node_type == MANAGER && n$logger == node )
Communication::nodes[i] =
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
$class=i, $events=manager2logger_events, $request_logs=T];
if ( n$node_type == PROXY && n$logger == node )
Communication::nodes[i] =
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
$class=i, $events=proxy2logger_events, $request_logs=T];
if ( n$node_type == WORKER && n$logger == node )
Communication::nodes[i] =
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
$class=i, $events=worker2logger_events, $request_logs=T];
}
else if ( me$node_type == MANAGER )
{
if ( n$node_type == LOGGER && me$logger == i )
Communication::nodes["logger"] =
[$host=n$ip, $zone_id=n$zone_id, $p=n$p,
$connect=T, $retry=retry_interval,
$class=node];
if ( n$node_type == WORKER && n$manager == node ) if ( n$node_type == WORKER && n$manager == node )
Communication::nodes[i] = Communication::nodes[i] =
[$host=n$ip, $zone_id=n$zone_id, $connect=F, [$host=n$ip, $zone_id=n$zone_id, $connect=F,
$class=i, $events=worker2manager_events, $request_logs=T]; $class=i, $events=worker2manager_events,
$request_logs=Cluster::manager_is_logger];
if ( n$node_type == PROXY && n$manager == node ) if ( n$node_type == PROXY && n$manager == node )
Communication::nodes[i] = Communication::nodes[i] =
[$host=n$ip, $zone_id=n$zone_id, $connect=F, [$host=n$ip, $zone_id=n$zone_id, $connect=F,
$class=i, $events=proxy2manager_events, $request_logs=T]; $class=i, $events=proxy2manager_events,
$request_logs=Cluster::manager_is_logger];
if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i ) if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i )
Communication::nodes["time-machine"] = [$host=nodes[i]$ip, Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
@ -45,6 +68,12 @@ event bro_init() &priority=9
else if ( me$node_type == PROXY ) else if ( me$node_type == PROXY )
{ {
if ( n$node_type == LOGGER && me$logger == i )
Communication::nodes["logger"] =
[$host=n$ip, $zone_id=n$zone_id, $p=n$p,
$connect=T, $retry=retry_interval,
$class=node];
if ( n$node_type == WORKER && n$proxy == node ) if ( n$node_type == WORKER && n$proxy == node )
Communication::nodes[i] = Communication::nodes[i] =
[$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i, [$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i,
@ -76,6 +105,12 @@ event bro_init() &priority=9
} }
else if ( me$node_type == WORKER ) else if ( me$node_type == WORKER )
{ {
if ( n$node_type == LOGGER && me$logger == i )
Communication::nodes["logger"] =
[$host=n$ip, $zone_id=n$zone_id, $p=n$p,
$connect=T, $retry=retry_interval,
$class=node];
if ( n$node_type == MANAGER && me$manager == i ) if ( n$node_type == MANAGER && me$manager == i )
Communication::nodes["manager"] = [$host=nodes[i]$ip, Communication::nodes["manager"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id, $zone_id=nodes[i]$zone_id,

View file

@ -27,6 +27,9 @@ export {
disabled_aids: set[count]; disabled_aids: set[count];
}; };
## Analyzers which you don't want to disable on protocol violations.
const ignore_violations: set[Analyzer::Tag] = set() &redef;
## Ignore violations which go this many bytes into the connection. ## Ignore violations which go this many bytes into the connection.
## Set to 0 to never ignore protocol violations. ## Set to 0 to never ignore protocol violations.
const ignore_violations_after = 10 * 1024 &redef; const ignore_violations_after = 10 * 1024 &redef;
@ -82,8 +85,11 @@ event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason
if ( ignore_violations_after > 0 && size > ignore_violations_after ) if ( ignore_violations_after > 0 && size > ignore_violations_after )
return; return;
if ( atype in ignore_violations )
return;
# Disable the analyzer that raised the last core-generated event. # Disable the analyzer that raised the last core-generated event.
disable_analyzer(c$id, aid); disable_analyzer(c$id, aid, F);
add c$dpd$disabled_aids[aid]; add c$dpd$disabled_aids[aid];
} }
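A hypothetical use of the new knob, ignoring violations from a single analyzer (the tag chosen here is only an example)::

    redef DPD::ignore_violations += { Analyzer::ANALYZER_DNS };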

View file

@ -174,3 +174,8 @@ signature file-lzma {
file-magic /^\x5d\x00\x00/ file-magic /^\x5d\x00\x00/
} }
# ACE archive file.
signature file-ace-archive {
file-mime "application/x-ace", 100
file-magic /^.{7}\*\*ACE\*\*/
}

View file

@ -2,7 +2,7 @@
# MPEG v3 audio # MPEG v3 audio
signature file-mpeg-audio { signature file-mpeg-audio {
file-mime "audio/mpeg", 20 file-mime "audio/mpeg", 20
file-magic /^\xff[\xe2\xe3\xf2\xf3\xf6\xf7\xfa\xfb\xfc\xfd]/ file-magic /^(ID3|\xff[\xe2\xe3\xf2\xf3\xf6\xf7\xfa\xfb\xfc\xfd])/
} }
# MPEG v4 audio # MPEG v4 audio

View file

@ -1,8 +1,14 @@
# Web Open Font Format 2
signature file-woff2 {
file-mime "application/font-woff2", 70
file-magic /^wOF2/
}
# Web Open Font Format # Web Open Font Format
signature file-woff { signature file-woff {
file-magic /^wOFF/
file-mime "application/font-woff", 70 file-mime "application/font-woff", 70
file-magic /^wOFF/
} }
# TrueType font # TrueType font

View file

@@ -9,53 +9,53 @@ signature file-plaintext {
 signature file-json {
 	file-mime "text/json", 1
-	file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*\{[\x0d\x0a[:blank:]]*(["][^"]{1,}["]|[a-zA-Z][a-zA-Z0-9\\_]*)[\x0d\x0a[:blank:]]*:[\x0d\x0a[:blank:]]*(["]|\[|\{|[0-9]|true|false)/
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?[\x0d\x0a[:blank:]]*\{[\x0d\x0a[:blank:]]*(["][^"]{1,}["]|[a-zA-Z][a-zA-Z0-9\\_]*)[\x0d\x0a[:blank:]]*:[\x0d\x0a[:blank:]]*(["]|\[|\{|[0-9]|true|false)/
 }
 
 signature file-json2 {
 	file-mime "text/json", 1
-	file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*\[[\x0d\x0a[:blank:]]*(((["][^"]{1,}["]|[0-9]{1,}(\.[0-9]{1,})?|true|false)[\x0d\x0a[:blank:]]*,)|\{|\[)[\x0d\x0a[:blank:]]*/
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?[\x0d\x0a[:blank:]]*\[[\x0d\x0a[:blank:]]*(((["][^"]{1,}["]|[0-9]{1,}(\.[0-9]{1,})?|true|false)[\x0d\x0a[:blank:]]*,)|\{|\[)[\x0d\x0a[:blank:]]*/
 }
 
 # Match empty JSON documents.
 signature file-json3 {
 	file-mime "text/json", 0
-	file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*(\[\]|\{\})[\x0d\x0a[:blank:]]*$/
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?[\x0d\x0a[:blank:]]*(\[\]|\{\})[\x0d\x0a[:blank:]]*$/
 }
 
 signature file-xml {
 	file-mime "application/xml", 10
-	file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<\?xml /
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*\x00?<\x00?\?\x00?x\x00?m\x00?l\x00? \x00?/
 }
 
 signature file-xhtml {
 	file-mime "text/html", 100
-	file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<(![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]|[hH][tT][mM][lL]|[mM][eE][tT][aA] {1,}[hH][tT][tT][pP]-[eE][qQ][uU][iI][vV])/
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<(![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]|[hH][tT][mM][lL]|[mM][eE][tT][aA] {1,}[hH][tT][tT][pP]-[eE][qQ][uU][iI][vV])/
 }
 
 signature file-html {
 	file-mime "text/html", 49
-	file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]/
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]/
 }
 
 signature file-html2 {
 	file-mime "text/html", 20
-	file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([hH][eE][aA][dD]|[hH][tT][mM][lL]|[tT][iI][tT][lL][eE]|[bB][oO][dD][yY])/
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([hH][eE][aA][dD]|[hH][tT][mM][lL]|[tT][iI][tT][lL][eE]|[bB][oO][dD][yY])/
 }
 
 signature file-rss {
 	file-mime "text/rss", 90
-	file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[rR][sS][sS]/
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[rR][sS][sS]/
 }
 
 signature file-atom {
 	file-mime "text/atom", 100
-	file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([rR][sS][sS][^>]*xmlns:atom|[fF][eE][eE][dD][^>]*xmlns=["']?http:\/\/www.w3.org\/2005\/Atom["']?)/
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([rR][sS][sS][^>]*xmlns:atom|[fF][eE][eE][dD][^>]*xmlns=["']?http:\/\/www.w3.org\/2005\/Atom["']?)/
 }
 
 signature file-soap {
 	file-mime "application/soap+xml", 49
-	file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[sS][oO][aA][pP](-[eE][nN][vV])?:[eE][nN][vV][eE][lL][oO][pP][eE]/
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[sS][oO][aA][pP](-[eE][nN][vV])?:[eE][nN][vV][eE][lL][oO][pP][eE]/
 }
 
 signature file-cross-domain-policy {
@@ -70,7 +70,7 @@ signature file-cross-domain-policy2 {
 
 signature file-xmlrpc {
 	file-mime "application/xml-rpc", 49
-	file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][eE][tT][hH][oO][dD][rR][eE][sS][pP][oO][nN][sS][eE]>/
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][eE][tT][hH][oO][dD][rR][eE][sS][pP][oO][nN][sS][eE]>/
 }
 
 signature file-coldfusion {
@@ -81,13 +81,54 @@ signature file-coldfusion {
 
 # Adobe Flash Media Manifest
 signature file-f4m {
 	file-mime "application/f4m", 49
-	file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][aA][nN][iI][fF][eE][sS][tT][\x0d\x0a[:blank:]]{1,}xmlns=\"http:\/\/ns\.adobe\.com\/f4m\//
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][aA][nN][iI][fF][eE][sS][tT][\x0d\x0a[:blank:]]{1,}xmlns=\"http:\/\/ns\.adobe\.com\/f4m\//
 }
 
+# .ini style files
+signature file-ini {
+	file-mime "text/ini", 20
+	file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?[\x00\x0d\x0a[:blank:]]*\[[^\x0d\x0a]+\][[:blank:]\x00]*[\x0d\x0a]/
+}
+
 # Microsoft LNK files
 signature file-lnk {
 	file-mime "application/x-ms-shortcut", 49
-	file-magic /^\x4C\x00\x00\x00\x01\x14\x02\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x10\x00\x00\x00\x46/
+	file-magic /^\x4c\x00\x00\x00\x01\x14\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00\x46/
 }
 
+# Microsoft Registry policies
+signature file-pol {
+	file-mime "application/vnd.ms-pol", 49
+	file-magic /^PReg/
+}
+
+# Old style Windows registry file
+signature file-reg {
+	file-mime "application/vnd.ms-reg", 49
+	file-magic /^REGEDIT4/
+}
+
+# Newer Windows registry file
+signature file-reg-utf16 {
+	file-mime "application/vnd.ms-reg", 49
+	file-magic /^\xFF\xFEW\x00i\x00n\x00d\x00o\x00w\x00s\x00 \x00R\x00e\x00g\x00i\x00s\x00t\x00r\x00y\x00 \x00E\x00d\x00i\x00t\x00o\x00r\x00 \x00V\x00e\x00r\x00s\x00i\x00o\x00n\x00 \x005\x00\.\x000\x000/
+}
+
+# Microsoft Registry format (typically DESKTOP.DAT)
+signature file-regf {
+	file-mime "application/vnd.ms-regf", 49
+	file-magic /^\x72\x65\x67\x66/
+}
+
+# Microsoft Outlook PST files
+signature file-pst {
+	file-mime "application/vnd.ms-outlook", 49
+	file-magic /!BDN......[\x0e\x0f\x15\x17][\x00-\x02]/
+}
+
+signature file-afpinfo {
+	file-mime "application/vnd.apple-afpinfo"
+	file-magic /^AFP/
+}
 
 signature file-jar {
@@ -251,6 +292,104 @@ signature file-skp {
 	file-mime "application/skp", 100
 }
 
+# Microsoft DirectDraw Surface
+signature file-msdds {
+	file-mime "application/x-ms-dds", 100
+	file-magic /^DDS/
+}
+
+# bsdiff output
+signature file-bsdiff {
+	file-mime "application/bsdiff", 100
+	file-magic /^BSDIFF/
+}
+
+# AV Update binary diffs (mostly kaspersky)
+# inferred from traffic analysis
+signature file-binarydiff {
+	file-mime "application/bindiff", 100
+	file-magic /^DIFF/
+}
+
+# Kaspersky Database
+# inferred from traffic analysis
+signature file-kaspdb {
+	file-mime "application/x-kaspavdb", 100
+	file-magic /^KLZF/
+}
+
+# Kaspersky AV Database diff
+# inferred from traffic analysis
+signature file-kaspdbdif {
+	file-mime "application/x-kaspavupdate", 100
+	file-magic /^KLD2/
+}
+
+# MSSQL Backups
+signature file-mssqlbak {
+	file-mime "application/mssql-bak", 100
+	file-magic /^MSSQLBAK/
+}
+
+# Microsoft Tape Format
+# MSSQL transaction log
+signature file-ms-tf {
+	file-mime "application/mtf", 100
+	file-magic /^TAPE/
+}
+
+# Binary property list (Apple)
+signature file-bplist {
+	file-mime "application/bplist", 100
+	file-magic /^bplist0?/
+}
+
+# Microsoft Compiled HTML Help File
+signature file-mshelp {
+	file-mime "application/mshelp", 100
+	file-magic /^ITSF/
+}
+
+# Blizzard game file MPQ Format
+signature file-mpqgame {
+	file-mime "application/x-game-mpq", 100
+	file-magic /^MPQ\x1a/
+}
+
+# Blizzard CASC Format game file
+signature file-blizgame {
+	file-mime "application/x-blizgame", 100
+	file-magic /^BLTE/
+}
+
+# iOS Mapkit tiles
+# inferred from traffic analysis
+signature file-mapkit-tile {
+	file-mime "application/map-tile", 100
+	file-magic /^VMP4/
+}
+
+# Google Chrome Extension file
+signature file-chrome-extension {
+	file-mime "application/chrome-ext", 100
+	file-magic /^Cr24/
+}
+
+# Google Chrome Extension Update Delta
+# not 100% sure about this identification
+# this may be google chrome updates, not extensions
+signature file-chrome-extension-update {
+	file-mime "application/chrome-ext-upd", 70
+	file-magic /^CrOD/
+}
+
+# Microsoft Message Queueing
+# .net related
+signature file-msqm {
+	file-mime "application/msqm", 100
+	file-magic /^MSQM/
+}
+
 signature file-elf-object {
 	file-mime "application/x-object", 50
 	file-magic /\x7fELF[\x01\x02](\x01.{10}\x01\x00|\x02.{10}\x00\x01)/
@@ -270,3 +409,8 @@ signature file-elf-coredump {
 	file-mime "application/x-coredump", 50
 	file-magic /\x7fELF[\x01\x02](\x01.{10}\x04\x00|\x02.{10}\x00\x04)/
 }
+
+signature file-vim-tmp {
+	file-mime "application/x-vim-tmp", 100
+	file-magic /^b0VIM/
+}
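The recurring change in the text signatures above is the prefix (\xef\xbb\xbf|\xff\xfe|\xfe\xff)?, which accepts the UTF-8, UTF-16LE and UTF-16BE byte-order marks; file-xml additionally interleaves optional NUL bytes (\x00?) so that UTF-16-encoded XML prologs match even past the BOM. A minimal sketch of checking that behavior, again assuming the file_magic built-in (the sample bytes are fabricated):

event bro_init()
	{
	# "<?xml " encoded as UTF-16LE, preceded by the FF FE BOM.
	local utf16_xml = "\xff\xfe<\x00?\x00x\x00m\x00l\x00 \x00";
	local matches = file_magic(utf16_xml);

	for ( i in matches )
		# Expected to report application/xml.
		print matches[i]$mime;
	}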


@@ -164,3 +164,9 @@ signature file-award-bios-logo {
 	file-mime "image/x-award-bioslogo", 50
 	file-magic /^\x11[\x06\x09]/
 }
+
+# WebP, lossy image format from Google
+signature file-webp {
+	file-mime "image/webp", 70
+	file-magic /^RIFF.{4}WEBP/
+}


@@ -18,7 +18,7 @@ signature file-docx {
 }
 
 signature file-xlsx {
-	file-magic /^PK\x03\x04.{26}(\[Content_Types\]\.xml|_rels\x2f\.rels|xl\2f).*PK\x03\x04.{26}xl\x2f/
+	file-magic /^PK\x03\x04.{26}(\[Content_Types\]\.xml|_rels\x2f\.rels|xl\x2f).*PK\x03\x04.{26}xl\x2f/
 	file-mime "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", 80
 }


@@ -103,6 +103,17 @@ export {
 		## it is skipped.
 		pred: function(typ: Input::Event, left: any, right: any): bool &optional;
 
+		## Error event that is raised when an informational message,
+		## warning, or error is emitted by the input stream. If the level
+		## is error, the stream will automatically be closed. The event
+		## receives the Input::TableDescription as the first argument,
+		## the message as the second and the Reporter::Level as the third.
+		##
+		## The event is raised as if it had been declared as follows:
+		## error_ev: function(desc: TableDescription, message: string, level: Reporter::Level) &optional;
+		## The actual declaration uses ``any`` due to deficiencies of the Bro type system.
+		error_ev: any &optional;
+
 		## A key/value table that will be passed to the reader.
 		## Interpretation of the values is left to the reader, but
 		## usually they will be used for configuration purposes.
@@ -146,6 +157,17 @@ export {
 		## all fields, or each field value as a separate argument).
 		ev: any;
 
+		## Error event that is raised when an informational message,
+		## warning, or error is emitted by the input stream. If the level
+		## is error, the stream will automatically be closed. The event
+		## receives the Input::EventDescription as the first argument,
+		## the message as the second and the Reporter::Level as the third.
+		##
+		## The event is raised as if it had been declared as follows:
+		## error_ev: function(desc: EventDescription, message: string, level: Reporter::Level) &optional;
+		## The actual declaration uses ``any`` due to deficiencies of the Bro type system.
+		error_ev: any &optional;
+
 		## A key/value table that will be passed to the reader.
 		## Interpretation of the values is left to the reader, but
 		## usually they will be used for configuration purposes.
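The new error_ev field gives scripts a handle on reader diagnostics that previously only reached the reporter. A minimal sketch of wiring it up (the stream name, file path and record types here are hypothetical):

type Idx: record { ip: addr; };
type Val: record { reason: string; };

global blacklist: table[addr] of Val = table();

event blacklist_error(desc: Input::TableDescription, message: string, level: Reporter::Level)
	{
	# On Reporter::ERROR the framework closes the stream automatically;
	# this handler only surfaces the message.
	print fmt("input stream %s: %s (%s)", desc$name, message, level);
	}

event bro_init()
	{
	Input::add_table([$source="blacklist.file", $name="blacklist",
	                  $idx=Idx, $val=Val, $destination=blacklist,
	                  $error_ev=blacklist_error]);
	}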


@@ -1,5 +1,8 @@
 @load ./main
 
+# File analysis framework integration.
+@load ./files
+
 # The cluster framework must be loaded first.
 @load base/frameworks/cluster


@@ -1,8 +1,8 @@
 ##! Cluster transparency support for the intelligence framework. This is mostly
 ##! oriented toward distributing intelligence information across clusters.
 
-@load ./main
 @load base/frameworks/cluster
+@load ./input
 
 module Intel;
@@ -12,24 +12,22 @@ redef record Item += {
 	first_dispatch: bool &default=T;
 };
 
-# If this process is not a manager process, we don't want the full metadata
+# If this process is not a manager process, we don't want the full metadata.
 @if ( Cluster::local_node_type() != Cluster::MANAGER )
 redef have_full_data = F;
 @endif
 
+# Internal event for cluster data distribution.
 global cluster_new_item: event(item: Item);
 
-# Primary intelligence distribution comes from manager.
-redef Cluster::manager2worker_events += /^Intel::(cluster_new_item)$/;
-# If a worker finds intelligence and adds it, it should share it back to the manager.
-redef Cluster::worker2manager_events += /^Intel::(cluster_new_item|match_no_items)$/;
+# Primary intelligence management is done by the manager.
+# The manager informs the workers about new items and item removal.
+redef Cluster::manager2worker_events += /^Intel::(cluster_new_item|purge_item)$/;
+# A worker queries the manager to insert, remove or indicate the match of an item.
+redef Cluster::worker2manager_events += /^Intel::(cluster_new_item|remove_item|match_no_items)$/;
 
 @if ( Cluster::local_node_type() == Cluster::MANAGER )
-event Intel::match_no_items(s: Seen) &priority=5
-	{
-	event Intel::match(s, Intel::get_items(s));
-	}
-
+# Handling of new worker nodes.
 event remote_connection_handshake_done(p: event_peer)
 	{
 	# When a worker connects, send it the complete minimal data store.
@@ -39,15 +37,22 @@ event remote_connection_handshake_done(p: event_peer)
 		send_id(p, "Intel::min_data_store");
 		}
 	}
-@endif
 
-event Intel::cluster_new_item(item: Intel::Item) &priority=5
+# Handling of matches triggered by worker nodes.
+event Intel::match_no_items(s: Seen) &priority=5
 	{
-	# Ignore locally generated events to avoid event storms.
-	if ( is_remote_event() )
-		Intel::insert(item);
+	if ( Intel::find(s) )
+		event Intel::match(s, Intel::get_items(s));
 	}
 
+# Handling of item removal triggered by worker nodes.
+event Intel::remove_item(item: Item, purge_indicator: bool)
+	{
+	remove(item, purge_indicator);
+	}
+@endif
+
+# Handling of item insertion.
 event Intel::new_item(item: Intel::Item) &priority=5
 	{
 	# The cluster manager always rebroadcasts intelligence.
@@ -59,3 +64,11 @@ event Intel::new_item(item: Intel::Item) &priority=5
 		event Intel::cluster_new_item(item);
 		}
 	}
+
+# Handling of item insertion by remote node.
+event Intel::cluster_new_item(item: Intel::Item) &priority=5
+	{
+	# Ignore locally generated events to avoid event storms.
+	if ( is_remote_event() )
+		Intel::insert(item);
+	}
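The two event tables redefined above are pattern-valued: an event crosses the cluster only when its fully qualified name matches one of the registered regexes. User scripts can employ the same mechanism; a hypothetical sketch for a custom module:

module MyModule;

export {
	## Hypothetical event the manager broadcasts to all workers.
	global config_update: event(key: string, value: string);
}

# Forward MyModule::config_update from the manager to the workers.
redef Cluster::manager2worker_events += /^MyModule::config_update$/;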


@@ -0,0 +1,69 @@
+##! File analysis framework integration for the intelligence framework. This
+##! script manages file information in intelligence framework data structures.
+
+@load ./main
+
+module Intel;
+
+export {
+	## Enum type to represent various types of intelligence data.
+	redef enum Type += {
+		## File hash which is non-hash type specific. It's up to the
+		## user to query for any relevant hash types.
+		FILE_HASH,
+		## File name. Typically with protocols with definite
+		## indications of a file name.
+		FILE_NAME,
+	};
+
+	## Information about a piece of "seen" data.
+	redef record Seen += {
+		## If the data was discovered within a file, the file record
+		## should go here to provide context to the data.
+		f: fa_file &optional;
+		## If the data was discovered within a file, the file uid should
+		## go here to provide context to the data. If the file record *f*
+		## is provided, this will be automatically filled out.
+		fuid: string &optional;
+	};
+
+	## Record used for the logging framework representing a positive
+	## hit within the intelligence framework.
+	redef record Info += {
+		## If a file was associated with this intelligence hit,
+		## this is the uid for the file.
+		fuid: string &log &optional;
+		## A mime type if the intelligence hit is related to a file.
+		## If the $f field is provided this will be automatically
+		## filled out.
+		file_mime_type: string &log &optional;
+		## Frequently files can be "described" to give a bit more
+		## context. If the $f field is provided this field will be
+		## automatically filled out.
+		file_desc: string &log &optional;
+	};
+}
+
+# Add file information to matches if available.
+hook extend_match(info: Info, s: Seen, items: set[Item]) &priority=6
+	{
+	if ( s?$f )
+		{
+		s$fuid = s$f$id;
+
+		if ( s$f?$conns && |s$f$conns| == 1 )
+			{
+			for ( cid in s$f$conns )
+				s$conn = s$f$conns[cid];
+			}
+
+		if ( ! info?$file_mime_type && s$f?$info && s$f$info?$mime_type )
+			info$file_mime_type = s$f$info$mime_type;
+
+		if ( ! info?$file_desc )
+			info$file_desc = Files::describe(s$f);
+		}
+
+	if ( s?$fuid )
+		info$fuid = s$fuid;
+	}
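A FILE_HASH hit is ultimately triggered by handing each computed hash to Intel::seen together with the file record, which lets the extend_match hook above fill in the file context. A minimal sketch of that pattern, mirroring what the framework's stock seen/ scripts do (the Files::IN_HASH where-location is assumed from those scripts):

@load base/frameworks/intel

event file_hash(f: fa_file, kind: string, hash: string)
	{
	Intel::seen(Intel::Seen($indicator=hash,
	                        $indicator_type=Intel::FILE_HASH,
	                        $f=f, $where=Files::IN_HASH));
	}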


@@ -1,11 +1,14 @@
+##! Input handling for the intelligence framework. This script implements the
+##! import of intelligence data from files using the input framework.
+
 @load ./main
 
 module Intel;
 
 export {
 	## Intelligence files that will be read off disk. The files are
-	## reread every time they are updated so updates must be atomic with
-	## "mv" instead of writing the file in place.
+	## reread every time they are updated so updates must be atomic
+	## with "mv" instead of writing the file in place.
 	const read_files: set[string] = {} &redef;
 }
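Feeds listed in read_files are consumed with the input framework's ASCII reader, so they are TAB-separated files with a #fields header naming the Item columns. A hypothetical feed and the redef to load it (the path is made up; all columns must be separated by literal tabs):

# /opt/bro/feeds/example.dat
#fields	indicator	indicator_type	meta.source	meta.desc
192.0.2.1	Intel::ADDR	source1	known scanner
evil.example.com	Intel::DOMAIN	source1	malware C2

redef Intel::read_files += { "/opt/bro/feeds/example.dat" };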


@@ -1,7 +1,7 @@
-##! The intelligence framework provides a way to store and query IP addresses,
-##! and strings (with a str_type). Metadata can
-##! also be associated with the intelligence, like for making more informed
-##! decisions about matching and handling of intelligence.
+##! The intelligence framework provides a way to store and query intelligence
+##! data (e.g. IP addresses, URLs and hashes). The intelligence items can be
+##! associated with metadata to allow informed decisions about matching and
+##! handling.
 
 @load base/frameworks/notice
@@ -14,6 +14,8 @@ export {
 	type Type: enum {
 		## An IP address.
 		ADDR,
+		## A subnet in CIDR notation.
+		SUBNET,
 		## A complete URL without the prefix ``"http://"``.
 		URL,
 		## Software name.
@@ -24,24 +26,20 @@ export {
 		DOMAIN,
 		## A user name.
 		USER_NAME,
-		## File hash which is non-hash type specific. It's up to the
-		## user to query for any relevant hash types.
-		FILE_HASH,
-		## File name. Typically with protocols with definite
-		## indications of a file name.
-		FILE_NAME,
 		## Certificate SHA-1 hash.
 		CERT_HASH,
 		## Public key MD5 hash. (SSH server host keys are a good example.)
 		PUBKEY_HASH,
 	};
 
+	## Set of intelligence data types.
+	type TypeSet: set[Type];
+
 	## Data about an :bro:type:`Intel::Item`.
 	type MetaData: record {
-		## An arbitrary string value representing the data source.
-		## Typically, the convention for this field will be the source
-		## name and feed name separated by a hyphen.
-		## For example: "source1-c&c".
+		## An arbitrary string value representing the data source. This
+		## value is used as a unique key to identify a metadata record in
+		## the scope of a single intelligence item.
 		source: string;
 		## A freeform description for the data.
 		desc: string &optional;
@@ -57,7 +55,7 @@ export {
 		## The type of data that the indicator field represents.
 		indicator_type: Type;
 		## Metadata for the item. Typically represents more deeply
 		## descriptive data for a piece of intelligence.
 		meta: MetaData;
 	};
@@ -77,23 +75,25 @@ export {
 		## The type of data that the indicator represents.
 		indicator_type: Type &log &optional;
 
 		## If the indicator type was :bro:enum:`Intel::ADDR`, then this
 		## field will be present.
 		host: addr &optional;
 
 		## Where the data was discovered.
 		where: Where &log;
 
 		## The name of the node where the match was discovered.
 		node: string &optional &log;
 
 		## If the data was discovered within a connection, the
 		## connection record should go here to give context to the data.
 		conn: connection &optional;
-		## If the data was discovered within a file, the file record
-		## should go here to provide context to the data.
-		f: fa_file &optional;
+		## If the data was discovered within a connection, the
+		## connection uid should go here to give context to the data.
+		## If the *conn* field is provided, this will be automatically
+		## filled out.
+		uid: string &optional;
 	};
## Record used for the logging framework representing a positive ## Record used for the logging framework representing a positive
@ -109,40 +109,70 @@ export {
## this is the conn_id for the connection. ## this is the conn_id for the connection.
id: conn_id &log &optional; id: conn_id &log &optional;
## If a file was associated with this intelligence hit,
## this is the uid for the file.
fuid: string &log &optional;
## A mime type if the intelligence hit is related to a file.
## If the $f field is provided this will be automatically filled
## out.
file_mime_type: string &log &optional;
## Frequently files can be "described" to give a bit more context.
## If the $f field is provided this field will be automatically
## filled out.
file_desc: string &log &optional;
## Where the data was seen. ## Where the data was seen.
seen: Seen &log; seen: Seen &log;
## Which indicator types matched.
matched: TypeSet &log;
## Sources which supplied data that resulted in this match. ## Sources which supplied data that resulted in this match.
sources: set[string] &log &default=string_set(); sources: set[string] &log &default=string_set();
}; };
## Intelligence data manipulation function. ## Function to insert intelligence data. If the indicator is already
## present, the associated metadata will be added to the indicator. If
## the indicator already contains a metadata record from the same source,
## the existing metadata record will be updated.
global insert: function(item: Item); global insert: function(item: Item);
## Function to remove intelligence data. If purge_indicator is set, the
## given metadata is ignored and the indicator is removed completely.
global remove: function(item: Item, purge_indicator: bool &default = F);
## Function to declare discovery of a piece of data in order to check ## Function to declare discovery of a piece of data in order to check
## it against known intelligence for matches. ## it against known intelligence for matches.
global seen: function(s: Seen); global seen: function(s: Seen);
## Event to represent a match in the intelligence data from data that ## Event to represent a match in the intelligence data from data that
## was seen. On clusters there is no assurance as to where this event ## was seen. On clusters there is no assurance as to when this event
## will be generated so do not assume that arbitrary global state beyond ## will be generated so do not assume that arbitrary global state beyond
## the given data will be available. ## the given data will be available.
## ##
## This is the primary mechanism where a user will take actions based on ## This is the primary mechanism where a user may take actions based on
## data within the intelligence framework. ## data provided by the intelligence framework.
global match: event(s: Seen, items: set[Item]); global match: event(s: Seen, items: set[Item]);
## This hook can be used to influence the logging of intelligence hits
## (e.g. by adding data to the Info record). The default information is
## added with a priority of 5.
##
## info: The Info record that will be logged.
##
## s: Information about the data seen.
##
## items: The intel items that match the seen data.
##
## In case the hook execution is terminated using break, the match will
## not be logged.
global extend_match: hook(info: Info, s: Seen, items: set[Item]);
## The expiration timeout for intelligence items. Once an item expires, the
## :bro:id:`Intel::item_expired` hook is called. Reinsertion of an item
## resets the timeout. A negative value disables expiration of intelligence
## items.
const item_expiration = -1 min &redef;
## This hook can be used to handle expiration of intelligence items.
##
## indicator: The indicator of the expired item.
##
## indicator_type: The indicator type of the expired item.
##
## metas: The set of metadata describing the expired item.
##
## If all hook handlers are executed, the expiration timeout will be reset.
## Otherwise, if one of the handlers terminates using break, the item will
## be removed.
global item_expired: hook(indicator: string, indicator_type: Type, metas: set[MetaData]);
global log_intel: event(rec: Info); global log_intel: event(rec: Info);
} }
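Consumers still react to hits via Intel::match, while the new extend_match hook can enrich the pending Info record or veto the log write entirely (the default enrichment runs at priority 5, and any handler that breaks suppresses the entry). A hypothetical pair of handlers; the DNS::IN_REQUEST where-location is assumed from the stock seen scripts:

event Intel::match(s: Intel::Seen, items: set[Intel::Item])
	{
	for ( item in items )
		print fmt("intel hit from source %s: %s (%s)",
		          item$meta$source, item$indicator, item$indicator_type);
	}

hook Intel::extend_match(info: Intel::Info, s: Intel::Seen, items: set[Intel::Item])
	{
	# Hypothetical policy: never log hits observed in DNS requests.
	if ( s$where == DNS::IN_REQUEST )
		break;
	}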
@@ -151,16 +181,26 @@ global match_no_items: event(s: Seen);
 
 # Internal events for cluster data distribution.
 global new_item: event(item: Item);
-global updated_item: event(item: Item);
+global remove_item: event(item: Item, purge_indicator: bool);
+global purge_item: event(item: Item);
 
 # Optionally store metadata. This is used internally depending on
 # if this is a cluster deployment or not.
 const have_full_data = T &redef;
 
+# Table of metadata, indexed by source string.
+type MetaDataTable: table[string] of MetaData;
+
+# Expiration handlers.
+global expire_host_data: function(data: table[addr] of MetaDataTable, idx: addr): interval;
+global expire_subnet_data: function(data: table[subnet] of MetaDataTable, idx: subnet): interval;
+global expire_string_data: function(data: table[string, Type] of MetaDataTable, idx: any): interval;
+
 # The in memory data structure for holding intelligence.
 type DataStore: record {
-	host_data: table[addr] of set[MetaData];
-	string_data: table[string, Type] of set[MetaData];
+	host_data: table[addr] of MetaDataTable &write_expire=item_expiration &expire_func=expire_host_data;
+	subnet_data: table[subnet] of MetaDataTable &write_expire=item_expiration &expire_func=expire_subnet_data;
+	string_data: table[string, Type] of MetaDataTable &write_expire=item_expiration &expire_func=expire_string_data;
 };
 global data_store: DataStore &redef;
@@ -169,6 +209,7 @@ global data_store: DataStore &redef;
 # a minimal amount of data for the full match to happen on the manager.
 type MinDataStore: record {
 	host_data: set[addr];
+	subnet_data: set[subnet];
 	string_data: set[string, Type];
 };
 global min_data_store: MinDataStore &redef;
@@ -179,33 +220,78 @@ event bro_init() &priority=5
 	Log::create_stream(LOG, [$columns=Info, $ev=log_intel, $path="intel"]);
 	}
 
+# Function that abstracts expiration of different types.
+function expire_item(indicator: string, indicator_type: Type, metas: set[MetaData]): interval
+	{
+	if ( hook item_expired(indicator, indicator_type, metas) )
+		return item_expiration;
+	else
+		remove([$indicator=indicator, $indicator_type=indicator_type, $meta=[$source=""]], T);
+	return 0 sec;
+	}
+
+# Expiration handler definitions.
+function expire_host_data(data: table[addr] of MetaDataTable, idx: addr): interval
+	{
+	local meta_tbl: MetaDataTable = data[idx];
+	local metas: set[MetaData];
+	for ( src in meta_tbl )
+		add metas[meta_tbl[src]];
+
+	return expire_item(cat(idx), ADDR, metas);
+	}
+
+function expire_subnet_data(data: table[subnet] of MetaDataTable, idx: subnet): interval
+	{
+	local meta_tbl: MetaDataTable = data[idx];
+	local metas: set[MetaData];
+	for ( src in meta_tbl )
+		add metas[meta_tbl[src]];
+
+	return expire_item(cat(idx), SUBNET, metas);
+	}
+
+function expire_string_data(data: table[string, Type] of MetaDataTable, idx: any): interval
+	{
+	local indicator: string;
+	local indicator_type: Type;
+	[indicator, indicator_type] = idx;
+
+	local meta_tbl: MetaDataTable = data[indicator, indicator_type];
+	local metas: set[MetaData];
+	for ( src in meta_tbl )
+		add metas[meta_tbl[src]];
+
+	return expire_item(indicator, indicator_type, metas);
+	}
+
+# Function to check for intelligence hits.
 function find(s: Seen): bool
 	{
+	local ds = have_full_data ? data_store : min_data_store;
+
 	if ( s?$host )
 		{
-		return ((s$host in min_data_store$host_data) ||
-		        (have_full_data && s$host in data_store$host_data));
+		return ((s$host in ds$host_data) ||
+		        (|matching_subnets(addr_to_subnet(s$host), ds$subnet_data)| > 0));
 		}
-	else if ( ([to_lower(s$indicator), s$indicator_type] in min_data_store$string_data) ||
-	          (have_full_data && [to_lower(s$indicator), s$indicator_type] in data_store$string_data) )
-		{
-		return T;
-		}
 	else
 		{
-		return F;
+		return ([to_lower(s$indicator), s$indicator_type] in ds$string_data);
 		}
 	}
 
+# Function to retrieve intelligence items while abstracting from different
+# data stores for different indicator types.
 function get_items(s: Seen): set[Item]
 	{
 	local return_data: set[Item];
+	local mt: MetaDataTable;
 
 	if ( ! have_full_data )
 		{
-		# A reporter warning should be generated here because this function
-		# should never be called from a host that doesn't have the full data.
-		# TODO: do a reporter warning.
+		Reporter::warning(fmt("Intel::get_items was called from a host (%s) that doesn't have the full data.",
+		                      peer_description));
 		return return_data;
 		}
@@ -214,11 +300,23 @@ function get_items(s: Seen): set[Item]
 		# See if the host is known about and it has meta values
 		if ( s$host in data_store$host_data )
 			{
-			for ( m in data_store$host_data[s$host] )
+			mt = data_store$host_data[s$host];
+			for ( m in mt )
 				{
-				add return_data[Item($indicator=cat(s$host), $indicator_type=ADDR, $meta=m)];
+				add return_data[Item($indicator=cat(s$host), $indicator_type=ADDR, $meta=mt[m])];
 				}
 			}
+
+		# See if the host is part of a known subnet, which has meta values
+		local nets: table[subnet] of MetaDataTable;
+		nets = filter_subnet_table(addr_to_subnet(s$host), data_store$subnet_data);
+		for ( n in nets )
+			{
+			mt = nets[n];
+			for ( m in mt )
+				{
+				add return_data[Item($indicator=cat(n), $indicator_type=SUBNET, $meta=mt[m])];
+				}
+			}
 		}
 	else
 		{
@@ -226,9 +324,10 @@ function get_items(s: Seen): set[Item]
 		# See if the string is known about and it has meta values
 		if ( [lower_indicator, s$indicator_type] in data_store$string_data )
 			{
-			for ( m in data_store$string_data[lower_indicator, s$indicator_type] )
+			mt = data_store$string_data[lower_indicator, s$indicator_type];
+			for ( m in mt )
 				{
-				add return_data[Item($indicator=s$indicator, $indicator_type=s$indicator_type, $meta=m)];
+				add return_data[Item($indicator=s$indicator, $indicator_type=s$indicator_type, $meta=mt[m])];
 				}
 			}
 		}
@@ -263,59 +362,39 @@ function Intel::seen(s: Seen)
 		}
 	}
 
-function has_meta(check: MetaData, metas: set[MetaData]): bool
-	{
-	local check_hash = md5_hash(check);
-	for ( m in metas )
-		{
-		if ( check_hash == md5_hash(m) )
-			return T;
-		}
-
-	# The records must not be equivalent if we made it this far.
-	return F;
-	}
-
 event Intel::match(s: Seen, items: set[Item]) &priority=5
 	{
-	local info = Info($ts=network_time(), $seen=s);
+	local info = Info($ts=network_time(), $seen=s, $matched=TypeSet());
 
-	if ( s?$f )
-		{
-		if ( s$f?$conns && |s$f$conns| == 1 )
-			{
-			for ( cid in s$f$conns )
-				s$conn = s$f$conns[cid];
-			}
-
-		if ( ! info?$fuid )
-			info$fuid = s$f$id;
-
-		if ( ! info?$file_mime_type && s$f?$info && s$f$info?$mime_type )
-			info$file_mime_type = s$f$info$mime_type;
-
-		if ( ! info?$file_desc )
-			info$file_desc = Files::describe(s$f);
-		}
+	if ( hook extend_match(info, s, items) )
+		Log::write(Intel::LOG, info);
+	}
 
+hook extend_match(info: Info, s: Seen, items: set[Item]) &priority=5
+	{
+	# Add default information to matches.
 	if ( s?$conn )
 		{
-		info$uid = s$conn$uid;
+		s$uid = s$conn$uid;
 		info$id = s$conn$id;
 		}
 
-	for ( item in items )
-		add info$sources[item$meta$source];
+	if ( s?$uid )
+		info$uid = s$uid;
 
-	Log::write(Intel::LOG, info);
+	for ( item in items )
+		{
+		add info$sources[item$meta$source];
+		add info$matched[item$indicator_type];
+		}
 	}
 
 function insert(item: Item)
 	{
-	# Create and fill out the meta data item.
+	# Create and fill out the metadata item.
 	local meta = item$meta;
-	local metas: set[MetaData];
+	local meta_tbl: table[string] of MetaData;
+	local is_new: bool = T;
 
 	# All intelligence is case insensitive at the moment.
 	local lower_indicator = to_lower(item$indicator);
@@ -326,51 +405,157 @@ function insert(item: Item)
 		if ( have_full_data )
 			{
 			if ( host !in data_store$host_data )
-				data_store$host_data[host] = set();
+				data_store$host_data[host] = table();
+			else
+				is_new = F;
 
-			metas = data_store$host_data[host];
+			meta_tbl = data_store$host_data[host];
 			}
 
 		add min_data_store$host_data[host];
 		}
+	else if ( item$indicator_type == SUBNET )
+		{
+		local net = to_subnet(item$indicator);
+		if ( have_full_data )
+			{
+			if ( !check_subnet(net, data_store$subnet_data) )
+				data_store$subnet_data[net] = table();
+			else
+				is_new = F;
+
+			meta_tbl = data_store$subnet_data[net];
+			}
+
+		add min_data_store$subnet_data[net];
+		}
 	else
 		{
 		if ( have_full_data )
 			{
 			if ( [lower_indicator, item$indicator_type] !in data_store$string_data )
-				data_store$string_data[lower_indicator, item$indicator_type] = set();
+				data_store$string_data[lower_indicator, item$indicator_type] = table();
+			else
+				is_new = F;
 
-			metas = data_store$string_data[lower_indicator, item$indicator_type];
+			meta_tbl = data_store$string_data[lower_indicator, item$indicator_type];
 			}
 
 		add min_data_store$string_data[lower_indicator, item$indicator_type];
 		}
 
-	local updated = F;
-
 	if ( have_full_data )
 		{
-		for ( m in metas )
-			{
-			if ( meta$source == m$source )
-				{
-				if ( has_meta(meta, metas) )
-					{
-					# It's the same item being inserted again.
-					return;
-					}
-				else
-					{
-					# Same source, different metadata means updated item.
-					updated = T;
-					}
-				}
-			}
-
-		add metas[item$meta];
+		# Insert new metadata or update if already present.
+		meta_tbl[meta$source] = meta;
 		}
 
-	if ( updated )
-		event Intel::updated_item(item);
-	else
+	if ( is_new )
+		# Trigger insert for cluster in case the item is new
+		# or insert was called on a worker.
 		event Intel::new_item(item);
 	}
 
+# Function to check whether an item is present.
+function item_exists(item: Item): bool
+	{
+	local ds = have_full_data ? data_store : min_data_store;
+
+	switch ( item$indicator_type )
+		{
+		case ADDR:
+			return to_addr(item$indicator) in ds$host_data;
+		case SUBNET:
+			return to_subnet(item$indicator) in ds$subnet_data;
+		default:
+			return [item$indicator, item$indicator_type] in ds$string_data;
+		}
+	}
+
+# Function to remove metadata of an item. The function returns T
+# if there is no metadata left for the given indicator.
+function remove_meta_data(item: Item): bool
+	{
+	if ( ! have_full_data )
+		{
+		Reporter::warning(fmt("Intel::remove_meta_data was called from a host (%s) that doesn't have the full data.",
+		                      peer_description));
+		return F;
+		}
+
+	switch ( item$indicator_type )
+		{
+		case ADDR:
+			local host = to_addr(item$indicator);
+			delete data_store$host_data[host][item$meta$source];
+			return (|data_store$host_data[host]| == 0);
+		case SUBNET:
+			local net = to_subnet(item$indicator);
+			delete data_store$subnet_data[net][item$meta$source];
+			return (|data_store$subnet_data[net]| == 0);
+		default:
+			delete data_store$string_data[item$indicator, item$indicator_type][item$meta$source];
+			return (|data_store$string_data[item$indicator, item$indicator_type]| == 0);
+		}
+	}
+
+function remove(item: Item, purge_indicator: bool)
+	{
+	# Check whether the indicator is present
+	if ( ! item_exists(item) )
+		{
+		Reporter::info(fmt("Tried to remove non-existing item '%s' (%s).",
+		                   item$indicator, item$indicator_type));
+		return;
+		}
+
+	# Delegate removal if we are on a worker
+	if ( !have_full_data )
+		{
+		event Intel::remove_item(item, purge_indicator);
+		return;
+		}
+
+	# Remove metadata from manager's data store
+	local no_meta_data = remove_meta_data(item);
+	# Remove whole indicator if necessary
+	if ( no_meta_data || purge_indicator )
+		{
+		switch ( item$indicator_type )
+			{
+			case ADDR:
+				local host = to_addr(item$indicator);
+				delete data_store$host_data[host];
+				break;
+			case SUBNET:
+				local net = to_subnet(item$indicator);
+				delete data_store$subnet_data[net];
+				break;
+			default:
+				delete data_store$string_data[item$indicator, item$indicator_type];
+				break;
+			}
+		# Trigger deletion in minimal data stores
+		event Intel::purge_item(item);
+		}
+	}
+
+# Handling of indicator removal in minimal data stores.
+event purge_item(item: Item)
+	{
+	switch ( item$indicator_type )
+		{
+		case ADDR:
+			local host = to_addr(item$indicator);
+			delete min_data_store$host_data[host];
+			break;
+		case SUBNET:
+			local net = to_subnet(item$indicator);
+			delete min_data_store$subnet_data[net];
+			break;
+		default:
+			delete min_data_store$string_data[item$indicator, item$indicator_type];
+			break;
+		}
+	}
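Taken together, the new functions give scripts control over the whole item life cycle: insertion with per-source metadata, time-based expiration, and explicit removal (which a worker transparently delegates to the manager via Intel::remove_item). A hypothetical end-to-end sketch; the indicator values are examples from TEST-NET-1:

redef Intel::item_expiration = 10 min;

hook Intel::item_expired(indicator: string, indicator_type: Intel::Type,
                         metas: set[Intel::MetaData])
	{
	print fmt("intel item %s (%s) expired", indicator, indicator_type);
	# Terminating the hook removes the item instead of resetting the timeout.
	break;
	}

event bro_init()
	{
	local meta = Intel::MetaData($source="local", $desc="testing only");

	Intel::insert(Intel::Item($indicator="192.0.2.1",
	                          $indicator_type=Intel::ADDR, $meta=meta));
	Intel::insert(Intel::Item($indicator="192.0.2.0/24",
	                          $indicator_type=Intel::SUBNET, $meta=meta));

	# Drop the subnet indicator again, purging any remaining metadata.
	Intel::remove(Intel::Item($indicator="192.0.2.0/24",
	                          $indicator_type=Intel::SUBNET, $meta=meta), T);
	}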

Some files were not shown because too many files have changed in this diff.