Merge remote-tracking branch 'origin/master' into J-Gras-topic/jgras/bit-1507

# Conflicts:
#	testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log
Seth Hall 2016-06-15 10:32:46 -04:00
commit 6bc7c3f1be
548 changed files with 22229 additions and 3843 deletions

CHANGES (645 changed lines)

File diff suppressed because it is too large.


@ -88,7 +88,7 @@ endif ()
include_directories(BEFORE include_directories(BEFORE
${PCAP_INCLUDE_DIR} ${PCAP_INCLUDE_DIR}
${OpenSSL_INCLUDE_DIR} ${OPENSSL_INCLUDE_DIR}
${BIND_INCLUDE_DIR} ${BIND_INCLUDE_DIR}
${BinPAC_INCLUDE_DIR} ${BinPAC_INCLUDE_DIR}
${ZLIB_INCLUDE_DIR} ${ZLIB_INCLUDE_DIR}
@ -141,7 +141,7 @@ endif ()
set(brodeps set(brodeps
${BinPAC_LIBRARY} ${BinPAC_LIBRARY}
${PCAP_LIBRARY} ${PCAP_LIBRARY}
${OpenSSL_LIBRARIES} ${OPENSSL_LIBRARIES}
${BIND_LIBRARY} ${BIND_LIBRARY}
${ZLIB_LIBRARY} ${ZLIB_LIBRARY}
${JEMALLOC_LIBRARIES} ${JEMALLOC_LIBRARIES}

NEWS (68 changed lines)

@ -23,11 +23,30 @@ New Dependencies
New Functionality New Functionality
----------------- -----------------
- Bro now includes the NetControl framework. The framework allows for easy
interaction of Bro with hard- and software switches, firewalls, etc.
- There is a new file entropy analyzer for files.
- Bro now supports the remote framebuffer protocol (RFB) that is used by
VNC servers for remote graphical displays.
- Bro now supports the Radiotap header for 802.11 frames. - Bro now supports the Radiotap header for 802.11 frames.
- Bro now has rudimentary IMAP and XMPP analyzers examining the initial
phases of the protocol. Right now these analyzers only identify
STARTTLS sessions, handing them over to TLS analysis. The analyzers
do not yet analyze any further IMAP/XMPP content.
- Bro now tracks VLAN IDs. To record them inside the connection log, - Bro now tracks VLAN IDs. To record them inside the connection log,
load protocols/conn/vlan-logging.bro. load protocols/conn/vlan-logging.bro.
- The new misc/stats.bro records Bro execution statistics in a
standard Bro log file.
- A new dns_CAA_reply event gives access to DNS Certification Authority
Authorization replies.
- A new per-packet event raw_packet() provides access to layer 2 - A new per-packet event raw_packet() provides access to layer 2
information. Use with care, generating events per packet is information. Use with care, generating events per packet is
expensive. expensive.
@ -37,12 +56,51 @@ New Functionality
argument that will be used for decoding errors into weird.log argument that will be used for decoding errors into weird.log
(instead of reporter.log). (instead of reporter.log).
- A new get_current_packet_header bif returns the headers of the current
packet.
- Three new built-in functions for handling set[subnet] and table[subnet]
(see the usage sketch below):
- check_subnet(subnet, table) checks if a specific subnet is a member
of a set/table. This is different from the "in" operator, which always
performs a longest prefix match.
- matching_subnets(subnet, table) returns all subnets of the set or table
that contain the given subnet.
- filter_subnet_table(subnet, table) works like check_subnet, but returns
a table containing all matching entries.
- Several built-in functions for handling IP addresses and subnets were added:
- is_v4_subnet(subnet) checks whether a subnet specification is IPv4.
- is_v6_subnet(subnet) checks whether a subnet specification is IPv6.
- addr_to_subnet(addr) converts an IP address to a /32 subnet.
- subnet_to_addr(subnet) returns the IP address part of a subnet.
- subnet_width(subnet) returns the width of a subnet.
- The IRC analyzer now recognizes StartTLS sessions and enables the SSL - The IRC analyzer now recognizes StartTLS sessions and enables the SSL
analyzer for them. analyzer for them.
- A set of new built-in functions for gathering execution statistics:
get_net_stats(), get_conn_stats(), get_proc_stats(),
get_event_stats(), get_reassembler_stats(), get_dns_stats(),
get_timer_stats(), get_file_analysis_stats(), get_thread_stats(),
get_gap_stats(), and get_matcher_stats().
- Two new functions haversine_distance() and haversine_distance_ip()
for calculating geographic distances. They require that Bro be
built with libgeoip.
- New Bro plugins in aux/plugins: - New Bro plugins in aux/plugins:
- af_packet: Native AF_PACKET support. - af_packet: Native AF_PACKET support.
- kafka : Log writer interfacing to Kafka.
- myricom: Native Myricom SNF v3 support. - myricom: Native Myricom SNF v3 support.
- pf_ring: Native PF_RING support. - pf_ring: Native PF_RING support.
- redis: An experimental log writer for Redis. - redis: An experimental log writer for Redis.
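As a quick illustration of several of the additions above, here is a minimal
Bro sketch, assuming the functions behave exactly as described in these
entries (the addresses, subnets, and printed values are made up for
illustration)::

    event bro_init()
        {
        local nets: set[subnet] = { 10.0.0.0/8, 10.2.0.0/16, 192.168.0.0/16 };
        local host_net = addr_to_subnet(10.2.3.4);   # 10.2.3.4/32

        print subnet_width(host_net);                # 32
        print is_v4_subnet(host_net);                # T
        print check_subnet(10.2.0.0/16, nets);       # T: exact member of the set
        print matching_subnets(host_net, nets);      # every subnet containing 10.2.3.4/32
        }

    event bro_done()
        {
        # One of the new execution-statistics functions; the fields of the
        # record it returns are not spelled out in this changelog.
        print get_net_stats();
        }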
@ -51,6 +109,16 @@ New Functionality
Changed Functionality Changed Functionality
--------------------- ---------------------
- The BrokerComm and BrokerStore namespaces were renamed to Broker.
The Broker "print" function was renamed to Broker::send_print, and
"event" to "Broker::send_event".
- ``SSH::skip_processing_after_detection`` was removed. The functionality was
replaced by ``SSH::disable_analyzer_after_detection``.
- ``net_stats()`` and ``resource_usage()`` have been superseded by the
new execution statistics functions (see above).
- Some script-level identifiers have changed their names: - Some script-level identifiers have changed their names:
snaplen -> Pcap::snaplen snaplen -> Pcap::snaplen


@ -1 +1 @@
2.4-253 2.4-613

@ -1 +1 @@
Subproject commit 2edf0a58854ca5bdb444e74ec8cbac0fafbd42f4 Subproject commit 97df41aa79344faadaf075f7fa673b87ecbc6f77

@ -1 +1 @@
Subproject commit f5da34fb4fbe00a683697e9052cffdd7d804f8c1 Subproject commit 4ba16fa2fcd59d90ea497965f77655d2111bc9e8

@ -1 +1 @@
Subproject commit 0880251535df5a3a16feb2b25c26a04aa52585f1 Subproject commit 2592077f96008f5c64b23b6fd605bfce3ec47d84

@ -1 +1 @@
Subproject commit 5d765dd9d94eb25b31d1ecf8df6561fc714694fc Subproject commit 214682a9d4b238dc55d7ecfa7c127c3aaad750d4

@ -1 +1 @@
Subproject commit 5c90543dee9212121d08e6aa630fb81dd5133df7 Subproject commit a4f81f79cfc0d0fe3fe435d33217f5bf9c2279e1

@ -1 +1 @@
Subproject commit 71a1e3efc437aa9f981be71affa1c4615e8d98a5 Subproject commit 4bea8fa948be2bc86ff92399137131bc1c029b08

@ -1 +1 @@
Subproject commit 1021ca5f248b9da01766e94d840896e029fb0e6e Subproject commit ebab672fa404b26944a6df6fbfb1aaab95ec5d48


@ -14,12 +14,18 @@
/* We are on a Linux system */ /* We are on a Linux system */
#cmakedefine HAVE_LINUX #cmakedefine HAVE_LINUX
/* We are on a Mac OS X (Darwin) system */
#cmakedefine HAVE_DARWIN
/* Define if you have the `mallinfo' function. */ /* Define if you have the `mallinfo' function. */
#cmakedefine HAVE_MALLINFO #cmakedefine HAVE_MALLINFO
/* Define if you have the <memory.h> header file. */ /* Define if you have the <memory.h> header file. */
#cmakedefine HAVE_MEMORY_H #cmakedefine HAVE_MEMORY_H
/* Define if you have the <netinet/ether.h> header file */
#cmakedefine HAVE_NETINET_ETHER_H
/* Define if you have the <netinet/if_ether.h> header file. */ /* Define if you have the <netinet/if_ether.h> header file. */
#cmakedefine HAVE_NETINET_IF_ETHER_H #cmakedefine HAVE_NETINET_IF_ETHER_H

cmake (2 changed lines)

@ -1 +1 @@
Subproject commit 23773d7107e8d51e2b1bb0fd2e2d85fda50df743 Subproject commit b8b4604f362aa8d4b64e589cbea499a0c041ef24

configure (10 changed lines, vendored)

@ -57,7 +57,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--with-flex=PATH path to flex executable --with-flex=PATH path to flex executable
--with-bison=PATH path to bison executable --with-bison=PATH path to bison executable
--with-python=PATH path to Python executable --with-python=PATH path to Python executable
--with-libcaf=PATH path to C++ Actor Framework installation --with-caf=PATH path to C++ Actor Framework installation
(a required Broker dependency) (a required Broker dependency)
Optional Packages in Non-Standard Locations: Optional Packages in Non-Standard Locations:
@ -226,7 +226,7 @@ while [ $# -ne 0 ]; do
append_cache_entry DISABLE_RUBY_BINDINGS BOOL false append_cache_entry DISABLE_RUBY_BINDINGS BOOL false
;; ;;
--with-openssl=*) --with-openssl=*)
append_cache_entry OpenSSL_ROOT_DIR PATH $optarg append_cache_entry OPENSSL_ROOT_DIR PATH $optarg
;; ;;
--with-bind=*) --with-bind=*)
append_cache_entry BIND_ROOT_DIR PATH $optarg append_cache_entry BIND_ROOT_DIR PATH $optarg
@ -276,8 +276,12 @@ while [ $# -ne 0 ]; do
--with-swig=*) --with-swig=*)
append_cache_entry SWIG_EXECUTABLE PATH $optarg append_cache_entry SWIG_EXECUTABLE PATH $optarg
;; ;;
--with-caf=*)
append_cache_entry CAF_ROOT_DIR PATH $optarg
;;
--with-libcaf=*) --with-libcaf=*)
append_cache_entry LIBCAF_ROOT_DIR PATH $optarg echo "warning: --with-libcaf deprecated, use --with-caf instead"
append_cache_entry CAF_ROOT_DIR PATH $optarg
;; ;;
--with-rocksdb=*) --with-rocksdb=*)
append_cache_entry ROCKSDB_ROOT_DIR PATH $optarg append_cache_entry ROCKSDB_ROOT_DIR PATH $optarg


@ -96,13 +96,13 @@ logging is done remotely to the manager, and normally very little is written
to disk. to disk.
The rule of thumb we have followed recently is to allocate approximately 1 The rule of thumb we have followed recently is to allocate approximately 1
core for every 80Mbps of traffic that is being analyzed. However, this core for every 250Mbps of traffic that is being analyzed. However, this
estimate could be extremely traffic mix-specific. It has generally worked estimate could be extremely traffic mix-specific. It has generally worked
for mixed traffic with many users and servers. For example, if your traffic for mixed traffic with many users and servers. For example, if your traffic
peaks around 2Gbps (combined) and you want to handle traffic at peak load, peaks around 2Gbps (combined) and you want to handle traffic at peak load,
you may want to have 26 cores available (2048 / 80 == 25.6). If the 80Mbps you may want to have 8 cores available (2048 / 250 == 8.2). If the 250Mbps
estimate works for your traffic, this could be handled by 3 physical hosts estimate works for your traffic, this could be handled by 2 physical hosts
dedicated to being workers with each one containing dual 6-core processors. dedicated to being workers with each one containing a quad-core processor.
Once a flow-based load balancer is put into place this model is extremely Once a flow-based load balancer is put into place this model is extremely
easy to scale. It is recommended that you estimate the amount of easy to scale. It is recommended that you estimate the amount of


@ -0,0 +1 @@
../../../../aux/plugins/kafka/README


@ -66,7 +66,7 @@ master_doc = 'index'
# General information about the project. # General information about the project.
project = u'Bro' project = u'Bro'
copyright = u'2013, The Bro Project' copyright = u'2016, The Bro Project'
# The version info for the project you're documenting, acts as replacement for # The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the # |version| and |release|, also used in various other places throughout the


@ -17,20 +17,20 @@ Connecting to Peers
=================== ===================
Communication via Broker must first be turned on via Communication via Broker must first be turned on via
:bro:see:`BrokerComm::enable`. :bro:see:`Broker::enable`.
Bro can accept incoming connections by calling :bro:see:`BrokerComm::listen` Bro can accept incoming connections by calling :bro:see:`Broker::listen`
and then monitor connection status updates via the and then monitor connection status updates via the
:bro:see:`BrokerComm::incoming_connection_established` and :bro:see:`Broker::incoming_connection_established` and
:bro:see:`BrokerComm::incoming_connection_broken` events. :bro:see:`Broker::incoming_connection_broken` events.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-listener.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-listener.bro
Bro can initiate outgoing connections by calling :bro:see:`BrokerComm::connect` Bro can initiate outgoing connections by calling :bro:see:`Broker::connect`
and then monitor connection status updates via the and then monitor connection status updates via the
:bro:see:`BrokerComm::outgoing_connection_established`, :bro:see:`Broker::outgoing_connection_established`,
:bro:see:`BrokerComm::outgoing_connection_broken`, and :bro:see:`Broker::outgoing_connection_broken`, and
:bro:see:`BrokerComm::outgoing_connection_incompatible` events. :bro:see:`Broker::outgoing_connection_incompatible` events.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-connector.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-connector.bro
@ -38,14 +38,14 @@ Remote Printing
=============== ===============
To receive remote print messages, first use the To receive remote print messages, first use the
:bro:see:`BrokerComm::subscribe_to_prints` function to advertise to peers a :bro:see:`Broker::subscribe_to_prints` function to advertise to peers a
topic prefix of interest and then create an event handler for topic prefix of interest and then create an event handler for
:bro:see:`BrokerComm::print_handler` to handle any print messages that are :bro:see:`Broker::print_handler` to handle any print messages that are
received. received.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-listener.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-listener.bro
To send remote print messages, just call :bro:see:`BrokerComm::print`. To send remote print messages, just call :bro:see:`Broker::send_print`.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-connector.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-connector.bro
@ -69,14 +69,14 @@ Remote Events
============= =============
Receiving remote events is similar to remote prints. Just use the Receiving remote events is similar to remote prints. Just use the
:bro:see:`BrokerComm::subscribe_to_events` function and possibly define any :bro:see:`Broker::subscribe_to_events` function and possibly define any
new events along with handlers that peers may want to send. new events along with handlers that peers may want to send.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-listener.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/events-listener.bro
There are two different ways to send events. The first is to call the There are two different ways to send events. The first is to call the
:bro:see:`BrokerComm::event` function directly. The second option is to call :bro:see:`Broker::send_event` function directly. The second option is to call
the :bro:see:`BrokerComm::auto_event` function where you specify a the :bro:see:`Broker::auto_event` function where you specify a
particular event that will be automatically sent to peers whenever the particular event that will be automatically sent to peers whenever the
event is called locally via the normal event invocation syntax. event is called locally via the normal event invocation syntax.
@ -104,14 +104,14 @@ Remote Logging
.. btest-include:: ${DOC_ROOT}/frameworks/broker/testlog.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/testlog.bro
Use the :bro:see:`BrokerComm::subscribe_to_logs` function to advertise interest Use the :bro:see:`Broker::subscribe_to_logs` function to advertise interest
in logs written by peers. The topic names that Bro uses are implicitly of the in logs written by peers. The topic names that Bro uses are implicitly of the
form "bro/log/<stream-name>". form "bro/log/<stream-name>".
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-listener.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-listener.bro
To send remote logs either redef :bro:see:`Log::enable_remote_logging` or To send remote logs either redef :bro:see:`Log::enable_remote_logging` or
use the :bro:see:`BrokerComm::enable_remote_logs` function. The former use the :bro:see:`Broker::enable_remote_logs` function. The former
allows any log stream to be sent to peers while the latter enables remote allows any log stream to be sent to peers while the latter enables remote
logging for particular streams. logging for particular streams.
@ -137,24 +137,24 @@ Tuning Access Control
By default, endpoints do not restrict the message topics that they send By default, endpoints do not restrict the message topics that they send
to peers and do not restrict what message topics and data store to peers and do not restrict what message topics and data store
identifiers get advertised to peers. These are the default identifiers get advertised to peers. These are the default
:bro:see:`BrokerComm::EndpointFlags` supplied to :bro:see:`BrokerComm::enable`. :bro:see:`Broker::EndpointFlags` supplied to :bro:see:`Broker::enable`.
If not using the ``auto_publish`` flag, one can use the If not using the ``auto_publish`` flag, one can use the
:bro:see:`BrokerComm::publish_topic` and :bro:see:`BrokerComm::unpublish_topic` :bro:see:`Broker::publish_topic` and :bro:see:`Broker::unpublish_topic`
functions to manipulate the set of message topics (must match exactly) functions to manipulate the set of message topics (must match exactly)
that are allowed to be sent to peer endpoints. These settings take that are allowed to be sent to peer endpoints. These settings take
precedence over the per-message ``peers`` flag supplied to functions precedence over the per-message ``peers`` flag supplied to functions
that take a :bro:see:`BrokerComm::SendFlags` such as :bro:see:`BrokerComm::print`, that take a :bro:see:`Broker::SendFlags` such as :bro:see:`Broker::send_print`,
:bro:see:`BrokerComm::event`, :bro:see:`BrokerComm::auto_event` or :bro:see:`Broker::send_event`, :bro:see:`Broker::auto_event` or
:bro:see:`BrokerComm::enable_remote_logs`. :bro:see:`Broker::enable_remote_logs`.
If not using the ``auto_advertise`` flag, one can use the If not using the ``auto_advertise`` flag, one can use the
:bro:see:`BrokerComm::advertise_topic` and :bro:see:`Broker::advertise_topic` and
:bro:see:`BrokerComm::unadvertise_topic` functions :bro:see:`Broker::unadvertise_topic` functions
to manipulate the set of topic prefixes that are allowed to be to manipulate the set of topic prefixes that are allowed to be
advertised to peers. If an endpoint does not advertise a topic prefix, then advertised to peers. If an endpoint does not advertise a topic prefix, then
the only way peers can send messages to it is via the ``unsolicited`` the only way peers can send messages to it is via the ``unsolicited``
flag of :bro:see:`BrokerComm::SendFlags` and choosing a topic with a matching flag of :bro:see:`Broker::SendFlags` and choosing a topic with a matching
prefix (i.e. full topic may be longer than receivers prefix, just the prefix (i.e. full topic may be longer than receivers prefix, just the
prefix needs to match). prefix needs to match).
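As a rough sketch of the tuning described above, a script might disable the
automatic behavior and then whitelist topics explicitly. This assumes that
``auto_publish`` and ``auto_advertise`` are boolean fields of
:bro:see:`Broker::EndpointFlags`, as their use in this section suggests::

    event bro_init()
        {
        # Assumed field names: turn off automatic publication/advertisement.
        local flags = Broker::EndpointFlags($auto_publish = F, $auto_advertise = F);
        Broker::enable(flags);

        # Only messages under this exact topic may now be sent to peers.
        Broker::publish_topic("bro/print/allowed");

        # Only this topic prefix is advertised to peers.
        Broker::advertise_topic("bro/event/");
        }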
@ -192,8 +192,8 @@ last modification time.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-connector.bro .. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-connector.bro
In the above example, if a local copy of the store contents isn't In the above example, if a local copy of the store contents isn't
needed, just replace the :bro:see:`BrokerStore::create_clone` call with needed, just replace the :bro:see:`Broker::create_clone` call with
:bro:see:`BrokerStore::create_frontend`. Queries will then be made against :bro:see:`Broker::create_frontend`. Queries will then be made against
the remote master store instead of the local clone. the remote master store instead of the local clone.
Note that all data store queries must be made within Bro's asynchronous Note that all data store queries must be made within Bro's asynchronous


@ -1,18 +1,18 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "connector"; redef Broker::endpoint_name = "connector";
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::connect("127.0.0.1", broker_port, 1sec); Broker::connect("127.0.0.1", broker_port, 1sec);
} }
event BrokerComm::outgoing_connection_established(peer_address: string, event Broker::outgoing_connection_established(peer_address: string,
peer_port: port, peer_port: port,
peer_name: string) peer_name: string)
{ {
print "BrokerComm::outgoing_connection_established", print "Broker::outgoing_connection_established",
peer_address, peer_port, peer_name; peer_address, peer_port, peer_name;
terminate(); terminate();
} }


@ -1,20 +1,20 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "listener"; redef Broker::endpoint_name = "listener";
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::listen(broker_port, "127.0.0.1"); Broker::listen(broker_port, "127.0.0.1");
} }
event BrokerComm::incoming_connection_established(peer_name: string) event Broker::incoming_connection_established(peer_name: string)
{ {
print "BrokerComm::incoming_connection_established", peer_name; print "Broker::incoming_connection_established", peer_name;
} }
event BrokerComm::incoming_connection_broken(peer_name: string) event Broker::incoming_connection_broken(peer_name: string)
{ {
print "BrokerComm::incoming_connection_broken", peer_name; print "Broker::incoming_connection_broken", peer_name;
terminate(); terminate();
} }


@ -1,30 +1,30 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "connector"; redef Broker::endpoint_name = "connector";
global my_event: event(msg: string, c: count); global my_event: event(msg: string, c: count);
global my_auto_event: event(msg: string, c: count); global my_auto_event: event(msg: string, c: count);
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::connect("127.0.0.1", broker_port, 1sec); Broker::connect("127.0.0.1", broker_port, 1sec);
BrokerComm::auto_event("bro/event/my_auto_event", my_auto_event); Broker::auto_event("bro/event/my_auto_event", my_auto_event);
} }
event BrokerComm::outgoing_connection_established(peer_address: string, event Broker::outgoing_connection_established(peer_address: string,
peer_port: port, peer_port: port,
peer_name: string) peer_name: string)
{ {
print "BrokerComm::outgoing_connection_established", print "Broker::outgoing_connection_established",
peer_address, peer_port, peer_name; peer_address, peer_port, peer_name;
BrokerComm::event("bro/event/my_event", BrokerComm::event_args(my_event, "hi", 0)); Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "hi", 0));
event my_auto_event("stuff", 88); event my_auto_event("stuff", 88);
BrokerComm::event("bro/event/my_event", BrokerComm::event_args(my_event, "...", 1)); Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "...", 1));
event my_auto_event("more stuff", 51); event my_auto_event("more stuff", 51);
BrokerComm::event("bro/event/my_event", BrokerComm::event_args(my_event, "bye", 2)); Broker::send_event("bro/event/my_event", Broker::event_args(my_event, "bye", 2));
} }
event BrokerComm::outgoing_connection_broken(peer_address: string, event Broker::outgoing_connection_broken(peer_address: string,
peer_port: port) peer_port: port)
{ {
terminate(); terminate();


@ -1,20 +1,20 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "listener"; redef Broker::endpoint_name = "listener";
global msg_count = 0; global msg_count = 0;
global my_event: event(msg: string, c: count); global my_event: event(msg: string, c: count);
global my_auto_event: event(msg: string, c: count); global my_auto_event: event(msg: string, c: count);
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::subscribe_to_events("bro/event/"); Broker::subscribe_to_events("bro/event/");
BrokerComm::listen(broker_port, "127.0.0.1"); Broker::listen(broker_port, "127.0.0.1");
} }
event BrokerComm::incoming_connection_established(peer_name: string) event Broker::incoming_connection_established(peer_name: string)
{ {
print "BrokerComm::incoming_connection_established", peer_name; print "Broker::incoming_connection_established", peer_name;
} }
event my_event(msg: string, c: count) event my_event(msg: string, c: count)


@ -2,16 +2,16 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "connector"; redef Broker::endpoint_name = "connector";
redef Log::enable_local_logging = F; redef Log::enable_local_logging = F;
redef Log::enable_remote_logging = F; redef Log::enable_remote_logging = F;
global n = 0; global n = 0;
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::enable_remote_logs(Test::LOG); Broker::enable_remote_logs(Test::LOG);
BrokerComm::connect("127.0.0.1", broker_port, 1sec); Broker::connect("127.0.0.1", broker_port, 1sec);
} }
event do_write() event do_write()
@ -24,16 +24,16 @@ event do_write()
event do_write(); event do_write();
} }
event BrokerComm::outgoing_connection_established(peer_address: string, event Broker::outgoing_connection_established(peer_address: string,
peer_port: port, peer_port: port,
peer_name: string) peer_name: string)
{ {
print "BrokerComm::outgoing_connection_established", print "Broker::outgoing_connection_established",
peer_address, peer_port, peer_name; peer_address, peer_port, peer_name;
event do_write(); event do_write();
} }
event BrokerComm::outgoing_connection_broken(peer_address: string, event Broker::outgoing_connection_broken(peer_address: string,
peer_port: port) peer_port: port)
{ {
terminate(); terminate();


@ -2,18 +2,18 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "listener"; redef Broker::endpoint_name = "listener";
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::subscribe_to_logs("bro/log/Test::LOG"); Broker::subscribe_to_logs("bro/log/Test::LOG");
BrokerComm::listen(broker_port, "127.0.0.1"); Broker::listen(broker_port, "127.0.0.1");
} }
event BrokerComm::incoming_connection_established(peer_name: string) event Broker::incoming_connection_established(peer_name: string)
{ {
print "BrokerComm::incoming_connection_established", peer_name; print "Broker::incoming_connection_established", peer_name;
} }
event Test::log_test(rec: Test::Info) event Test::log_test(rec: Test::Info)


@ -1,25 +1,25 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "connector"; redef Broker::endpoint_name = "connector";
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::connect("127.0.0.1", broker_port, 1sec); Broker::connect("127.0.0.1", broker_port, 1sec);
} }
event BrokerComm::outgoing_connection_established(peer_address: string, event Broker::outgoing_connection_established(peer_address: string,
peer_port: port, peer_port: port,
peer_name: string) peer_name: string)
{ {
print "BrokerComm::outgoing_connection_established", print "Broker::outgoing_connection_established",
peer_address, peer_port, peer_name; peer_address, peer_port, peer_name;
BrokerComm::print("bro/print/hi", "hello"); Broker::send_print("bro/print/hi", "hello");
BrokerComm::print("bro/print/stuff", "..."); Broker::send_print("bro/print/stuff", "...");
BrokerComm::print("bro/print/bye", "goodbye"); Broker::send_print("bro/print/bye", "goodbye");
} }
event BrokerComm::outgoing_connection_broken(peer_address: string, event Broker::outgoing_connection_broken(peer_address: string,
peer_port: port) peer_port: port)
{ {
terminate(); terminate();


@ -1,21 +1,21 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "listener"; redef Broker::endpoint_name = "listener";
global msg_count = 0; global msg_count = 0;
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::subscribe_to_prints("bro/print/"); Broker::subscribe_to_prints("bro/print/");
BrokerComm::listen(broker_port, "127.0.0.1"); Broker::listen(broker_port, "127.0.0.1");
} }
event BrokerComm::incoming_connection_established(peer_name: string) event Broker::incoming_connection_established(peer_name: string)
{ {
print "BrokerComm::incoming_connection_established", peer_name; print "Broker::incoming_connection_established", peer_name;
} }
event BrokerComm::print_handler(msg: string) event Broker::print_handler(msg: string)
{ {
++msg_count; ++msg_count;
print "got print message", msg; print "got print message", msg;


@ -1,42 +1,42 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
global h: opaque of BrokerStore::Handle; global h: opaque of Broker::Handle;
function dv(d: BrokerComm::Data): BrokerComm::DataVector function dv(d: Broker::Data): Broker::DataVector
{ {
local rval: BrokerComm::DataVector; local rval: Broker::DataVector;
rval[0] = d; rval[0] = d;
return rval; return rval;
} }
global ready: event(); global ready: event();
event BrokerComm::outgoing_connection_broken(peer_address: string, event Broker::outgoing_connection_broken(peer_address: string,
peer_port: port) peer_port: port)
{ {
terminate(); terminate();
} }
event BrokerComm::outgoing_connection_established(peer_address: string, event Broker::outgoing_connection_established(peer_address: string,
peer_port: port, peer_port: port,
peer_name: string) peer_name: string)
{ {
local myset: set[string] = {"a", "b", "c"}; local myset: set[string] = {"a", "b", "c"};
local myvec: vector of string = {"alpha", "beta", "gamma"}; local myvec: vector of string = {"alpha", "beta", "gamma"};
h = BrokerStore::create_master("mystore"); h = Broker::create_master("mystore");
BrokerStore::insert(h, BrokerComm::data("one"), BrokerComm::data(110)); Broker::insert(h, Broker::data("one"), Broker::data(110));
BrokerStore::insert(h, BrokerComm::data("two"), BrokerComm::data(223)); Broker::insert(h, Broker::data("two"), Broker::data(223));
BrokerStore::insert(h, BrokerComm::data("myset"), BrokerComm::data(myset)); Broker::insert(h, Broker::data("myset"), Broker::data(myset));
BrokerStore::insert(h, BrokerComm::data("myvec"), BrokerComm::data(myvec)); Broker::insert(h, Broker::data("myvec"), Broker::data(myvec));
BrokerStore::increment(h, BrokerComm::data("one")); Broker::increment(h, Broker::data("one"));
BrokerStore::decrement(h, BrokerComm::data("two")); Broker::decrement(h, Broker::data("two"));
BrokerStore::add_to_set(h, BrokerComm::data("myset"), BrokerComm::data("d")); Broker::add_to_set(h, Broker::data("myset"), Broker::data("d"));
BrokerStore::remove_from_set(h, BrokerComm::data("myset"), BrokerComm::data("b")); Broker::remove_from_set(h, Broker::data("myset"), Broker::data("b"));
BrokerStore::push_left(h, BrokerComm::data("myvec"), dv(BrokerComm::data("delta"))); Broker::push_left(h, Broker::data("myvec"), dv(Broker::data("delta")));
BrokerStore::push_right(h, BrokerComm::data("myvec"), dv(BrokerComm::data("omega"))); Broker::push_right(h, Broker::data("myvec"), dv(Broker::data("omega")));
when ( local res = BrokerStore::size(h) ) when ( local res = Broker::size(h) )
{ {
print "master size", res; print "master size", res;
event ready(); event ready();
@ -47,7 +47,7 @@ event BrokerComm::outgoing_connection_established(peer_address: string,
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::connect("127.0.0.1", broker_port, 1secs); Broker::connect("127.0.0.1", broker_port, 1secs);
BrokerComm::auto_event("bro/event/ready", ready); Broker::auto_event("bro/event/ready", ready);
} }


@ -1,13 +1,13 @@
const broker_port: port = 9999/tcp &redef; const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T; redef exit_only_after_terminate = T;
global h: opaque of BrokerStore::Handle; global h: opaque of Broker::Handle;
global expected_key_count = 4; global expected_key_count = 4;
global key_count = 0; global key_count = 0;
function do_lookup(key: string) function do_lookup(key: string)
{ {
when ( local res = BrokerStore::lookup(h, BrokerComm::data(key)) ) when ( local res = Broker::lookup(h, Broker::data(key)) )
{ {
++key_count; ++key_count;
print "lookup", key, res; print "lookup", key, res;
@ -21,15 +21,15 @@ function do_lookup(key: string)
event ready() event ready()
{ {
h = BrokerStore::create_clone("mystore"); h = Broker::create_clone("mystore");
when ( local res = BrokerStore::keys(h) ) when ( local res = Broker::keys(h) )
{ {
print "clone keys", res; print "clone keys", res;
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 0))); do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 0)));
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 1))); do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 1)));
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 2))); do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 2)));
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 3))); do_lookup(Broker::refine_to_string(Broker::vector_lookup(res$result, 3)));
} }
timeout 10sec timeout 10sec
{ print "timeout"; } { print "timeout"; }
@ -37,7 +37,7 @@ event ready()
event bro_init() event bro_init()
{ {
BrokerComm::enable(); Broker::enable();
BrokerComm::subscribe_to_events("bro/event/ready"); Broker::subscribe_to_events("bro/event/ready");
BrokerComm::listen(broker_port, "127.0.0.1"); Broker::listen(broker_port, "127.0.0.1");
} }


@ -13,6 +13,6 @@ export {
event bro_init() &priority=5 event bro_init() &priority=5
{ {
BrokerComm::enable(); Broker::enable();
Log::create_stream(Test::LOG, [$columns=Test::Info, $ev=log_test, $path="test"]); Log::create_stream(Test::LOG, [$columns=Test::Info, $ev=log_test, $path="test"]);
} }


@ -83,9 +83,9 @@ The hook :bro:see:`Notice::policy` provides the mechanism for applying
actions and generally modifying the notice before it's sent onward to actions and generally modifying the notice before it's sent onward to
the action plugins. Hooks can be thought of as multi-bodied functions the action plugins. Hooks can be thought of as multi-bodied functions
and using them looks very similar to handling events. The difference and using them looks very similar to handling events. The difference
is that they don't go through the event queue like events. Users should is that they don't go through the event queue like events. Users can
directly make modifications to the :bro:see:`Notice::Info` record alter notice processing by directly modifying fields in the
given as the argument to the hook. :bro:see:`Notice::Info` record given as the argument to the hook.
Here's a simple example which tells Bro to send an email for all notices of Here's a simple example which tells Bro to send an email for all notices of
type :bro:see:`SSH::Password_Guessing` if the guesser attempted to log in to type :bro:see:`SSH::Password_Guessing` if the guesser attempted to log in to
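The example itself falls outside this hunk; a minimal sketch of such a hook
(leaving aside the specific host condition mentioned above) could look like::

    hook Notice::policy(n: Notice::Info)
        {
        # Add the email action for this notice type; further conditions on
        # the hosts involved could be checked here as well.
        if ( n$note == SSH::Password_Guessing )
            add n$actions[Notice::ACTION_EMAIL];
        }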


@ -75,6 +75,21 @@ To install the required dependencies, you can use:
Note that in older versions of FreeBSD, you might have to use the Note that in older versions of FreeBSD, you might have to use the
"pkg_add -r" command instead of "pkg install". "pkg_add -r" command instead of "pkg install".
For older versions of FreeBSD (especially FreeBSD 9.x), the system compiler
is not new enough to compile Bro. For these systems, you will have to install
a newer compiler using pkg; the ``clang34`` package should work.
You will also have to define several environment variables on these older
systems, similar to the following, before calling configure so that the new
compiler and headers are used:
.. console::
export CC=clang34
export CXX=clang++34
export CXXFLAGS="-stdlib=libc++ -I${LOCALBASE}/include/c++/v1 -L${LOCALBASE}/lib"
export LDFLAGS="-pthread"
* Mac OS X: * Mac OS X:
Compiling source code on Macs requires first installing Xcode_ (in older Compiling source code on Macs requires first installing Xcode_ (in older


@ -39,6 +39,8 @@ Network Protocols
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| rdp.log | RDP | :bro:type:`RDP::Info` | | rdp.log | RDP | :bro:type:`RDP::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| rfb.log | Remote Framebuffer (RFB) | :bro:type:`RFB::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| sip.log | SIP | :bro:type:`SIP::Info` | | sip.log | SIP | :bro:type:`SIP::Info` |
+----------------------------+---------------------------------------+---------------------------------+ +----------------------------+---------------------------------------+---------------------------------+
| smtp.log | SMTP transactions | :bro:type:`SMTP::Info` | | smtp.log | SMTP transactions | :bro:type:`SMTP::Info` |


@ -277,16 +277,25 @@ Here are the statements that the Bro scripting language supports.
.. bro:keyword:: delete .. bro:keyword:: delete
The "delete" statement is used to remove an element from a The "delete" statement is used to remove an element from a
:bro:type:`set` or :bro:type:`table`. Nothing happens if the :bro:type:`set` or :bro:type:`table`, or to remove a value from
specified element does not exist in the set or table. a :bro:type:`record` field that has the :bro:attr:`&optional` attribute.
When attempting to remove an element from a set or table,
nothing happens if the specified index does not exist.
When attempting to remove a value from an "&optional" record field,
nothing happens if that field doesn't have a value.
Example:: Example::
local myset = set("this", "test"); local myset = set("this", "test");
local mytable = table(["key1"] = 80/tcp, ["key2"] = 53/udp); local mytable = table(["key1"] = 80/tcp, ["key2"] = 53/udp);
local myrec = MyRecordType($a = 1, $b = 2);
delete myset["test"]; delete myset["test"];
delete mytable["key1"]; delete mytable["key1"];
# In this example, "b" must have the "&optional" attribute
delete myrec$b;
.. bro:keyword:: event .. bro:keyword:: event
The "event" statement immediately queues invocation of an event handler. The "event" statement immediately queues invocation of an event handler.
@ -306,30 +315,33 @@ Here are the statements that the Bro scripting language supports.
.. bro:keyword:: for .. bro:keyword:: for
A "for" loop iterates over each element in a string, set, vector, or A "for" loop iterates over each element in a string, set, vector, or
table and executes a statement for each iteration. Currently, table and executes a statement for each iteration (note that the order
modifying a container's membership while iterating over it may in which the loop iterates over the elements in a set or a table is
result in undefined behavior, so avoid adding or removing elements nondeterministic). However, no loop iterations occur if the string,
inside the loop. set, vector, or table is empty.
For each iteration of the loop, a loop variable will be assigned to an For each iteration of the loop, a loop variable will be assigned to an
element if the expression evaluates to a string or set, or an index if element if the expression evaluates to a string or set, or an index if
the expression evaluates to a vector or table. Then the statement the expression evaluates to a vector or table. Then the statement
is executed. However, the statement will not be executed if the expression is executed.
evaluates to an object with no elements.
If the expression is a table or a set with more than one index, then the If the expression is a table or a set with more than one index, then the
loop variable must be specified as a comma-separated list of different loop variable must be specified as a comma-separated list of different
loop variables (one for each index), enclosed in brackets. loop variables (one for each index), enclosed in brackets.
A :bro:keyword:`break` statement can be used at any time to immediately
terminate the "for" loop, and a :bro:keyword:`next` statement can be
used to skip to the next loop iteration.
Note that the loop variable in a "for" statement is not allowed to be Note that the loop variable in a "for" statement is not allowed to be
a global variable, and it does not need to be declared prior to the "for" a global variable, and it does not need to be declared prior to the "for"
statement. The type will be inferred from the elements of the statement. The type will be inferred from the elements of the
expression. expression.
Currently, modifying a container's membership while iterating over it may
result in undefined behavior, so do not add or remove elements
inside the loop.
A :bro:keyword:`break` statement will immediately terminate the "for"
loop, and a :bro:keyword:`next` statement will skip to the next loop
iteration.
Example:: Example::
local myset = set(80/tcp, 81/tcp); local myset = set(80/tcp, 81/tcp);
@ -532,8 +544,6 @@ Here are the statements that the Bro scripting language supports.
end with either a :bro:keyword:`break`, :bro:keyword:`fallthrough`, or end with either a :bro:keyword:`break`, :bro:keyword:`fallthrough`, or
:bro:keyword:`return` statement (although "return" is allowed only :bro:keyword:`return` statement (although "return" is allowed only
if the "switch" statement is inside a function, hook, or event handler). if the "switch" statement is inside a function, hook, or event handler).
If a "case" (or "default") block contain more than one statement, then
there is no need to wrap them in braces.
Note that the braces in a "switch" statement are always required (these Note that the braces in a "switch" statement are always required (these
do not indicate the presence of a `compound statement`_), and that no do not indicate the presence of a `compound statement`_), and that no
@ -604,12 +614,9 @@ Here are the statements that the Bro scripting language supports.
if ( skip_ahead() ) if ( skip_ahead() )
next; next;
[...]
if ( finish_up ) if ( finish_up )
break; break;
[...]
} }
.. _compound statement: .. _compound statement:


@ -0,0 +1,25 @@
module Conn;
export {
## The record type which contains column fields of the connection log.
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
proto: transport_proto &log;
service: string &log &optional;
duration: interval &log &optional;
orig_bytes: count &log &optional;
resp_bytes: count &log &optional;
conn_state: string &log &optional;
local_orig: bool &log &optional;
local_resp: bool &log &optional;
missed_bytes: count &log &default=0;
history: string &log &optional;
orig_pkts: count &log &optional;
orig_ip_bytes: count &log &optional;
resp_pkts: count &log &optional;
resp_ip_bytes: count &log &optional;
tunnel_parents: set[string] &log;
};
}


@ -0,0 +1,7 @@
module HTTP;
export {
## This setting changes if passwords used in Basic-Auth are captured or
## not.
const default_capture_password = F &redef;
}


@ -362,8 +362,7 @@ decrypted from HTTP streams is stored in
:bro:see:`HTTP::default_capture_password` as shown in the stripped down :bro:see:`HTTP::default_capture_password` as shown in the stripped down
excerpt from :doc:`/scripts/base/protocols/http/main.bro` below. excerpt from :doc:`/scripts/base/protocols/http/main.bro` below.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/base/protocols/http/main.bro .. btest-include:: ${DOC_ROOT}/scripting/http_main.bro
:lines: 9-11,20-22,125
Because the constant was declared with the ``&redef`` attribute, if we Because the constant was declared with the ``&redef`` attribute, if we
needed to turn this option on globally, we could do so by adding the needed to turn this option on globally, we could do so by adding the
@ -776,7 +775,7 @@ string against which it will be tested to be on the right.
In the sample above, two local variables are declared to hold our In the sample above, two local variables are declared to hold our
sample sentence and regular expression. Our regular expression in sample sentence and regular expression. Our regular expression in
this case will return true if the string contains either the word this case will return true if the string contains either the word
``quick`` or the word ``fox``. The ``if`` statement in the script uses ``quick`` or the word ``lazy``. The ``if`` statement in the script uses
embedded matching and the ``in`` operator to check for the existence embedded matching and the ``in`` operator to check for the existence
of the pattern within the string. If the statement resolves to true, of the pattern within the string. If the statement resolves to true,
:bro:id:`split` is called to break the string into separate pieces. :bro:id:`split` is called to break the string into separate pieces.
@ -825,8 +824,7 @@ example of the ``record`` data type in the earlier sections, the
:bro:type:`Conn::Info`, which corresponds to the fields logged into :bro:type:`Conn::Info`, which corresponds to the fields logged into
``conn.log``, is shown by the excerpt below. ``conn.log``, is shown by the excerpt below.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/base/protocols/conn/main.bro .. btest-include:: ${DOC_ROOT}/scripting/data_type_record.bro
:lines: 10-12,16-17,19,21,23,25,28,31,35,38,57,63,69,75,98,101,105,108,112,116-117,122
Looking at the structure of the definition, a new collection of data Looking at the structure of the definition, a new collection of data
types is being defined as a type called ``Info``. Since this type types is being defined as a type called ``Info``. Since this type


@ -6,6 +6,7 @@ module X509;
export { export {
redef enum Log::ID += { LOG }; redef enum Log::ID += { LOG };
## The record type which contains the fields of the X.509 log.
type Info: record { type Info: record {
## Current timestamp. ## Current timestamp.
ts: time &log; ts: time &log;


@ -1 +1,2 @@
@load ./main @load ./main
@load ./store


@ -1,11 +1,20 @@
##! Various data structure definitions for use with Bro's communication system. ##! Various data structure definitions for use with Bro's communication system.
module BrokerComm; module Log;
export {
type Log::ID: enum {
## Dummy place-holder.
UNKNOWN
};
}
module Broker;
export { export {
## A name used to identify this endpoint to peers. ## A name used to identify this endpoint to peers.
## .. bro:see:: BrokerComm::connect BrokerComm::listen ## .. bro:see:: Broker::connect Broker::listen
const endpoint_name = "" &redef; const endpoint_name = "" &redef;
## Change communication behavior. ## Change communication behavior.
@ -32,11 +41,11 @@ export {
## Opaque communication data. ## Opaque communication data.
type Data: record { type Data: record {
d: opaque of BrokerComm::Data &optional; d: opaque of Broker::Data &optional;
}; };
## Opaque communication data. ## Opaque communication data.
type DataVector: vector of BrokerComm::Data; type DataVector: vector of Broker::Data;
## Opaque event communication data. ## Opaque event communication data.
type EventArgs: record { type EventArgs: record {
@ -49,55 +58,318 @@ export {
## Opaque communication data used as a convenient way to wrap key-value ## Opaque communication data used as a convenient way to wrap key-value
## pairs that comprise table entries. ## pairs that comprise table entries.
type TableItem : record { type TableItem : record {
key: BrokerComm::Data; key: Broker::Data;
val: BrokerComm::Data; val: Broker::Data;
}; };
## Enable use of communication.
##
## flags: used to tune the local Broker endpoint behavior.
##
## Returns: true if communication is successfully initialized.
global enable: function(flags: EndpointFlags &default = EndpointFlags()): bool;
## Changes endpoint flags originally supplied to :bro:see:`Broker::enable`.
##
## flags: the new endpoint behavior flags to use.
##
## Returns: true if flags were changed.
global set_endpoint_flags: function(flags: EndpointFlags &default = EndpointFlags()): bool;
## Allow sending messages to peers if associated with the given topic.
## This has no effect if auto publication behavior is enabled via the flags
## supplied to :bro:see:`Broker::enable` or :bro:see:`Broker::set_endpoint_flags`.
##
## topic: a topic to allow messages to be published under.
##
## Returns: true if successful.
global publish_topic: function(topic: string): bool;
## Disallow sending messages to peers if associated with the given topic.
## This has no effect if auto publication behavior is enabled via the flags
## supplied to :bro:see:`Broker::enable` or :bro:see:`Broker::set_endpoint_flags`.
##
## topic: a topic to disallow messages to be published under.
##
## Returns: true if successful.
global unpublish_topic: function(topic: string): bool;
## Listen for remote connections.
##
## p: the TCP port to listen on.
##
## a: an address string on which to accept connections, e.g.
## "127.0.0.1". An empty string refers to @p INADDR_ANY.
##
## reuse: equivalent to behavior of SO_REUSEADDR.
##
## Returns: true if the local endpoint is now listening for connections.
##
## .. bro:see:: Broker::incoming_connection_established
global listen: function(p: port, a: string &default = "", reuse: bool &default = T): bool;
## Initiate a remote connection.
##
## a: an address to connect to, e.g. "localhost" or "127.0.0.1".
##
## p: the TCP port on which the remote side is listening.
##
## retry: an interval at which to retry establishing the
## connection with the remote peer if it cannot be made initially, or
## if it ever becomes disconnected.
##
## Returns: true if it's possible to try connecting with the peer and
## it's a new peer. The actual connection may not be established
## until a later point in time.
##
## .. bro:see:: Broker::outgoing_connection_established
global connect: function(a: string, p: port, retry: interval): bool;
## Remove a remote connection.
##
## a: the address used in previous successful call to :bro:see:`Broker::connect`.
##
## p: the port used in previous successful call to :bro:see:`Broker::connect`.
##
## Returns: true if the arguments match a previously successful call to
## :bro:see:`Broker::connect`.
global disconnect: function(a: string, p: port): bool;
## Print a simple message to any interested peers. The receiver can use
## :bro:see:`Broker::print_handler` to handle messages.
##
## topic: a topic associated with the printed message.
##
## msg: the print message to send to peers.
##
## flags: tune the behavior of how the message is sent.
##
## Returns: true if the message is sent.
global send_print: function(topic: string, msg: string, flags: SendFlags &default = SendFlags()): bool;
## Register interest in all peer print messages that use a certain topic
## prefix. Use :bro:see:`Broker::print_handler` to handle received
## messages.
##
## topic_prefix: a prefix to match against remote message topics.
## e.g. an empty prefix matches everything and "a" matches
## "alice" and "amy" but not "bob".
##
## Returns: true if it's a new print subscription and it is now registered.
global subscribe_to_prints: function(topic_prefix: string): bool;
## Unregister interest in all peer print messages that use a topic prefix.
##
## topic_prefix: a prefix previously supplied to a successful call to
## :bro:see:`Broker::subscribe_to_prints`.
##
## Returns: true if interest in the topic prefix is no longer advertised.
global unsubscribe_to_prints: function(topic_prefix: string): bool;
## Send an event to any interested peers.
##
## topic: a topic associated with the event message.
##
## args: event arguments as made by :bro:see:`Broker::event_args`.
##
## flags: tune the behavior of how the message is sent.
##
## Returns: true if the message is sent.
global send_event: function(topic: string, args: EventArgs, flags: SendFlags &default = SendFlags()): bool;
## Automatically send an event to any interested peers whenever it is
## locally dispatched (e.g. using "event my_event(...);" in a script).
##
## topic: a topic string associated with the event message.
## Peers advertise interest by registering a subscription to some
## prefix of this topic name.
##
## ev: a Bro event value.
##
## flags: tune the behavior of how the message is sent.
##
## Returns: true if automatic event sending is now enabled.
global auto_event: function(topic: string, ev: any, flags: SendFlags &default = SendFlags()): bool;
## Stop automatically sending an event to peers upon local dispatch.
##
## topic: a topic originally given to :bro:see:`Broker::auto_event`.
##
## ev: an event originally given to :bro:see:`Broker::auto_event`.
##
## Returns: true if automatic events will not occur for the topic/event
## pair.
global auto_event_stop: function(topic: string, ev: any): bool;
## Register interest in all peer event messages that use a certain topic
## prefix.
##
## topic_prefix: a prefix to match against remote message topics.
## e.g. an empty prefix matches everything and "a" matches
## "alice" and "amy" but not "bob".
##
## Returns: true if it's a new event subscription and it is now registered.
global subscribe_to_events: function(topic_prefix: string): bool;
## Unregister interest in all peer event messages that use a topic prefix.
##
## topic_prefix: a prefix previously supplied to a successful call to
## :bro:see:`Broker::subscribe_to_events`.
##
## Returns: true if interest in the topic prefix is no longer advertised.
global unsubscribe_to_events: function(topic_prefix: string): bool;
## Enable remote logs for a given log stream.
##
## id: the log stream to enable remote logs for.
##
## flags: tune the behavior of how log entry messages are sent.
##
## Returns: true if remote logs are enabled for the stream.
global enable_remote_logs: function(id: Log::ID, flags: SendFlags &default = SendFlags()): bool;
## Disable remote logs for a given log stream.
##
## id: the log stream to disable remote logs for.
##
## Returns: true if remote logs are disabled for the stream.
global disable_remote_logs: function(id: Log::ID): bool;
## Check if remote logs are enabled for a given log stream.
##
## id: the log stream to check.
##
## Returns: true if remote logs are enabled for the given stream.
global remote_logs_enabled: function(id: Log::ID): bool;
## Register interest in all peer log messages that use a certain topic
## prefix. Logs are implicitly sent with topic "bro/log/<stream-name>" and
## the receiving side processes them through the logging framework as usual.
##
## topic_prefix: a prefix to match against remote message topics.
## e.g. an empty prefix matches everything and "a" matches
## "alice" and "amy" but not "bob".
##
## Returns: true if it's a new log subscription and it is now registered.
global subscribe_to_logs: function(topic_prefix: string): bool;
## Unregister interest in all peer log messages that use a topic prefix.
## Logs are implicitly sent with topic "bro/log/<stream-name>" and the
## receiving side processes them through the logging framework as usual.
##
## topic_prefix: a prefix previously supplied to a successful call to
## :bro:see:`Broker::subscribe_to_logs`.
##
## Returns: true if interest in the topic prefix is no longer advertised.
global unsubscribe_to_logs: function(topic_prefix: string): bool;
}

module BrokerStore;

export {
## Whether a data store query could be completed or not.
type QueryStatus: enum {
SUCCESS,
FAILURE,
};
## An expiry time for a key-value pair inserted in to a data store.
type ExpiryTime: record {
## Absolute point in time at which to expire the entry.
absolute: time &optional;
## A point in time relative to the last modification time at which
## to expire the entry. New modifications will delay the expiration.
since_last_modification: interval &optional;
};
## The result of a data store query.
type QueryResult: record {
## Whether the query completed or not.
status: BrokerStore::QueryStatus;
## The result of the query. Certain queries may use a particular
## data type (e.g. querying store size always returns a count, but
## a lookup may return various data types).
result: BrokerComm::Data;
};
## Options to tune the SQLite storage backend.
type SQLiteOptions: record {
## File system path of the database.
path: string &default = "store.sqlite";
};
## Options to tune the RocksDB storage backend.
type RocksDBOptions: record {
## File system path of the database.
path: string &default = "store.rocksdb";
};
## Options to tune the particular storage backends.
type BackendOptions: record {
sqlite: SQLiteOptions &default = SQLiteOptions();
rocksdb: RocksDBOptions &default = RocksDBOptions();
};
}

@load base/bif/comm.bif
@load base/bif/messaging.bif

module Broker;

@ifdef ( Broker::__enable )

function enable(flags: EndpointFlags &default = EndpointFlags()) : bool
{
return __enable(flags);
}
function set_endpoint_flags(flags: EndpointFlags &default = EndpointFlags()): bool
{
return __set_endpoint_flags(flags);
}
function publish_topic(topic: string): bool
{
return __publish_topic(topic);
}
function unpublish_topic(topic: string): bool
{
return __unpublish_topic(topic);
}
function listen(p: port, a: string &default = "", reuse: bool &default = T): bool
{
return __listen(p, a, reuse);
}
function connect(a: string, p: port, retry: interval): bool
{
return __connect(a, p, retry);
}
function disconnect(a: string, p: port): bool
{
return __disconnect(a, p);
}
function send_print(topic: string, msg: string, flags: SendFlags &default = SendFlags()): bool
{
return __send_print(topic, msg, flags);
}
function subscribe_to_prints(topic_prefix: string): bool
{
return __subscribe_to_prints(topic_prefix);
}
function unsubscribe_to_prints(topic_prefix: string): bool
{
return __unsubscribe_to_prints(topic_prefix);
}
function send_event(topic: string, args: EventArgs, flags: SendFlags &default = SendFlags()): bool
{
return __event(topic, args, flags);
}
function auto_event(topic: string, ev: any, flags: SendFlags &default = SendFlags()): bool
{
return __auto_event(topic, ev, flags);
}
function auto_event_stop(topic: string, ev: any): bool
{
return __auto_event_stop(topic, ev);
}
function subscribe_to_events(topic_prefix: string): bool
{
return __subscribe_to_events(topic_prefix);
}
function unsubscribe_to_events(topic_prefix: string): bool
{
return __unsubscribe_to_events(topic_prefix);
}
function enable_remote_logs(id: Log::ID, flags: SendFlags &default = SendFlags()): bool
{
return __enable_remote_logs(id, flags);
}
function disable_remote_logs(id: Log::ID): bool
{
return __disable_remote_logs(id);
}
function remote_logs_enabled(id: Log::ID): bool
{
return __remote_logs_enabled(id);
}
function subscribe_to_logs(topic_prefix: string): bool
{
return __subscribe_to_logs(topic_prefix);
}
function unsubscribe_to_logs(topic_prefix: string): bool
{
return __unsubscribe_to_logs(topic_prefix);
}
@endif
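
# Illustrative usage sketch, separate from the file above: a minimal peering
# setup using the API declared here, assuming Bro was built with Broker support.
# The port, address, topic prefix and the Conn::LOG stream are arbitrary
# example choices, not requirements of the API.
event bro_init()
	{
	Broker::enable();
	Broker::listen(9999/tcp, "127.0.0.1");
	Broker::subscribe_to_events("bro/event/");
	Broker::enable_remote_logs(Conn::LOG);
	}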

File diff suppressed because it is too large

View file

@ -68,7 +68,7 @@ export {
## Events raised by TimeMachine instances and handled by workers.
const tm2worker_events = /EMPTY/ &redef;
## Events sent by the control host (i.e., BroControl) when dynamically
## connecting to a running instance to update settings or request data.
const control_events = Control::controller_events &redef;

View file

@ -174,3 +174,8 @@ signature file-lzma {
file-magic /^\x5d\x00\x00/ file-magic /^\x5d\x00\x00/
} }
# ACE archive file.
signature file-ace-archive {
file-mime "application/x-ace", 100
file-magic /^.{7}\*\*ACE\*\*/
}

View file

@ -2,7 +2,7 @@
# MPEG v3 audio
signature file-mpeg-audio {
file-mime "audio/mpeg", 20
file-magic /^(ID3|\xff[\xe2\xe3\xf2\xf3\xf6\xf7\xfa\xfb\xfc\xfd])/
}
# MPEG v4 audio

View file

@ -9,53 +9,53 @@ signature file-plaintext {
signature file-json { signature file-json {
file-mime "text/json", 1 file-mime "text/json", 1
file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*\{[\x0d\x0a[:blank:]]*(["][^"]{1,}["]|[a-zA-Z][a-zA-Z0-9\\_]*)[\x0d\x0a[:blank:]]*:[\x0d\x0a[:blank:]]*(["]|\[|\{|[0-9]|true|false)/ file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?[\x0d\x0a[:blank:]]*\{[\x0d\x0a[:blank:]]*(["][^"]{1,}["]|[a-zA-Z][a-zA-Z0-9\\_]*)[\x0d\x0a[:blank:]]*:[\x0d\x0a[:blank:]]*(["]|\[|\{|[0-9]|true|false)/
} }
signature file-json2 { signature file-json2 {
file-mime "text/json", 1 file-mime "text/json", 1
file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*\[[\x0d\x0a[:blank:]]*(((["][^"]{1,}["]|[0-9]{1,}(\.[0-9]{1,})?|true|false)[\x0d\x0a[:blank:]]*,)|\{|\[)[\x0d\x0a[:blank:]]*/ file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?[\x0d\x0a[:blank:]]*\[[\x0d\x0a[:blank:]]*(((["][^"]{1,}["]|[0-9]{1,}(\.[0-9]{1,})?|true|false)[\x0d\x0a[:blank:]]*,)|\{|\[)[\x0d\x0a[:blank:]]*/
} }
# Match empty JSON documents. # Match empty JSON documents.
signature file-json3 { signature file-json3 {
file-mime "text/json", 0 file-mime "text/json", 0
file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*(\[\]|\{\})[\x0d\x0a[:blank:]]*$/ file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?[\x0d\x0a[:blank:]]*(\[\]|\{\})[\x0d\x0a[:blank:]]*$/
} }
signature file-xml { signature file-xml {
file-mime "application/xml", 10 file-mime "application/xml", 10
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<\?xml / file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*\x00?<\x00?\?\x00?x\x00?m\x00?l\x00? \x00?/
} }
signature file-xhtml { signature file-xhtml {
file-mime "text/html", 100 file-mime "text/html", 100
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<(![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]|[hH][tT][mM][lL]|[mM][eE][tT][aA] {1,}[hH][tT][tT][pP]-[eE][qQ][uU][iI][vV])/ file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<(![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]|[hH][tT][mM][lL]|[mM][eE][tT][aA] {1,}[hH][tT][tT][pP]-[eE][qQ][uU][iI][vV])/
} }
signature file-html { signature file-html {
file-mime "text/html", 49 file-mime "text/html", 49
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]/ file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]/
} }
signature file-html2 { signature file-html2 {
file-mime "text/html", 20 file-mime "text/html", 20
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([hH][eE][aA][dD]|[hH][tT][mM][lL]|[tT][iI][tT][lL][eE]|[bB][oO][dD][yY])/ file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([hH][eE][aA][dD]|[hH][tT][mM][lL]|[tT][iI][tT][lL][eE]|[bB][oO][dD][yY])/
} }
signature file-rss { signature file-rss {
file-mime "text/rss", 90 file-mime "text/rss", 90
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[rR][sS][sS]/ file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[rR][sS][sS]/
} }
signature file-atom { signature file-atom {
file-mime "text/atom", 100 file-mime "text/atom", 100
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([rR][sS][sS][^>]*xmlns:atom|[fF][eE][eE][dD][^>]*xmlns=["']?http:\/\/www.w3.org\/2005\/Atom["']?)/ file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([rR][sS][sS][^>]*xmlns:atom|[fF][eE][eE][dD][^>]*xmlns=["']?http:\/\/www.w3.org\/2005\/Atom["']?)/
} }
signature file-soap { signature file-soap {
file-mime "application/soap+xml", 49 file-mime "application/soap+xml", 49
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[sS][oO][aA][pP](-[eE][nN][vV])?:[eE][nN][vV][eE][lL][oO][pP][eE]/ file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[sS][oO][aA][pP](-[eE][nN][vV])?:[eE][nN][vV][eE][lL][oO][pP][eE]/
} }
signature file-cross-domain-policy { signature file-cross-domain-policy {
@ -70,7 +70,7 @@ signature file-cross-domain-policy2 {
signature file-xmlrpc { signature file-xmlrpc {
file-mime "application/xml-rpc", 49 file-mime "application/xml-rpc", 49
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][eE][tT][hH][oO][dD][rR][eE][sS][pP][oO][nN][sS][eE]>/ file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][eE][tT][hH][oO][dD][rR][eE][sS][pP][oO][nN][sS][eE]>/
} }
signature file-coldfusion { signature file-coldfusion {
@ -81,7 +81,13 @@ signature file-coldfusion {
# Adobe Flash Media Manifest # Adobe Flash Media Manifest
signature file-f4m { signature file-f4m {
file-mime "application/f4m", 49 file-mime "application/f4m", 49
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][aA][nN][iI][fF][eE][sS][tT][\x0d\x0a[:blank:]]{1,}xmlns=\"http:\/\/ns\.adobe\.com\/f4m\// file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][aA][nN][iI][fF][eE][sS][tT][\x0d\x0a[:blank:]]{1,}xmlns=\"http:\/\/ns\.adobe\.com\/f4m\//
}
# .ini style files
signature file-ini {
file-mime "text/ini", 20
file-magic /^(\xef\xbb\xbf|\xff\xfe|\xfe\xff)?[\x00\x0d\x0a[:blank:]]*\[[^\x0d\x0a]+\][[:blank:]\x00]*[\x0d\x0a]/
} }
# Microsoft LNK files # Microsoft LNK files
@ -90,6 +96,41 @@ signature file-lnk {
file-magic /^\x4C\x00\x00\x00\x01\x14\x02\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x10\x00\x00\x00\x46/ file-magic /^\x4C\x00\x00\x00\x01\x14\x02\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x10\x00\x00\x00\x46/
} }
# Microsoft Registry policies
signature file-pol {
file-mime "application/vnd.ms-pol", 49
file-magic /^PReg/
}
# Old style Windows registry file
signature file-reg {
file-mime "application/vnd.ms-reg", 49
file-magic /^REGEDIT4/
}
# Newer Windows registry file
signature file-reg-utf16 {
file-mime "application/vnd.ms-reg", 49
file-magic /^\xFF\xFEW\x00i\x00n\x00d\x00o\x00w\x00s\x00 \x00R\x00e\x00g\x00i\x00s\x00t\x00r\x00y\x00 \x00E\x00d\x00i\x00t\x00o\x00r\x00 \x00V\x00e\x00r\x00s\x00i\x00o\x00n\x00 \x005\x00\.\x000\x000/
}
# Microsoft Registry format (typically DESKTOP.DAT)
signature file-regf {
file-mime "application/vnd.ms-regf", 49
file-magic /^\x72\x65\x67\x66/
}
# Microsoft Outlook PST files
signature file-pst {
file-mime "application/vnd.ms-outlook", 49
file-magic /!BDN......[\x0e\x0f\x15\x17][\x00-\x02]/
}
signature file-afpinfo {
file-mime "application/vnd.apple-afpinfo"
file-magic /^AFP/
}
signature file-jar { signature file-jar {
file-mime "application/java-archive", 100 file-mime "application/java-archive", 100
file-magic /^PK\x03\x04.{1,200}\x14\x00..META-INF\/MANIFEST\.MF/ file-magic /^PK\x03\x04.{1,200}\x14\x00..META-INF\/MANIFEST\.MF/

View file

@ -91,9 +91,20 @@ export {
## connection record should go here to give context to the data. ## connection record should go here to give context to the data.
conn: connection &optional; conn: connection &optional;
## If the data was discovered within a connection, the
## connection uid should go here to give context to the data.
## If the *conn* field is provided, this will be automatically
## filled out.
uid: string &optional;
## If the data was discovered within a file, the file record ## If the data was discovered within a file, the file record
## should go here to provide context to the data. ## should go here to provide context to the data.
f: fa_file &optional; f: fa_file &optional;
## If the data was discovered within a file, the file uid should
## go here to provide context to the data. If the *f* field is
## provided, this will be automatically filled out.
fuid: string &optional;
}; };
## Record used for the logging framework representing a positive ## Record used for the logging framework representing a positive
@ -112,6 +123,7 @@ export {
## If a file was associated with this intelligence hit, ## If a file was associated with this intelligence hit,
## this is the uid for the file. ## this is the uid for the file.
fuid: string &log &optional; fuid: string &log &optional;
## A mime type if the intelligence hit is related to a file. ## A mime type if the intelligence hit is related to a file.
## If the $f field is provided this will be automatically filled ## If the $f field is provided this will be automatically filled
## out. ## out.
@ -283,15 +295,14 @@ event Intel::match(s: Seen, items: set[Item]) &priority=5
if ( s?$f ) if ( s?$f )
{ {
s$fuid = s$f$id;
if ( s$f?$conns && |s$f$conns| == 1 ) if ( s$f?$conns && |s$f$conns| == 1 )
{ {
for ( cid in s$f$conns ) for ( cid in s$f$conns )
s$conn = s$f$conns[cid]; s$conn = s$f$conns[cid];
} }
if ( ! info?$fuid )
info$fuid = s$f$id;
if ( ! info?$file_mime_type && s$f?$info && s$f$info?$mime_type ) if ( ! info?$file_mime_type && s$f?$info && s$f$info?$mime_type )
info$file_mime_type = s$f$info$mime_type; info$file_mime_type = s$f$info$mime_type;
@ -299,12 +310,18 @@ event Intel::match(s: Seen, items: set[Item]) &priority=5
info$file_desc = Files::describe(s$f); info$file_desc = Files::describe(s$f);
} }
if ( s?$fuid )
info$fuid = s$fuid;
if ( s?$conn ) if ( s?$conn )
{ {
info$uid = s$conn$uid; s$uid = s$conn$uid;
info$id = s$conn$id; info$id = s$conn$id;
} }
if ( s?$uid )
info$uid = s$uid;
for ( item in items ) for ( item in items )
add info$sources[item$meta$source]; add info$sources[item$meta$source];

View file

@ -0,0 +1,15 @@
@load ./types
@load ./main
@load ./plugins
@load ./drop
@load ./shunt
@load ./catch-and-release
# The cluster framework must be loaded first.
@load base/frameworks/cluster
@if ( Cluster::is_enabled() )
@load ./cluster
@else
@load ./non-cluster
@endif

View file

@ -0,0 +1,104 @@
##! Implementation of catch-and-release functionality for NetControl.
module NetControl;
@load ./main
@load ./drop
export {
## Stops all packets involving an IP address from being forwarded. This function
## uses catch-and-release functionality, where the IP address is only dropped for
## a short amount of time that is incremented steadily when the IP is encountered
## again.
##
## a: The address to be dropped.
##
## location: An optional string describing where the drop was triggered.
##
## Returns: The id of the inserted rule on success and an empty string on failure.
global drop_address_catch_release: function(a: addr, location: string &default="") : string;
## Time intervals for which subsequent drops of the same IP take
## effect.
const catch_release_intervals: vector of interval = vector(10min, 1hr, 24hrs, 7days) &redef;
}
function per_block_interval(t: table[addr] of count, idx: addr): interval
{
local ct = t[idx];
# watch for the time of the next block...
local blocktime = catch_release_intervals[ct];
if ( (ct+1) in catch_release_intervals )
blocktime = catch_release_intervals[ct+1];
return blocktime;
}
# This is the internally maintained table containing all the currently going on catch-and-release
# blocks.
global blocks: table[addr] of count = {}
&create_expire=0secs
&expire_func=per_block_interval;
function current_block_interval(s: set[addr], idx: addr): interval
{
if ( idx !in blocks )
{
Reporter::error(fmt("Address %s not in blocks while inserting into current_blocks!", idx));
return 0sec;
}
return catch_release_intervals[blocks[idx]];
}
global current_blocks: set[addr] = set()
&create_expire=0secs
&expire_func=current_block_interval;
function drop_address_catch_release(a: addr, location: string &default=""): string
{
if ( a in blocks )
{
Reporter::warning(fmt("Address %s already blocked using catch-and-release - ignoring duplicate", a));
return "";
}
local block_interval = catch_release_intervals[0];
local ret = drop_address(a, block_interval, location);
if ( ret != "" )
{
blocks[a] = 0;
add current_blocks[a];
}
return ret;
}
function check_conn(a: addr)
{
if ( a in blocks )
{
if ( a in current_blocks )
# block has not been applied yet?
return;
# ok, this one returned again while still in the backoff period.
local try = blocks[a];
if ( (try+1) in catch_release_intervals )
++try;
blocks[a] = try;
add current_blocks[a];
local block_interval = catch_release_intervals[try];
drop_address(a, block_interval, "Re-drop by catch-and-release");
}
}
event new_connection(c: connection)
{
# let's only check originating connections...
check_conn(c$id$orig_h);
}
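
# Illustrative usage sketch, separate from the script above: applying escalating
# catch-and-release drops to hosts that keep coming back. The
# SSH::Password_Guessing notice type is an assumption; substitute whatever
# detection should trigger the block.
hook Notice::policy(n: Notice::Info)
	{
	if ( n$note == SSH::Password_Guessing && n?$src )
		NetControl::drop_address_catch_release(n$src, "SSH password guessing");
	}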

View file

@ -0,0 +1,99 @@
##! Cluster support for the NetControl framework.
@load ./main
@load base/frameworks/cluster
module NetControl;
export {
## This is the event used to transport add_rule calls to the manager.
global cluster_netcontrol_add_rule: event(r: Rule);
## This is the event used to transport remove_rule calls to the manager.
global cluster_netcontrol_remove_rule: event(id: string);
}
## Workers need ability to forward commands to manager.
redef Cluster::worker2manager_events += /NetControl::cluster_netcontrol_(add|remove)_rule/;
## Workers need to see the result events from the manager.
redef Cluster::manager2worker_events += /NetControl::rule_(added|removed|timeout|error)/;
function activate(p: PluginState, priority: int)
{
# we only run the activate function on the manager.
if ( Cluster::local_node_type() != Cluster::MANAGER )
return;
activate_impl(p, priority);
}
global local_rule_count: count = 1;
function add_rule(r: Rule) : string
{
if ( Cluster::local_node_type() == Cluster::MANAGER )
return add_rule_impl(r);
else
{
if ( r$id == "" )
r$id = cat(Cluster::node, ":", ++local_rule_count);
event NetControl::cluster_netcontrol_add_rule(r);
return r$id;
}
}
function remove_rule(id: string) : bool
{
if ( Cluster::local_node_type() == Cluster::MANAGER )
return remove_rule_impl(id);
else
{
event NetControl::cluster_netcontrol_remove_rule(id);
return T; # well, we can't know here. So - just hope...
}
}
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event NetControl::cluster_netcontrol_add_rule(r: Rule)
{
add_rule_impl(r);
}
event NetControl::cluster_netcontrol_remove_rule(id: string)
{
remove_rule_impl(id);
}
@endif
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event rule_expire(r: Rule, p: PluginState) &priority=-5
{
rule_expire_impl(r, p);
}
event rule_added(r: Rule, p: PluginState, msg: string &default="") &priority=5
{
rule_added_impl(r, p, msg);
if ( r?$expire && r$expire > 0secs && ! p$plugin$can_expire )
schedule r$expire { rule_expire(r, p) };
}
event rule_removed(r: Rule, p: PluginState, msg: string &default="") &priority=-5
{
rule_removed_impl(r, p, msg);
}
event rule_timeout(r: Rule, i: FlowInfo, p: PluginState) &priority=-5
{
rule_timeout_impl(r, i, p);
}
event rule_error(r: Rule, p: PluginState, msg: string &default="") &priority=-5
{
rule_error_impl(r, p, msg);
}
@endif

View file

@ -0,0 +1,98 @@
##! Implementation of the drop functionality for NetControl.
module NetControl;
@load ./main
export {
redef enum Log::ID += { DROP };
## Stops all packets involving an IP address from being forwarded.
##
## a: The address to be dropped.
##
## t: How long to drop it, with 0 being indefinitely.
##
## location: An optional string describing where the drop was triggered.
##
## Returns: The id of the inserted rule on success and an empty string on failure.
global drop_address: function(a: addr, t: interval, location: string &default="") : string;
## Stops all packets involving a connection from being forwarded.
##
## c: The connection to be dropped.
##
## t: How long to drop it, with 0 being indefinitely.
##
## location: An optional string describing where the drop was triggered.
##
## Returns: The id of the inserted rule on success and an empty string on failure.
global drop_connection: function(c: conn_id, t: interval, location: string &default="") : string;
type DropInfo: record {
## Time at which the recorded activity occurred.
ts: time &log;
## ID of the rule; unique during each Bro run
rule_id: string &log;
orig_h: addr &log; ##< The originator's IP address.
orig_p: port &log &optional; ##< The originator's port number.
resp_h: addr &log &optional; ##< The responder's IP address.
resp_p: port &log &optional; ##< The responder's port number.
## Expiry time of the drop
expire: interval &log;
## Location where the underlying action was triggered.
location: string &log &optional;
};
## Event that can be handled to access the :bro:type:`NetControl::ShuntInfo`
## record as it is sent on to the logging framework.
global log_netcontrol_drop: event(rec: DropInfo);
}
event bro_init() &priority=5
{
Log::create_stream(NetControl::DROP, [$columns=DropInfo, $ev=log_netcontrol_drop, $path="netcontrol_drop"]);
}
function drop_connection(c: conn_id, t: interval, location: string &default="") : string
{
local e: Entity = [$ty=CONNECTION, $conn=c];
local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location];
local id = add_rule(r);
# Error should already be logged
if ( id == "" )
return id;
local log = DropInfo($ts=network_time(), $rule_id=id, $orig_h=c$orig_h, $orig_p=c$orig_p, $resp_h=c$resp_h, $resp_p=c$resp_p, $expire=t);
if ( location != "" )
log$location=location;
Log::write(DROP, log);
return id;
}
function drop_address(a: addr, t: interval, location: string &default="") : string
{
local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)];
local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location];
local id = add_rule(r);
# Error should already be logged
if ( id == "" )
return id;
local log = DropInfo($ts=network_time(), $rule_id=id, $orig_h=a, $expire=t);
if ( location != "" )
log$location=location;
Log::write(DROP, log);
return id;
}
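
# Illustrative usage sketch, separate from the script above: activating the
# debug backend and dropping an address for an hour. The use of the debug
# plugin, the literal address and the expiry are example assumptions.
event NetControl::init()
	{
	NetControl::activate(NetControl::create_debug(T), 0);
	}

event NetControl::init_done()
	{
	NetControl::drop_address(192.0.2.1, 1hr, "example drop");
	}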

View file

@ -0,0 +1,938 @@
##! Bro's packet acquisition and control framework.
##!
##! This plugin-based framework allows controlling the traffic that Bro monitors
##! as well as, if it has access to the forwarding path, the traffic that the
##! network forwards. By default, the framework lets everything through, to both Bro
##! itself as well as on the network. Scripts can then add rules to impose
##! restrictions on entities, such as specific connections or IP addresses.
##!
##! This framework has two APIs: a high-level and low-level. The high-level API
##! provides convenience functions for a set of common operations. The
##! low-level API provides full flexibility.
module NetControl;
@load ./plugin
@load ./types
export {
## The framework's logging stream identifier.
redef enum Log::ID += { LOG };
# ###
# ### Generic functions and events.
# ###
## Activates a plugin.
##
## p: The plugin to activate.
##
## priority: The higher the priority, the earlier this plugin will be checked
## whether it supports an operation, relative to other plugins.
global activate: function(p: PluginState, priority: int);
## Event that is used to initialize plugins. Place all plugin initialization
## related functionality in this event.
global NetControl::init: event();
## Event that is raised once all plugins activated in ``NetControl::init``
## have finished their initialization.
global NetControl::init_done: event();
# ###
# ### High-level API.
# ###
# ### Note - other high level primitives are in catch-and-release.bro, shunt.bro and
# ### drop.bro
## Allows all traffic involving a specific IP address to be forwarded.
##
## a: The address to be whitelisted.
##
## t: How long to whitelist it, with 0 being indefinitely.
##
## location: An optional string describing where the whitelist was triggered.
##
## Returns: The id of the inserted rule on success and an empty string on failure.
global whitelist_address: function(a: addr, t: interval, location: string &default="") : string;
## Allows all traffic involving a specific IP subnet to be forwarded.
##
## s: The subnet to be whitelisted.
##
## t: How long to whitelist it, with 0 being indefinitely.
##
## location: An optional string describing where the whitelist was triggered.
##
## Returns: The id of the inserted rule on success and an empty string on failure.
global whitelist_subnet: function(s: subnet, t: interval, location: string &default="") : string;
## Redirects a uni-directional flow to another port.
##
## f: The flow to redirect.
##
## out_port: The port to redirect the flow to.
##
## t: How long to leave the redirect in place, with 0 being indefinitely.
##
## location: An optional string describing where the redirect was triggered.
##
## Returns: The id of the inserted rule on success and an empty string on failure.
global redirect_flow: function(f: flow_id, out_port: count, t: interval, location: string &default="") : string;
## Quarantines a host by rewriting DNS queries that the infected host sends to the
## network's DNS server, redirecting them to the quarantine host. The quarantine host
## has to answer all queries with its own address. Only HTTP communication from the
## infected host to the quarantine host is allowed.
##
## infected: the host to quarantine
##
## dns: the network dns server
##
## quarantine: the quarantine server running a dns and a web server
##
## t: how long to leave the quarantine in place
##
## Returns: Vector of inserted rules on success, empty list on failure.
global quarantine_host: function(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string;
## Flushes all state.
global clear: function();
# ###
# ### Low-level API.
# ###
###### Manipulation of rules.
## Installs a rule.
##
## r: The rule to install.
##
## Returns: If successful, returns an ID string unique to the rule that can
## later be used to refer to it. If unsuccessful, returns an empty
## string. The ID is also assigned to ``r$id``. Note that
## "successful" means "a plugin knew how to handle the rule", it
## doesn't necessarily mean that it was indeed successfully put in
## place, because that might happen asynchronously and thus fail
## only later.
global add_rule: function(r: Rule) : string;
## Removes a rule.
##
## id: The rule to remove, specified as the ID returned by :bro:id:`NetControl::add_rule`.
##
## Returns: True if successful, i.e., the relevant plugin indicated that it knew
## how to handle the removal. Note that again "success" means the
## plugin accepted the removal. They might still fail to put it
## into effect, as that might happen asynchronously and thus go
## wrong at that point.
global remove_rule: function(id: string) : bool;
## Searches all rules affecting a certain IP address.
##
## ip: The ip address to search for
##
## Returns: vector of all rules affecting the IP address
global find_rules_addr: function(ip: addr) : vector of Rule;
## Searches all rules affecting a certain subnet.
##
## sn: The subnet to search for
##
## Returns: vector of all rules affecting the subnet
global find_rules_subnet: function(sn: subnet) : vector of Rule;
###### Asynchronous feedback on rules.
## Confirms that a rule was put in place.
##
## r: The rule now in place.
##
## p: The state for the plugin that put it into place.
##
## msg: An optional informational message by the plugin.
global rule_added: event(r: Rule, p: PluginState, msg: string &default="");
## Reports that a rule was removed due to a remove_rule() call.
##
## r: The rule now removed.
##
## p: The state for the plugin that had the rule in place and now
## removed it.
##
## msg: An optional informational message by the plugin.
global rule_removed: event(r: Rule, p: PluginState, msg: string &default="");
## Reports that a rule was removed internally due to a timeout.
##
## r: The rule now removed.
##
## i: Additional flow information, if supported by the protocol.
##
## p: The state for the plugin that had the rule in place and now
## removed it.
##
## msg: An optional informational message by the plugin.
global rule_timeout: event(r: Rule, i: FlowInfo, p: PluginState);
## Reports an error when operating on a rule.
##
## r: The rule that encountered an error.
##
## p: The state for the plugin that reported the error.
##
## msg: An optional informational message by the plugin.
global rule_error: event(r: Rule, p: PluginState, msg: string &default="");
## Hook that allows the modification of rules passed to add_rule before they
## are passed on to the plugins. If one of the hooks uses break, the rule is
## ignored and not passed on to any plugin.
##
## r: The rule to be added
global NetControl::rule_policy: hook(r: Rule);
##### Plugin functions
## Function called by plugins once they have finished their activation. After all
## plugins defined in bro_init have finished activating, rules will start to be sent
## to the plugins. Rules that scripts try to set before the backends are ready
## will be discarded.
global plugin_activated: function(p: PluginState);
## Type of an entry in the NetControl log.
type InfoCategory: enum {
## A log entry reflecting a framework message.
MESSAGE,
## A log entry reflecting a framework error.
ERROR,
## A log entry about a rule.
RULE
};
## State of an entry in the NetControl log.
type InfoState: enum {
REQUESTED,
SUCCEEDED,
FAILED,
REMOVED,
TIMEOUT,
};
## The record type defining the column fields of the NetControl log.
type Info: record {
## Time at which the recorded activity occurred.
ts: time &log;
## ID of the rule; unique during each Bro run
rule_id: string &log &optional;
## Type of the log entry.
category: InfoCategory &log &optional;
## The command the log entry is about.
cmd: string &log &optional;
## State the log entry reflects.
state: InfoState &log &optional;
## String describing an action the entry is about.
action: string &log &optional;
## The target type of the action.
target: TargetType &log &optional;
## Type of the entity the log entry is about.
entity_type: string &log &optional;
## String describing the entity the log entry is about.
entity: string &log &optional;
## String describing the optional modification of the entry (e.g. redirect)
mod: string &log &optional;
## String with an additional message.
msg: string &log &optional;
## Number describing the priority of the log entry
priority: int &log &optional;
## Expiry time of the log entry
expire: interval &log &optional;
## Location where the underlying action was triggered.
location: string &log &optional;
## Plugin triggering the log entry.
plugin: string &log &optional;
};
## Event that can be handled to access the :bro:type:`NetControl::Info`
## record as it is sent on to the logging framework.
global log_netcontrol: event(rec: Info);
}
redef record Rule += {
##< Internally set to the plugins handling the rule.
_plugin_ids: set[count] &default=count_set();
##< Internally set to the plugins on which the rule is currently active.
_active_plugin_ids: set[count] &default=count_set();
##< Track if the rule was added successfully by all responsible plugins.
_added: bool &default=F;
};
# Variable tracking the state of plugin activation. Once all plugins that
# have been added in bro_init are activated, this will switch to T and
# the event NetControl::init_done will be raised.
global plugins_active: bool = F;
# Set to true at the end of bro_init (with very low priority).
# Used to track when plugin activation could potentially be finished
global bro_init_done: bool = F;
# The counters that are used to generate the rule and plugin IDs
global rule_counter: count = 1;
global plugin_counter: count = 1;
# List of the currently active plugins
global plugins: vector of PluginState;
global plugin_ids: table[count] of PluginState;
# These tables hold information about rules.
global rules: table[string] of Rule; # Rules indexed by id and cid
# All rules that apply to a certain subnet/IP address.
global rules_by_subnets: table[subnet] of set[string];
# Rules pertaining to a specific entity.
# There can only ever be one rule of each type for one entity.
global rule_entities: table[Entity, RuleType] of Rule;
event bro_init() &priority=5
{
Log::create_stream(NetControl::LOG, [$columns=Info, $ev=log_netcontrol, $path="netcontrol"]);
}
function entity_to_info(info: Info, e: Entity)
{
info$entity_type = fmt("%s", e$ty);
switch ( e$ty ) {
case ADDRESS:
info$entity = fmt("%s", e$ip);
break;
case CONNECTION:
info$entity = fmt("%s/%d<->%s/%d",
e$conn$orig_h, e$conn$orig_p,
e$conn$resp_h, e$conn$resp_p);
break;
case FLOW:
local ffrom_ip = "*";
local ffrom_port = "*";
local fto_ip = "*";
local fto_port = "*";
local ffrom_mac = "*";
local fto_mac = "*";
if ( e$flow?$src_h )
ffrom_ip = cat(e$flow$src_h);
if ( e$flow?$src_p )
ffrom_port = fmt("%d", e$flow$src_p);
if ( e$flow?$dst_h )
fto_ip = cat(e$flow$dst_h);
if ( e$flow?$dst_p )
fto_port = fmt("%d", e$flow$dst_p);
info$entity = fmt("%s/%s->%s/%s",
ffrom_ip, ffrom_port,
fto_ip, fto_port);
if ( e$flow?$src_m || e$flow?$dst_m )
{
if ( e$flow?$src_m )
ffrom_mac = e$flow$src_m;
if ( e$flow?$dst_m )
fto_mac = e$flow$dst_m;
info$entity = fmt("%s (%s->%s)", info$entity, ffrom_mac, fto_mac);
}
break;
case MAC:
info$entity = e$mac;
break;
default:
info$entity = "<unknown entity type>";
break;
}
}
function rule_to_info(info: Info, r: Rule)
{
info$action = fmt("%s", r$ty);
info$target = r$target;
info$rule_id = r$id;
info$expire = r$expire;
info$priority = r$priority;
if ( r?$location && r$location != "" )
info$location = r$location;
if ( r$ty == REDIRECT )
info$mod = fmt("-> %d", r$out_port);
if ( r$ty == MODIFY )
{
local mfrom_ip = "_";
local mfrom_port = "_";
local mto_ip = "_";
local mto_port = "_";
local mfrom_mac = "_";
local mto_mac = "_";
if ( r$mod?$src_h )
mfrom_ip = cat(r$mod$src_h);
if ( r$mod?$src_p )
mfrom_port = fmt("%d", r$mod$src_p);
if ( r$mod?$dst_h )
mto_ip = cat(r$mod$dst_h);
if ( r$mod?$dst_p )
mto_port = fmt("%d", r$mod$dst_p);
if ( r$mod?$src_m )
mfrom_mac = r$mod$src_m;
if ( r$mod?$dst_m )
mto_mac = r$mod$dst_m;
info$mod = fmt("Src: %s/%s (%s) Dst: %s/%s (%s)",
mfrom_ip, mfrom_port, mfrom_mac, mto_ip, mto_port, mto_mac);
if ( r$mod?$redirect_port )
info$mod = fmt("%s -> %d", info$mod, r$mod$redirect_port);
}
entity_to_info(info, r$entity);
}
function log_msg(msg: string, p: PluginState)
{
Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg, $plugin=p$plugin$name(p)]);
}
function log_error(msg: string, p: PluginState)
{
Log::write(LOG, [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)]);
}
function log_msg_no_plugin(msg: string)
{
Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg]);
}
function log_rule(r: Rule, cmd: string, state: InfoState, p: PluginState, msg: string &default="")
{
local info: Info = [$ts=network_time()];
info$category = RULE;
info$cmd = cmd;
info$state = state;
info$plugin = p$plugin$name(p);
if ( msg != "" )
info$msg = msg;
rule_to_info(info, r);
Log::write(LOG, info);
}
function log_rule_error(r: Rule, msg: string, p: PluginState)
{
local info: Info = [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)];
rule_to_info(info, r);
Log::write(LOG, info);
}
function log_rule_no_plugin(r: Rule, state: InfoState, msg: string)
{
local info: Info = [$ts=network_time()];
info$category = RULE;
info$state = state;
info$msg = msg;
rule_to_info(info, r);
Log::write(LOG, info);
}
function whitelist_address(a: addr, t: interval, location: string &default="") : string
{
local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)];
local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location];
return add_rule(r);
}
function whitelist_subnet(s: subnet, t: interval, location: string &default="") : string
{
local e: Entity = [$ty=ADDRESS, $ip=s];
local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location];
return add_rule(r);
}
function redirect_flow(f: flow_id, out_port: count, t: interval, location: string &default="") : string
{
local flow = NetControl::Flow(
$src_h=addr_to_subnet(f$src_h),
$src_p=f$src_p,
$dst_h=addr_to_subnet(f$dst_h),
$dst_p=f$dst_p
);
local e: Entity = [$ty=FLOW, $flow=flow];
local r: Rule = [$ty=REDIRECT, $target=FORWARD, $entity=e, $expire=t, $location=location, $out_port=out_port];
return add_rule(r);
}
function quarantine_host(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string
{
local orules: vector of string = vector();
local edrop: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected))];
local rdrop: Rule = [$ty=DROP, $target=FORWARD, $entity=edrop, $expire=t, $location=location];
orules[|orules|] = add_rule(rdrop);
local todnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(dns), $dst_p=53/udp)];
local todnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=todnse, $expire=t, $location=location, $mod=FlowMod($dst_h=quarantine), $priority=+5);
orules[|orules|] = add_rule(todnsr);
local fromdnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(dns), $src_p=53/udp, $dst_h=addr_to_subnet(infected))];
local fromdnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=fromdnse, $expire=t, $location=location, $mod=FlowMod($src_h=dns), $priority=+5);
orules[|orules|] = add_rule(fromdnsr);
local wle: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(quarantine), $dst_p=80/tcp)];
local wlr = Rule($ty=WHITELIST, $target=FORWARD, $entity=wle, $expire=t, $location=location, $priority=+5);
orules[|orules|] = add_rule(wlr);
return orules;
}
function check_plugins()
{
if ( plugins_active )
return;
local all_active = T;
for ( i in plugins )
{
local p = plugins[i];
if ( p$_activated == F )
all_active = F;
}
if ( all_active )
{
plugins_active = T;
# Skip log message if there are no plugins
if ( |plugins| > 0 )
log_msg_no_plugin("plugin initialization done");
event NetControl::init_done();
}
}
function plugin_activated(p: PluginState)
{
local id = p$_id;
if ( id !in plugin_ids )
{
log_error("unknown plugin activated", p);
return;
}
plugin_ids[id]$_activated = T;
log_msg("activation finished", p);
if ( bro_init_done )
check_plugins();
}
event bro_init() &priority=-5
{
event NetControl::init();
}
event NetControl::init() &priority=-20
{
bro_init_done = T;
check_plugins();
if ( plugins_active == F )
log_msg_no_plugin("waiting for plugins to initialize");
}
# Low-level functions that only runs on the manager (or standalone) Bro node.
function activate_impl(p: PluginState, priority: int)
{
p$_priority = priority;
plugins[|plugins|] = p;
sort(plugins, function(p1: PluginState, p2: PluginState) : int { return p2$_priority - p1$_priority; });
plugin_ids[plugin_counter] = p;
p$_id = plugin_counter;
++plugin_counter;
# perform one-time initialization
if ( p$plugin?$init )
{
log_msg(fmt("activating plugin with priority %d", priority), p);
p$plugin$init(p);
}
else
{
# no initialization necessary, mark plugin as active right away
plugin_activated(p);
}
}
function add_one_subnet_entry(s: subnet, r: Rule)
{
if ( ! check_subnet(s, rules_by_subnets) )
rules_by_subnets[s] = set(r$id);
else
add rules_by_subnets[s][r$id];
}
function add_subnet_entry(rule: Rule)
{
local e = rule$entity;
if ( e$ty == ADDRESS )
{
add_one_subnet_entry(e$ip, rule);
}
else if ( e$ty == CONNECTION )
{
add_one_subnet_entry(addr_to_subnet(e$conn$orig_h), rule);
add_one_subnet_entry(addr_to_subnet(e$conn$resp_h), rule);
}
else if ( e$ty == FLOW )
{
if ( e$flow?$src_h )
add_one_subnet_entry(e$flow$src_h, rule);
if ( e$flow?$dst_h )
add_one_subnet_entry(e$flow$dst_h, rule);
}
}
function remove_one_subnet_entry(s: subnet, r: Rule)
{
if ( ! check_subnet(s, rules_by_subnets) )
return;
if ( r$id !in rules_by_subnets[s] )
return;
delete rules_by_subnets[s][r$id];
if ( |rules_by_subnets[s]| == 0 )
delete rules_by_subnets[s];
}
function remove_subnet_entry(rule: Rule)
{
local e = rule$entity;
if ( e$ty == ADDRESS )
{
remove_one_subnet_entry(e$ip, rule);
}
else if ( e$ty == CONNECTION )
{
remove_one_subnet_entry(addr_to_subnet(e$conn$orig_h), rule);
remove_one_subnet_entry(addr_to_subnet(e$conn$resp_h), rule);
}
else if ( e$ty == FLOW )
{
if ( e$flow?$src_h )
remove_one_subnet_entry(e$flow$src_h, rule);
if ( e$flow?$dst_h )
remove_one_subnet_entry(e$flow$dst_h, rule);
}
}
function find_rules_subnet(sn: subnet) : vector of Rule
{
local ret: vector of Rule = vector();
local matches = matching_subnets(sn, rules_by_subnets);
for ( m in matches )
{
local sn_entry = matches[m];
local rule_ids = rules_by_subnets[sn_entry];
for ( rule_id in rules_by_subnets[sn_entry] )
{
if ( rule_id in rules )
ret[|ret|] = rules[rule_id];
else
Reporter::error("find_rules_subnet - internal data structure error, missing rule");
}
}
return ret;
}
function find_rules_addr(ip: addr) : vector of Rule
{
return find_rules_subnet(addr_to_subnet(ip));
}
function add_rule_impl(rule: Rule) : string
{
if ( ! plugins_active )
{
log_rule_no_plugin(rule, FAILED, "plugins not initialized yet");
return "";
}
rule$cid = ++rule_counter; # numeric id that can be used by plugins for their rules.
if ( ! rule?$id || rule$id == "" )
rule$id = cat(rule$cid);
if ( ! hook NetControl::rule_policy(rule) )
return "";
if ( [rule$entity, rule$ty] in rule_entities )
{
log_rule_no_plugin(rule, FAILED, "discarded duplicate insertion");
return "";
}
local accepted = F;
local priority: int = +0;
for ( i in plugins )
{
local p = plugins[i];
if ( p$_activated == F )
next;
# in this case, rule was accepted by earlier plugin and this plugin has a lower
# priority. Abort and do not send there...
if ( accepted == T && p$_priority != priority )
break;
if ( p$plugin$add_rule(p, rule) )
{
accepted = T;
priority = p$_priority;
log_rule(rule, "ADD", REQUESTED, p);
add rule$_plugin_ids[p$_id];
}
}
if ( accepted )
{
rules[rule$id] = rule;
rule_entities[rule$entity, rule$ty] = rule;
add_subnet_entry(rule);
return rule$id;
}
log_rule_no_plugin(rule, FAILED, "not supported");
return "";
}
function remove_rule_plugin(r: Rule, p: PluginState): bool
{
local success = T;
if ( ! p$plugin$remove_rule(p, r) )
{
# still continue and send to other plugins
log_rule_error(r, "remove failed", p);
success = F;
}
else
{
log_rule(r, "REMOVE", REQUESTED, p);
}
return success;
}
function remove_rule_impl(id: string) : bool
{
if ( id !in rules )
{
Reporter::error(fmt("Rule %s does not exist in NetControl::remove_rule", id));
return F;
}
local r = rules[id];
local success = T;
for ( plugin_id in r$_active_plugin_ids )
{
local p = plugin_ids[plugin_id];
success = remove_rule_plugin(r, p);
}
return success;
}
function rule_expire_impl(r: Rule, p: PluginState) &priority=-5
{
# do not emit timeout events on shutdown
if ( bro_is_terminating() )
return;
if ( r$id !in rules )
# Removed already.
return;
event NetControl::rule_timeout(r, FlowInfo(), p); # timeout implementation will handle the removal
}
function rule_added_impl(r: Rule, p: PluginState, msg: string &default="")
{
if ( r$id !in rules )
{
log_rule_error(r, "Addition of unknown rule", p);
return;
}
# use our version to prevent operating on copies.
local rule = rules[r$id];
if ( p$_id !in rule$_plugin_ids )
{
log_rule_error(rule, "Rule added to non-responsible plugin", p);
return;
}
log_rule(r, "ADD", SUCCEEDED, p, msg);
add rule$_active_plugin_ids[p$_id];
if ( |rule$_plugin_ids| == |rule$_active_plugin_ids| )
{
# rule was completely added.
rule$_added = T;
}
}
function rule_cleanup(r: Rule)
{
if ( |r$_active_plugin_ids| > 0 )
return;
remove_subnet_entry(r);
delete rule_entities[r$entity, r$ty];
delete rules[r$id];
}
function rule_removed_impl(r: Rule, p: PluginState, msg: string &default="")
{
if ( r$id !in rules )
{
log_rule_error(r, "Removal of non-existing rule", p);
return;
}
# use our version to prevent operating on copies.
local rule = rules[r$id];
if ( p$_id !in rule$_plugin_ids )
{
log_rule_error(r, "Removed from non-assigned plugin", p);
return;
}
if ( p$_id in rule$_active_plugin_ids )
{
delete rule$_active_plugin_ids[p$_id];
}
log_rule(rule, "REMOVE", SUCCEEDED, p, msg);
rule_cleanup(rule);
}
function rule_timeout_impl(r: Rule, i: FlowInfo, p: PluginState)
{
if ( r$id !in rules )
{
log_rule_error(r, "Timeout of non-existing rule", p);
return;
}
local rule = rules[r$id];
local msg = "";
if ( i?$packet_count )
msg = fmt("Packets: %d", i$packet_count);
if ( i?$byte_count )
{
if ( msg != "" )
msg = msg + " ";
msg = fmt("%sBytes: %s", msg, i$byte_count);
}
log_rule(rule, "EXPIRE", TIMEOUT, p, msg);
if ( ! p$plugin$can_expire )
{
# in this case, we actually have to delete the rule and the timeout
# call just originated locally
remove_rule_plugin(rule, p);
return;
}
if ( p$_id !in rule$_plugin_ids )
{
log_rule_error(r, "Timeout from non-assigned plugin", p);
return;
}
if ( p$_id in rule$_active_plugin_ids )
{
delete rule$_active_plugin_ids[p$_id];
}
rule_cleanup(rule);
}
function rule_error_impl(r: Rule, p: PluginState, msg: string &default="")
{
if ( r$id !in rules )
{
log_rule_error(r, "Error of non-existing rule", p);
return;
}
local rule = rules[r$id];
log_rule_error(rule, msg, p);
# Remove the plugin both from active and all plugins of the rule. If there
# are no plugins left afterwards - delete it
if ( p$_id !in rule$_plugin_ids )
{
log_rule_error(r, "Error from non-assigned plugin", p);
return;
}
if ( p$_id in rule$_active_plugin_ids )
{
# error during removal. Let's pretend it worked.
delete rule$_plugin_ids[p$_id];
delete rule$_active_plugin_ids[p$_id];
rule_cleanup(rule);
}
else
{
# error during insertion. Meh. If we are the only plugin, remove the rule again.
# Otherwise - keep it, minus us.
delete rule$_plugin_ids[p$_id];
if ( |rule$_plugin_ids| == 0 )
{
rule_cleanup(rule);
}
}
}
function clear()
{
for ( id in rules )
remove_rule(id);
}
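
# Illustrative sketch of the low-level API above, separate from the framework
# code: building a Rule by hand and inserting it once the plugins are ready. It
# assumes some backend (e.g. the debug plugin) was activated in NetControl::init;
# the address and expiry are placeholder values.
event NetControl::init_done()
	{
	local e = NetControl::Entity($ty=NetControl::ADDRESS, $ip=addr_to_subnet(192.0.2.7));
	local r = NetControl::Rule($ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e, $expire=30min);

	if ( NetControl::add_rule(r) == "" )
		Reporter::warning("no NetControl plugin accepted the example rule");
	}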

View file

@ -0,0 +1,47 @@
module NetControl;
@load ./main
function activate(p: PluginState, priority: int)
{
activate_impl(p, priority);
}
function add_rule(r: Rule) : string
{
return add_rule_impl(r);
}
function remove_rule(id: string) : bool
{
return remove_rule_impl(id);
}
event rule_expire(r: Rule, p: PluginState) &priority=-5
{
rule_expire_impl(r, p);
}
event rule_added(r: Rule, p: PluginState, msg: string &default="") &priority=5
{
rule_added_impl(r, p, msg);
if ( r?$expire && r$expire > 0secs && ! p$plugin$can_expire )
schedule r$expire { rule_expire(r, p) };
}
event rule_removed(r: Rule, p: PluginState, msg: string &default="") &priority=-5
{
rule_removed_impl(r, p, msg);
}
event rule_timeout(r: Rule, i: FlowInfo, p: PluginState) &priority=-5
{
rule_timeout_impl(r, i, p);
}
event rule_error(r: Rule, p: PluginState, msg: string &default="") &priority=-5
{
rule_error_impl(r, p, msg);
}

View file

@ -0,0 +1,89 @@
##! Plugin interface for NetControl backends.
module NetControl;
@load ./types
export {
## State for a plugin instance.
type PluginState: record {
## Table for a plugin to store custom, instance-specific state.
config: table[string] of string &default=table();
## Unique plugin identifier -- used for backlookup of plugins from Rules. Set internally.
_id: count &optional;
## Set internally.
_priority: int &default=+0;
## Set internally. Signifies if the plugin has returned that it has activated successfully
_activated: bool &default=F;
};
# Definition of a plugin.
#
# Generally a plugin needs to implement only what it can support. By
# returning failure, it indicates that it can't support something and the
# framework will then try another plugin, if available, or inform the
# caller that the operation failed. If a function isn't implemented by a plugin,
# that's considered an implicit failure to support the operation.
#
# If a plugin accepts a rule operation, it *must* generate one of the reporting
# events ``rule_{added,removed,error}`` to signal if it indeed worked out;
# this is separate from accepting the operation because often a plugin
# will only know later (i.e., asynchronously) if that was an error for
# something it thought it could handle.
type Plugin: record {
# Returns a descriptive name of the plugin instance, suitable for use in logging
# messages. Note that this function is not optional.
name: function(state: PluginState) : string;
## If true, plugin can expire rules itself. If false,
## framework will manage rule expiration.
can_expire: bool;
# One-time initialization function called when plugin gets registered, and
# before any other methods are called.
#
# If this function is provided, NetControl assumes that the plugin has to
# perform, potentially lengthy, initialization before the plugin will become
# active. In this case, the plugin has to call ``NetControl::plugin_activated``,
# once initialization finishes.
init: function(state: PluginState) &optional;
# One-time finalization function called when a plugin is shut down; no further
# functions will be called afterwards.
done: function(state: PluginState) &optional;
# Implements the add_rule() operation. If the plugin accepts the rule,
# it returns true, false otherwise. The rule will already have its
# ``id`` field set, which the plugin may use for identification
# purposes.
add_rule: function(state: PluginState, r: Rule) : bool &optional;
# Implements the remove_rule() operation. This will only be called for
# rules that the plugins has previously accepted with add_rule(). The
# ``id`` field will match that of the add_rule() call. Generally,
# a plugin that accepts an add_rule() should also accept the
# remove_rule().
remove_rule: function(state: PluginState, r: Rule) : bool &optional;
# A transaction groups a number of operations. The plugin can add them internally
# and postpone putting them into effect until committed. This allows building a
# configuration of multiple rules at once, including replaying a previous state.
transaction_begin: function(state: PluginState) &optional;
transaction_end: function(state: PluginState) &optional;
};
# Table for a plugin to store instance-specific configuration information.
#
# Note, it would be nicer to pass the Plugin instance to all the below, instead
# of this state table. However, Bro's type resolver has trouble with referring to a
# record type from inside itself.
redef record PluginState += {
## The plugin that the state belongs to. (Defined separately
## because of cyclic type dependency.)
plugin: Plugin &optional;
};
}
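
# Illustrative sketch, separate from the interface above: the rough shape of a
# backend. This "null" example plugin simply accepts every rule and confirms it
# right away; it is an illustration, not a shipped plugin.
function null_plugin_name(p: NetControl::PluginState) : string
	{
	return "null-example";
	}

function null_plugin_add_rule(p: NetControl::PluginState, r: NetControl::Rule) : bool
	{
	event NetControl::rule_added(r, p);
	return T;
	}

function null_plugin_remove_rule(p: NetControl::PluginState, r: NetControl::Rule) : bool
	{
	event NetControl::rule_removed(r, p);
	return T;
	}

global null_example_plugin = NetControl::Plugin($name=null_plugin_name, $can_expire=F,
	$add_rule=null_plugin_add_rule, $remove_rule=null_plugin_remove_rule);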

View file

@ -0,0 +1,5 @@
@load ./debug
@load ./openflow
@load ./packetfilter
@load ./broker
@load ./acld

View file

@ -0,0 +1,297 @@
##! Acld plugin for the netcontrol framework.
module NetControl;
@load ../main
@load ../plugin
@load base/frameworks/broker
@ifdef ( Broker::__enable )
export {
type AclRule : record {
command: string;
cookie: count;
arg: string;
comment: string &optional;
};
type AcldConfig: record {
## The acld topic used to send events to
acld_topic: string;
## Broker host to connect to
acld_host: addr;
## Broker port to connect to
acld_port: port;
## Do we accept rules for the monitor path? Default false
monitor: bool &default=F;
## Do we accept rules for the forward path? Default true
forward: bool &default=T;
## Predicate that is called on rule insertion or removal.
##
## p: Current plugin state
##
## r: The rule to be inserted or removed
##
## Returns: T if the rule can be handled by the current backend, F otherwise
check_pred: function(p: PluginState, r: Rule): bool &optional;
};
## Instantiates the acld plugin.
global create_acld: function(config: AcldConfig) : PluginState;
redef record PluginState += {
acld_config: AcldConfig &optional;
## The ID of this acld instance - for the mapping to PluginStates
acld_id: count &optional;
};
## Hook that is called after a rule is converted to an acld rule.
## The hook may modify the rule before it is sent to acld.
## Setting the acld command to an empty string will cause the rule to be rejected
## by the plugin.
##
## p: Current plugin state
##
## r: The rule to be inserted or removed
##
## ar: The acld rule to be inserted or removed
global NetControl::acld_rule_policy: hook(p: PluginState, r: Rule, ar: AclRule);
## Events that are sent from us to Broker
global acld_add_rule: event(id: count, r: Rule, ar: AclRule);
global acld_remove_rule: event(id: count, r: Rule, ar: AclRule);
## Events that are sent from Broker to us
global acld_rule_added: event(id: count, r: Rule, msg: string);
global acld_rule_removed: event(id: count, r: Rule, msg: string);
global acld_rule_error: event(id: count, r: Rule, msg: string);
}
global netcontrol_acld_peers: table[port, string] of PluginState;
global netcontrol_acld_topics: set[string] = set();
global netcontrol_acld_id: table[count] of PluginState = table();
global netcontrol_acld_current_id: count = 0;
const acld_add_to_remove: table[string] of string = {
["drop"] = "restore",
["whitelist"] = "remwhitelist",
["blockhosthost"] = "restorehosthost",
["droptcpport"] = "restoretcpport",
["dropudpport"] = "restoreudpport",
["droptcpdsthostport"] ="restoretcpdsthostport",
["dropudpdsthostport"] ="restoreudpdsthostport",
["permittcpdsthostport"] ="unpermittcpdsthostport",
["permitudpdsthostport"] ="unpermitudpdsthostport",
["nullzero"] ="nonullzero"
};
event NetControl::acld_rule_added(id: count, r: Rule, msg: string)
{
if ( id !in netcontrol_acld_id )
{
Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id));
return;
}
local p = netcontrol_acld_id[id];
event NetControl::rule_added(r, p, msg);
}
event NetControl::acld_rule_removed(id: count, r: Rule, msg: string)
{
if ( id !in netcontrol_acld_id )
{
Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id));
return;
}
local p = netcontrol_acld_id[id];
event NetControl::rule_removed(r, p, msg);
}
event NetControl::acld_rule_error(id: count, r: Rule, msg: string)
{
if ( id !in netcontrol_acld_id )
{
Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id));
return;
}
local p = netcontrol_acld_id[id];
event NetControl::rule_error(r, p, msg);
}
function acld_name(p: PluginState) : string
{
return fmt("Acld-%s", p$acld_config$acld_topic);
}
# check that subnet specifies an addr
function check_sn(sn: subnet) : bool
{
if ( is_v4_subnet(sn) && subnet_width(sn) == 32 )
return T;
if ( is_v6_subnet(sn) && subnet_width(sn) == 128 )
return T;
Reporter::error(fmt("Acld: rule_to_acl_rule was given a subnet that does not specify a distinct address where needed - %s", sn));
return F;
}
function rule_to_acl_rule(p: PluginState, r: Rule) : AclRule
{
local e = r$entity;
local command: string = "";
local arg: string = "";
if ( e$ty == ADDRESS )
{
if ( r$ty == DROP )
command = "drop";
else if ( r$ty == WHITELIST )
command = "whitelist";
arg = cat(e$ip);
}
else if ( e$ty == FLOW )
{
local f = e$flow;
if ( ( ! f?$src_h ) && ( ! f?$src_p ) && f?$dst_h && f?$dst_p && ( ! f?$src_m ) && ( ! f?$dst_m ) )
{
if ( !check_sn(f$dst_h) )
command = ""; # invalid addr, do nothing
else if ( is_tcp_port(f$dst_p) && r$ty == DROP )
command = "droptcpdsthostport";
else if ( is_tcp_port(f$dst_p) && r$ty == WHITELIST )
command = "permittcpdsthostport";
else if ( is_udp_port(f$dst_p) && r$ty == DROP)
command = "dropudpdsthostport";
else if ( is_udp_port(f$dst_p) && r$ty == WHITELIST)
command = "permitudpdsthostport";
arg = fmt("%s %d", subnet_to_addr(f$dst_h), f$dst_p);
}
else if ( f?$src_h && ( ! f?$src_p ) && f?$dst_h && ( ! f?$dst_p ) && ( ! f?$src_m ) && ( ! f?$dst_m ) )
{
if ( !check_sn(f$src_h) || !check_sn(f$dst_h) )
command = "";
else if ( r$ty == DROP )
command = "blockhosthost";
arg = fmt("%s %s", subnet_to_addr(f$src_h), subnet_to_addr(f$dst_h));
}
else if ( ( ! f?$src_h ) && ( ! f?$src_p ) && ( ! f?$dst_h ) && f?$dst_p && ( ! f?$src_m ) && ( ! f?$dst_m ) )
{
if ( is_tcp_port(f$dst_p) && r$ty == DROP )
command = "droptcpport";
else if ( is_udp_port(f$dst_p) && r$ty == DROP )
command = "dropudpport";
arg = fmt("%d", f$dst_p);
}
}
local ar = AclRule($command=command, $cookie=r$cid, $arg=arg);
if ( r?$location )
ar$comment = r$location;
hook NetControl::acld_rule_policy(p, r, ar);
return ar;
}
function acld_check_rule(p: PluginState, r: Rule) : bool
{
local c = p$acld_config;
if ( p$acld_config?$check_pred )
return p$acld_config$check_pred(p, r);
if ( r$target == MONITOR && c$monitor )
return T;
if ( r$target == FORWARD && c$forward )
return T;
return F;
}
function acld_add_rule_fun(p: PluginState, r: Rule) : bool
{
if ( ! acld_check_rule(p, r) )
return F;
local ar = rule_to_acl_rule(p, r);
if ( ar$command == "" )
return F;
Broker::send_event(p$acld_config$acld_topic, Broker::event_args(acld_add_rule, p$acld_id, r, ar));
return T;
}
function acld_remove_rule_fun(p: PluginState, r: Rule) : bool
{
if ( ! acld_check_rule(p, r) )
return F;
local ar = rule_to_acl_rule(p, r);
if ( ar$command in acld_add_to_remove )
ar$command = acld_add_to_remove[ar$command];
else
return F;
Broker::send_event(p$acld_config$acld_topic, Broker::event_args(acld_remove_rule, p$acld_id, r, ar));
return T;
}
function acld_init(p: PluginState)
{
Broker::enable();
Broker::connect(cat(p$acld_config$acld_host), p$acld_config$acld_port, 1sec);
Broker::subscribe_to_events(p$acld_config$acld_topic);
}
event Broker::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string)
{
if ( [peer_port, peer_address] !in netcontrol_acld_peers )
# ok, this one was none of ours...
return;
local p = netcontrol_acld_peers[peer_port, peer_address];
plugin_activated(p);
}
global acld_plugin = Plugin(
$name=acld_name,
$can_expire = F,
$add_rule = acld_add_rule_fun,
$remove_rule = acld_remove_rule_fun,
$init = acld_init
);
function create_acld(config: AcldConfig) : PluginState
{
if ( config$acld_topic in netcontrol_acld_topics )
Reporter::warning(fmt("Topic %s was added to NetControl acld plugin twice. Possible duplication of commands", config$acld_topic));
else
add netcontrol_acld_topics[config$acld_topic];
local host = cat(config$acld_host);
local p: PluginState = [$acld_config=config, $plugin=acld_plugin, $acld_id=netcontrol_acld_current_id];
if ( [config$acld_port, host] in netcontrol_acld_peers )
Reporter::warning(fmt("Peer %s:%s was added to NetControl acld plugin twice.", host, config$acld_port));
else
netcontrol_acld_peers[config$acld_port, host] = p;
netcontrol_acld_id[netcontrol_acld_current_id] = p;
++netcontrol_acld_current_id;
return p;
}
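# Illustrative usage sketch (not part of the original script): instantiate the
# acld backend and hand it to NetControl. The AcldConfig field names follow
# their use above; host, port and topic are placeholders, and the
# NetControl::init event and NetControl::activate function are assumed to be
# provided by the framework's main.bro.
#
# event NetControl::init()
#     {
#     local acld_config = NetControl::AcldConfig($acld_host=127.0.0.1,
#                                                 $acld_port=9999/tcp,
#                                                 $acld_topic="bro/event/netcontrol-acld");
#     NetControl::activate(NetControl::create_acld(acld_config), 0);
#     }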
@endif

View file

@ -0,0 +1,167 @@
##! Broker plugin for the netcontrol framework. Sends the raw data structures
##! used in NetControl on to Broker to allow for easy handling, e.g., of
##! command-line scripts.
module NetControl;
@load ../main
@load ../plugin
@load base/frameworks/broker
@ifdef ( Broker::__enable )
export {
## Instantiates the broker plugin.
global create_broker: function(host: addr, host_port: port, topic: string, can_expire: bool &default=F) : PluginState;
redef record PluginState += {
## The broker topic used to send events to
broker_topic: string &optional;
## The ID of this broker instance - for the mapping to PluginStates
broker_id: count &optional;
## Broker host to connect to
broker_host: addr &optional;
## Broker port to connect to
broker_port: port &optional;
};
global broker_add_rule: event(id: count, r: Rule);
global broker_remove_rule: event(id: count, r: Rule);
global broker_rule_added: event(id: count, r: Rule, msg: string);
global broker_rule_removed: event(id: count, r: Rule, msg: string);
global broker_rule_error: event(id: count, r: Rule, msg: string);
global broker_rule_timeout: event(id: count, r: Rule, i: FlowInfo);
}
global netcontrol_broker_peers: table[port, string] of PluginState;
global netcontrol_broker_topics: set[string] = set();
global netcontrol_broker_id: table[count] of PluginState = table();
global netcontrol_broker_current_id: count = 0;
event NetControl::broker_rule_added(id: count, r: Rule, msg: string)
{
if ( id !in netcontrol_broker_id )
{
Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id));
return;
}
local p = netcontrol_broker_id[id];
event NetControl::rule_added(r, p, msg);
}
event NetControl::broker_rule_removed(id: count, r: Rule, msg: string)
{
if ( id !in netcontrol_broker_id )
{
Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id));
return;
}
local p = netcontrol_broker_id[id];
event NetControl::rule_removed(r, p, msg);
}
event NetControl::broker_rule_error(id: count, r: Rule, msg: string)
{
if ( id !in netcontrol_broker_id )
{
Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id));
return;
}
local p = netcontrol_broker_id[id];
event NetControl::rule_error(r, p, msg);
}
event NetControl::broker_rule_timeout(id: count, r: Rule, i: FlowInfo)
{
if ( id !in netcontrol_broker_id )
{
Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id));
return;
}
local p = netcontrol_broker_id[id];
event NetControl::rule_timeout(r, i, p);
}
function broker_name(p: PluginState) : string
{
return fmt("Broker-%s", p$broker_topic);
}
function broker_add_rule_fun(p: PluginState, r: Rule) : bool
{
Broker::send_event(p$broker_topic, Broker::event_args(broker_add_rule, p$broker_id, r));
return T;
}
function broker_remove_rule_fun(p: PluginState, r: Rule) : bool
{
Broker::send_event(p$broker_topic, Broker::event_args(broker_remove_rule, p$broker_id, r));
return T;
}
function broker_init(p: PluginState)
{
Broker::enable();
Broker::connect(cat(p$broker_host), p$broker_port, 1sec);
Broker::subscribe_to_events(p$broker_topic);
}
event Broker::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string)
{
if ( [peer_port, peer_address] !in netcontrol_broker_peers )
return;
local p = netcontrol_broker_peers[peer_port, peer_address];
plugin_activated(p);
}
global broker_plugin = Plugin(
$name=broker_name,
$can_expire = F,
$add_rule = broker_add_rule_fun,
$remove_rule = broker_remove_rule_fun,
$init = broker_init
);
global broker_plugin_can_expire = Plugin(
$name=broker_name,
$can_expire = T,
$add_rule = broker_add_rule_fun,
$remove_rule = broker_remove_rule_fun,
$init = broker_init
);
function create_broker(host: addr, host_port: port, topic: string, can_expire: bool &default=F) : PluginState
{
if ( topic in netcontrol_broker_topics )
Reporter::warning(fmt("Topic %s was added to NetControl broker plugin twice. Possible duplication of commands", topic));
else
add netcontrol_broker_topics[topic];
local plugin = broker_plugin;
if ( can_expire )
plugin = broker_plugin_can_expire;
local p: PluginState = [$broker_host=host, $broker_port=host_port, $plugin=plugin, $broker_topic=topic, $broker_id=netcontrol_broker_current_id];
if ( [host_port, cat(host)] in netcontrol_broker_peers )
Reporter::warning(fmt("Peer %s:%s was added to NetControl broker plugin twice.", host, host_port));
else
netcontrol_broker_peers[host_port, cat(host)] = p;
netcontrol_broker_id[netcontrol_broker_current_id] = p;
++netcontrol_broker_current_id;
return p;
}
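# Illustrative usage sketch (not part of the original script): connect the
# NetControl broker backend to an external handler listening on a Broker
# endpoint. Host, port and topic are placeholders; NetControl::init and
# NetControl::activate are assumed from the framework's main.bro.
#
# event NetControl::init()
#     {
#     local p = NetControl::create_broker(127.0.0.1, 9977/tcp, "bro/event/netcontrol-example", T);
#     NetControl::activate(p, 0);
#     }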
@endif

View file

@ -0,0 +1,99 @@
##! Debugging plugin for the NetControl framework, providing insight into
##! executed operations.
@load ../plugin
@load ../main
module NetControl;
export {
## Instantiates a debug plugin for the NetControl framework. The debug
## plugin simply logs the operations it receives.
##
## do_something: If true, the plugin will claim it supports all operations; if
## false, it will indicate it doesn't support any.
global create_debug: function(do_something: bool) : PluginState;
}
function do_something(p: PluginState) : bool
{
return p$config["all"] == "1";
}
function debug_name(p: PluginState) : string
{
return fmt("Debug-%s", (do_something(p) ? "All" : "None"));
}
function debug_log(p: PluginState, msg: string)
{
print fmt("netcontrol debug (%s): %s", debug_name(p), msg);
}
function debug_init(p: PluginState)
{
debug_log(p, "init");
plugin_activated(p);
}
function debug_done(p: PluginState)
{
debug_log(p, "init");
}
function debug_add_rule(p: PluginState, r: Rule) : bool
{
local s = fmt("add_rule: %s", r);
debug_log(p, s);
if ( do_something(p) )
{
event NetControl::rule_added(r, p);
return T;
}
return F;
}
function debug_remove_rule(p: PluginState, r: Rule) : bool
{
local s = fmt("remove_rule: %s", r);
debug_log(p, s);
event NetControl::rule_removed(r, p);
return T;
}
function debug_transaction_begin(p: PluginState)
{
debug_log(p, "transaction_begin");
}
function debug_transaction_end(p: PluginState)
{
debug_log(p, "transaction_end");
}
global debug_plugin = Plugin(
$name=debug_name,
$can_expire = F,
$init = debug_init,
$done = debug_done,
$add_rule = debug_add_rule,
$remove_rule = debug_remove_rule,
$transaction_begin = debug_transaction_begin,
$transaction_end = debug_transaction_end
);
function create_debug(do_something: bool) : PluginState
{
local p: PluginState = [$plugin=debug_plugin];
# FIXME: Why's the default not working?
p$config = table();
p$config["all"] = (do_something ? "1" : "0");
return p;
}
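# Illustrative usage sketch (not part of the original script): activate a debug
# plugin that claims to support every rule and simply logs the operations it
# sees. NetControl::init and NetControl::activate are assumed from main.bro.
#
# event NetControl::init()
#     {
#     NetControl::activate(NetControl::create_debug(T), 0);
#     }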

View file

@ -0,0 +1,432 @@
##! OpenFlow plugin for the NetControl framework.
@load ../main
@load ../plugin
@load base/frameworks/openflow
module NetControl;
export {
type OfConfig: record {
monitor: bool &default=T;
forward: bool &default=T;
idle_timeout: count &default=0;
table_id: count &optional;
priority_offset: int &default=+0; ##< Add this to all rule priorities. Can be useful if you want the OpenFlow priorities to be offset from the NetControl priorities without having to write a filter function.
## Predicate that is called on rule insertion or removal.
##
## p: Current plugin state
##
## r: The rule to be inserted or removed
##
## Returns: T if the rule can be handled by the current backend, F otherwise
check_pred: function(p: PluginState, r: Rule): bool &optional;
match_pred: function(p: PluginState, e: Entity, m: vector of OpenFlow::ofp_match): vector of OpenFlow::ofp_match &optional;
flow_mod_pred: function(p: PluginState, r: Rule, m: OpenFlow::ofp_flow_mod): OpenFlow::ofp_flow_mod &optional;
};
redef record PluginState += {
## OpenFlow controller for NetControl OpenFlow plugin
of_controller: OpenFlow::Controller &optional;
## OpenFlow configuration record that is passed on initialization
of_config: OfConfig &optional;
};
type OfTable: record {
p: PluginState;
r: Rule;
c: count &default=0; # how many replies did we see so far? needed for ids where we have multiple rules...
packet_count: count &default=0;
byte_count: count &default=0;
duration_sec: double &default=0.0;
};
## The time interval after which an OpenFlow message is considered to have timed out,
## at which point we delete it from our internal tracking.
const openflow_message_timeout = 20secs &redef;
## The time interval after which we consider a flow timed out. This should be fairly
## high (or even disabled) if you expect a lot of long-lived flows. However, state
## will also build up for quite a while if this is kept around...
const openflow_flow_timeout = 24hrs &redef;
## Instantiates an openflow plugin for the NetControl framework.
global create_openflow: function(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState;
}
global of_messages: table[count, OpenFlow::ofp_flow_mod_command] of OfTable &create_expire=openflow_message_timeout
&expire_func=function(t: table[count, OpenFlow::ofp_flow_mod_command] of OfTable, idx: any): interval
{
local rid: count;
local command: OpenFlow::ofp_flow_mod_command;
[rid, command] = idx;
local p = t[rid, command]$p;
local r = t[rid, command]$r;
event NetControl::rule_error(r, p, "Timeout during rule insertion/removal");
return 0secs;
};
global of_flows: table[count] of OfTable &create_expire=openflow_flow_timeout;
global of_instances: table[string] of PluginState;
function openflow_name(p: PluginState) : string
{
return fmt("Openflow-%s", p$of_controller$describe(p$of_controller$state));
}
function openflow_check_rule(p: PluginState, r: Rule) : bool
{
local c = p$of_config;
if ( p$of_config?$check_pred )
return p$of_config$check_pred(p, r);
if ( r$target == MONITOR && c$monitor )
return T;
if ( r$target == FORWARD && c$forward )
return T;
return F;
}
function openflow_match_pred(p: PluginState, e: Entity, m: vector of OpenFlow::ofp_match) : vector of OpenFlow::ofp_match
{
if ( p$of_config?$match_pred )
return p$of_config$match_pred(p, e, m);
return m;
}
function openflow_flow_mod_pred(p: PluginState, r: Rule, m: OpenFlow::ofp_flow_mod): OpenFlow::ofp_flow_mod
{
if ( p$of_config?$flow_mod_pred )
return p$of_config$flow_mod_pred(p, r, m);
return m;
}
function determine_dl_type(s: subnet): count
{
local pdl = OpenFlow::ETH_IPv4;
if ( is_v6_subnet(s) )
pdl = OpenFlow::ETH_IPv6;
return pdl;
}
function determine_proto(p: port): count
{
local proto = OpenFlow::IP_TCP;
if ( is_udp_port(p) )
proto = OpenFlow::IP_UDP;
else if ( is_icmp_port(p) )
proto = OpenFlow::IP_ICMP;
return proto;
}
function entity_to_match(p: PluginState, e: Entity): vector of OpenFlow::ofp_match
{
local v : vector of OpenFlow::ofp_match = vector();
if ( e$ty == CONNECTION )
{
v[|v|] = OpenFlow::match_conn(e$conn); # forward and...
v[|v|] = OpenFlow::match_conn(e$conn, T); # reverse
return openflow_match_pred(p, e, v);
}
if ( e$ty == MAC )
{
v[|v|] = OpenFlow::ofp_match(
$dl_src=e$mac
);
v[|v|] = OpenFlow::ofp_match(
$dl_dst=e$mac
);
return openflow_match_pred(p, e, v);
}
local dl_type = OpenFlow::ETH_IPv4;
if ( e$ty == ADDRESS )
{
if ( is_v6_subnet(e$ip) )
dl_type = OpenFlow::ETH_IPv6;
v[|v|] = OpenFlow::ofp_match(
$dl_type=dl_type,
$nw_src=e$ip
);
v[|v|] = OpenFlow::ofp_match(
$dl_type=dl_type,
$nw_dst=e$ip
);
return openflow_match_pred(p, e, v);
}
local proto = OpenFlow::IP_TCP;
if ( e$ty == FLOW )
{
local m = OpenFlow::ofp_match();
local f = e$flow;
if ( f?$src_m )
m$dl_src=f$src_m;
if ( f?$dst_m )
m$dl_dst=f$dst_m;
if ( f?$src_h )
{
m$dl_type = determine_dl_type(f$src_h);
m$nw_src = f$src_h;
}
if ( f?$dst_h )
{
m$dl_type = determine_dl_type(f$dst_h);
m$nw_dst = f$dst_h;
}
if ( f?$src_p )
{
m$nw_proto = determine_proto(f$src_p);
m$tp_src = port_to_count(f$src_p);
}
if ( f?$dst_p )
{
m$nw_proto = determine_proto(f$dst_p);
m$tp_dst = port_to_count(f$dst_p);
}
v[|v|] = m;
return openflow_match_pred(p, e, v);
}
Reporter::error(fmt("Entity type %s not supported for openflow yet", cat(e$ty)));
return openflow_match_pred(p, e, v);
}
function openflow_rule_to_flow_mod(p: PluginState, r: Rule) : OpenFlow::ofp_flow_mod
{
local c = p$of_config;
local flow_mod = OpenFlow::ofp_flow_mod(
$cookie=OpenFlow::generate_cookie(r$cid*2), # leave one space for the cases in which we need two rules.
$command=OpenFlow::OFPFC_ADD,
$idle_timeout=c$idle_timeout,
$priority=int_to_count(r$priority + c$priority_offset),
$flags=OpenFlow::OFPFF_SEND_FLOW_REM # please notify us when flows are removed
);
if ( r?$expire )
flow_mod$hard_timeout = double_to_count(interval_to_double(r$expire));
if ( c?$table_id )
flow_mod$table_id = c$table_id;
if ( r$ty == DROP )
{
# default, nothing to do. We simply do not add an output port to the rule...
}
else if ( r$ty == WHITELIST )
{
# at the moment our interpretation of whitelist is to hand this off to the switch's L2/L3 routing.
flow_mod$actions$out_ports = vector(OpenFlow::OFPP_NORMAL);
}
else if ( r$ty == MODIFY )
{
# if no ports are given, just assume normal pipeline...
flow_mod$actions$out_ports = vector(OpenFlow::OFPP_NORMAL);
local mod = r$mod;
if ( mod?$redirect_port )
flow_mod$actions$out_ports = vector(mod$redirect_port);
if ( mod?$src_h )
flow_mod$actions$nw_src = mod$src_h;
if ( mod?$dst_h )
flow_mod$actions$nw_dst = mod$dst_h;
if ( mod?$src_m )
flow_mod$actions$dl_src = mod$src_m;
if ( mod?$dst_m )
flow_mod$actions$dl_dst = mod$dst_m;
if ( mod?$src_p )
flow_mod$actions$tp_src = mod$src_p;
if ( mod?$dst_p )
flow_mod$actions$tp_dst = mod$dst_p;
}
else if ( r$ty == REDIRECT )
{
# redirect to the out_port given in the rule
flow_mod$actions$out_ports = vector(r$out_port);
}
else
{
Reporter::error(fmt("Rule type %s not supported for openflow yet", cat(r$ty)));
}
return openflow_flow_mod_pred(p, r, flow_mod);
}
function openflow_add_rule(p: PluginState, r: Rule) : bool
{
if ( ! openflow_check_rule(p, r) )
return F;
local flow_mod = openflow_rule_to_flow_mod(p, r);
local matches = entity_to_match(p, r$entity);
for ( i in matches )
{
if ( OpenFlow::flow_mod(p$of_controller, matches[i], flow_mod) )
{
of_messages[r$cid, flow_mod$command] = OfTable($p=p, $r=r);
flow_mod = copy(flow_mod);
++flow_mod$cookie;
}
else
event rule_error(r, p, "Error while executing OpenFlow::flow_mod");
}
return T;
}
function openflow_remove_rule(p: PluginState, r: Rule) : bool
{
if ( ! openflow_check_rule(p, r) )
return F;
local flow_mod: OpenFlow::ofp_flow_mod = [
$cookie=OpenFlow::generate_cookie(r$cid*2),
$command=OpenFlow::OFPFC_DELETE
];
if ( OpenFlow::flow_mod(p$of_controller, [], flow_mod) )
of_messages[r$cid, flow_mod$command] = OfTable($p=p, $r=r);
else
{
event rule_error(r, p, "Error while executing OpenFlow::flow_mod");
return F;
}
# if this was an address or mac match, we also need to remove the reverse
if ( r$entity$ty == ADDRESS || r$entity$ty == MAC )
{
local flow_mod_2 = copy(flow_mod);
++flow_mod_2$cookie;
OpenFlow::flow_mod(p$of_controller, [], flow_mod_2);
}
return T;
}
event OpenFlow::flow_mod_success(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) &priority=3
{
local id = OpenFlow::get_cookie_uid(flow_mod$cookie)/2;
if ( [id, flow_mod$command] !in of_messages )
return;
local r = of_messages[id,flow_mod$command]$r;
local p = of_messages[id,flow_mod$command]$p;
local c = of_messages[id,flow_mod$command]$c;
if ( r$entity$ty == ADDRESS || r$entity$ty == MAC )
{
++of_messages[id,flow_mod$command]$c;
if ( of_messages[id,flow_mod$command]$c < 2 )
return; # will do stuff once the second part arrives...
}
delete of_messages[id,flow_mod$command];
if ( p$of_controller$supports_flow_removed )
of_flows[id] = OfTable($p=p, $r=r);
if ( flow_mod$command == OpenFlow::OFPFC_ADD )
event NetControl::rule_added(r, p, msg);
else if ( flow_mod$command == OpenFlow::OFPFC_DELETE || flow_mod$command == OpenFlow::OFPFC_DELETE_STRICT )
event NetControl::rule_removed(r, p, msg);
}
event OpenFlow::flow_mod_failure(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) &priority=3
{
local id = OpenFlow::get_cookie_uid(flow_mod$cookie)/2;
if ( [id, flow_mod$command] !in of_messages )
return;
local r = of_messages[id,flow_mod$command]$r;
local p = of_messages[id,flow_mod$command]$p;
delete of_messages[id,flow_mod$command];
event NetControl::rule_error(r, p, msg);
}
event OpenFlow::flow_removed(name: string, match: OpenFlow::ofp_match, cookie: count, priority: count, reason: count, duration_sec: count, idle_timeout: count, packet_count: count, byte_count: count)
{
local id = OpenFlow::get_cookie_uid(cookie)/2;
if ( id !in of_flows )
return;
local rec = of_flows[id];
local r = rec$r;
local p = rec$p;
if ( r$entity$ty == ADDRESS || r$entity$ty == MAC )
{
++of_flows[id]$c;
if ( of_flows[id]$c < 2 )
return; # will do stuff once the second part arrives...
else
event NetControl::rule_timeout(r, FlowInfo($duration=double_to_interval((rec$duration_sec+duration_sec)/2), $packet_count=packet_count+rec$packet_count, $byte_count=byte_count+rec$byte_count), p);
return;
}
event NetControl::rule_timeout(r, FlowInfo($duration=double_to_interval(duration_sec+0.0), $packet_count=packet_count, $byte_count=byte_count), p);
}
function openflow_init(p: PluginState)
{
local name = p$of_controller$state$_name;
if ( name in of_instances )
Reporter::error(fmt("OpenFlow instance %s added to NetControl twice.", name));
of_instances[name] = p;
# Check whether our OpenFlow controller is already active. If not, we have to wait for it to become active.
if ( p$of_controller$state$_activated )
plugin_activated(p);
}
event OpenFlow::controller_activated(name: string, controller: OpenFlow::Controller)
{
if ( name in of_instances )
plugin_activated(of_instances[name]);
}
global openflow_plugin = Plugin(
$name=openflow_name,
$can_expire = T,
$init = openflow_init,
# $done = openflow_done,
$add_rule = openflow_add_rule,
$remove_rule = openflow_remove_rule
# $transaction_begin = openflow_transaction_begin,
# $transaction_end = openflow_transaction_end
);
function create_openflow(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState
{
local p: PluginState = [$plugin=openflow_plugin, $of_controller=controller, $of_config=config];
return p;
}
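# Illustrative usage sketch (not part of the original script): pair the
# NetControl OpenFlow plugin with a controller from the OpenFlow framework.
# OpenFlow::log_new is assumed from the OpenFlow log plugin (which must be
# loaded); the datapath id and priority offset are placeholders, and
# NetControl::init/NetControl::activate come from the framework's main.bro.
#
# event NetControl::init()
#     {
#     local controller = OpenFlow::log_new(42);
#     local of_backend = NetControl::create_openflow(controller, NetControl::OfConfig($priority_offset=+5));
#     NetControl::activate(of_backend, 0);
#     }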

View file

@ -0,0 +1,113 @@
##! NetControl plugin for the process-level PacketFilter that comes with
##! Bro. Since the PacketFilter in Bro is quite limited in scope
##! and can only add/remove filters for addresses, this is quite
##! limited in scope at the moment.
module NetControl;
@load ../plugin
export {
## Instantiates the packetfilter plugin.
global create_packetfilter: function() : PluginState;
}
# Check if we can handle this rule. If it specifies ports or
# anything Bro cannot handle, simply ignore it for now.
function packetfilter_check_rule(r: Rule) : bool
{
if ( r$ty != DROP )
return F;
if ( r$target != MONITOR )
return F;
local e = r$entity;
if ( e$ty == ADDRESS )
return T;
if ( e$ty != FLOW ) # everything else requires ports or MAC stuff
return F;
if ( e$flow?$src_p || e$flow?$dst_p || e$flow?$src_m || e$flow?$dst_m )
return F;
return T;
}
function packetfilter_add_rule(p: PluginState, r: Rule) : bool
{
if ( ! packetfilter_check_rule(r) )
return F;
local e = r$entity;
if ( e$ty == ADDRESS )
{
install_src_net_filter(e$ip, 0, 1.0);
install_dst_net_filter(e$ip, 0, 1.0);
return T;
}
if ( e$ty == FLOW )
{
local f = e$flow;
if ( f?$src_h )
install_src_net_filter(f$src_h, 0, 1.0);
if ( f?$dst_h )
install_dst_net_filter(f$dst_h, 0, 1.0);
return T;
}
return F;
}
function packetfilter_remove_rule(p: PluginState, r: Rule) : bool
{
if ( ! packetfilter_check_rule(r) )
return F;
local e = r$entity;
if ( e$ty == ADDRESS )
{
uninstall_src_net_filter(e$ip);
uninstall_dst_net_filter(e$ip);
return T;
}
if ( e$ty == FLOW )
{
local f = e$flow;
if ( f?$src_h )
uninstall_src_net_filter(f$src_h);
if ( f?$dst_h )
uninstall_dst_net_filter(f$dst_h);
return T;
}
return F;
}
function packetfilter_name(p: PluginState) : string
{
return "Packetfilter";
}
global packetfilter_plugin = Plugin(
$name=packetfilter_name,
$can_expire = F,
# $init = packetfilter_init,
# $done = packetfilter_done,
$add_rule = packetfilter_add_rule,
$remove_rule = packetfilter_remove_rule
);
function create_packetfilter() : PluginState
{
local p: PluginState = [$plugin=packetfilter_plugin];
return p;
}
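# Illustrative usage sketch (not part of the original script): the packetfilter
# backend needs no configuration and, as checked above, only handles DROP rules
# with the MONITOR target. NetControl::init and NetControl::activate are
# assumed from the framework's main.bro.
#
# event NetControl::init()
#     {
#     NetControl::activate(NetControl::create_packetfilter(), 0);
#     }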

View file

@ -0,0 +1,69 @@
##! Implementation of the shunt functionality for NetControl.
module NetControl;
@load ./main
export {
redef enum Log::ID += { SHUNT };
## Stops forwarding a uni-directional flow's packets to Bro.
##
## f: The flow to shunt.
##
## t: How long to leave the shunt in place, with 0 meaning indefinitely.
##
## location: An optional string describing where the shunt was triggered.
##
## Returns: The id of the inserted rule on success and zero on failure.
global shunt_flow: function(f: flow_id, t: interval, location: string &default="") : string;
type ShuntInfo: record {
## Time at which the recorded activity occurred.
ts: time &log;
## ID of the rule; unique during each Bro run
rule_id: string &log;
## Flow ID of the shunted flow
f: flow_id &log;
## Expiry time of the shunt
expire: interval &log;
## Location where the underlying action was triggered.
location: string &log &optional;
};
## Event that can be handled to access the :bro:type:`NetControl::ShuntInfo`
## record as it is sent on to the logging framework.
global log_netcontrol_shunt: event(rec: ShuntInfo);
}
event bro_init() &priority=5
{
Log::create_stream(NetControl::SHUNT, [$columns=ShuntInfo, $ev=log_netcontrol_shunt, $path="netcontrol_shunt"]);
}
function shunt_flow(f: flow_id, t: interval, location: string &default="") : string
{
local flow = NetControl::Flow(
$src_h=addr_to_subnet(f$src_h),
$src_p=f$src_p,
$dst_h=addr_to_subnet(f$dst_h),
$dst_p=f$dst_p
);
local e: Entity = [$ty=FLOW, $flow=flow];
local r: Rule = [$ty=DROP, $target=MONITOR, $entity=e, $expire=t, $location=location];
local id = add_rule(r);
# Error should already be logged
if ( id == "" )
return id;
local log = ShuntInfo($ts=network_time(), $rule_id=id, $f=f, $expire=t);
if ( location != "" )
log$location=location;
Log::write(SHUNT, log);
return id;
}
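# Illustrative usage sketch (not part of the original script): shunt the
# originator-side flow of a connection for one hour. The connection record c is
# assumed to come from an event such as connection_established; the location
# string is a placeholder.
#
# local f = flow_id($src_h=c$id$orig_h, $src_p=c$id$orig_p,
#                   $dst_h=c$id$resp_h, $dst_p=c$id$resp_p);
# NetControl::shunt_flow(f, 1hr, "example shunt");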

View file

@ -0,0 +1,109 @@
##! Types used by the NetControl framework.
module NetControl;
export {
const default_priority: int = +0 &redef;
const whitelist_priority: int = +5 &redef;
## Type of a :bro:id:`Entity` for defining an action.
type EntityType: enum {
ADDRESS, ##< Activity involving a specific IP address.
CONNECTION, ##< All of a bi-directional connection's activity.
FLOW, ##< All of a uni-directional flow's activity. Can contain wildcards.
MAC, ##< Activity involving a MAC address.
};
## Type for defining a flow.
type Flow: record {
src_h: subnet &optional; ##< The source IP address/subnet.
src_p: port &optional; ##< The source port number.
dst_h: subnet &optional; ##< The destination IP address/subnet.
dst_p: port &optional; ##< The destination port number.
src_m: string &optional; ##< The source MAC address.
dst_m: string &optional; ##< The destination MAC address.
};
## Type defining the entity an :bro:id:`Rule` is operating on.
type Entity: record {
ty: EntityType; ##< Type of entity.
conn: conn_id &optional; ##< Used with :bro:enum:`NetControl::CONNECTION`.
flow: Flow &optional; ##< Used with :bro:enum:`NetControl::FLOW`.
ip: subnet &optional; ##< Used with :bro:enum:`NetControl::ADDRESS` to specify a CIDR subnet.
mac: string &optional; ##< Used with :bro:enum:`NetControl::MAC`.
};
## Target of :bro:id:`Rule` action.
type TargetType: enum {
FORWARD, ##< Apply rule actively to traffic on forwarding path.
MONITOR, ##< Apply rule passively to traffic sent to Bro for monitoring.
};
## Type of rules that the framework supports. Each type lists the
## :bro:id:`Rule` argument(s) it uses, if any.
##
## Plugins may extend this type to define their own.
type RuleType: enum {
## Stop forwarding all packets matching entity.
##
## No arguments.
DROP,
## Begin modifying all packets matching entity.
##
## .. todo::
## Define arguments.
MODIFY,
## Begin redirecting all packets matching entity.
##
## .. todo::
## c: output port to redirect traffic to.
REDIRECT,
## Whitelists all packets of an entity, meaning no restrictions will be applied.
## While whitelisting is the default if no rule matches, this type can be
## used to override lower-priority rules that would otherwise take effect for the
## entity.
WHITELIST,
};
## Type for defining a flow modification action.
type FlowMod: record {
src_h: addr &optional; ##< The source IP address.
src_p: count &optional; ##< The source port number.
dst_h: addr &optional; ##< The destination IP address.
dst_p: count &optional; ##< The destination port number.
src_m: string &optional; ##< The source MAC address.
dst_m: string &optional; ##< The destination MAC address.
redirect_port: count &optional;
};
## A rule for the framework to put in place. Of all rules currently in
## place, the first match will be taken, sorted by priority. All
## further rules will be ignored.
type Rule: record {
ty: RuleType; ##< Type of rule.
target: TargetType; ##< Where to apply rule.
entity: Entity; ##< Entity to apply rule to.
expire: interval &optional; ##< Timeout after which to expire the rule.
priority: int &default=default_priority; ##< Priority if multiple rules match an entity (larger value is higher priority).
location: string &optional; ##< Optional string describing where/what installed the rule.
out_port: count &optional; ##< Argument for :bro:enum:`NetControl::REDIRECT` rules.
mod: FlowMod &optional; ##< Argument for :bro:enum:`NetControl::MODIFY` rules.
id: string &default=""; ##< Internally determined unique ID for this rule. Will be set when added.
cid: count &default=0; ##< Internally determined unique numeric ID for this rule. Set when added.
};
## Information of a flow that can be provided by switches when the flow times out.
## Currently this is heavily influenced by the data that OpenFlow returns by default.
## That being said - their design makes sense and this is probably the data one
## can expect to be available.
type FlowInfo: record {
duration: interval &optional; ##< total duration of the rule
packet_count: count &optional; ##< number of packets exchanged over connections matched by the rule
byte_count: count &optional; ##< total bytes exchanged over connections matched by the rule
};
}
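# Illustrative example (not part of the original script): a hand-built rule
# that asks the forwarding path to drop all traffic from one address for an
# hour. Field names follow the record definitions above; the address and
# location are placeholders, and such a rule would normally be handed to
# NetControl::add_rule() from the framework's main.bro.
#
# local e = NetControl::Entity($ty=NetControl::ADDRESS, $ip=192.0.2.1/32);
# local r = NetControl::Rule($ty=NetControl::DROP, $target=NetControl::FORWARD,
#                            $entity=e, $expire=1hr, $location="example");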

View file

@ -44,6 +44,7 @@ export {
ACTION_ALARM,
};
## Type that represents a set of actions.
type ActionSet: set[Notice::Action];
## The notice framework is able to do automatic notice suppression by
@ -52,6 +53,7 @@ export {
## suppression.
const default_suppression_interval = 1hrs &redef;
## The record type that is used for representing and logging notices.
type Info: record {
## An absolute time indicating when the notice occurred,
## defaults to the current network time.

View file

@ -0,0 +1,13 @@
@load ./consts
@load ./types
@load ./main
@load ./plugins
# The cluster framework must be loaded first.
@load base/frameworks/cluster
@if ( Cluster::is_enabled() )
@load ./cluster
@else
@load ./non-cluster
@endif

View file

@ -0,0 +1,120 @@
##! Cluster support for the OpenFlow framework.
@load ./main
@load base/frameworks/cluster
module OpenFlow;
export {
## This is the event used to transport flow_mod messages to the manager.
global cluster_flow_mod: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod);
## This is the event used to transport flow_clear messages to the manager.
global cluster_flow_clear: event(name: string);
}
## Workers need the ability to forward commands to the manager.
redef Cluster::worker2manager_events += /OpenFlow::cluster_flow_(mod|clear)/;
# the flow_mod function wrapper
function flow_mod(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool
{
if ( ! controller?$flow_mod )
return F;
if ( Cluster::local_node_type() == Cluster::MANAGER )
return controller$flow_mod(controller$state, match, flow_mod);
else
event OpenFlow::cluster_flow_mod(controller$state$_name, match, flow_mod);
return T;
}
function flow_clear(controller: Controller): bool
{
if ( ! controller?$flow_clear )
return F;
if ( Cluster::local_node_type() == Cluster::MANAGER )
return controller$flow_clear(controller$state);
else
event OpenFlow::cluster_flow_clear(controller$state$_name);
return T;
}
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event OpenFlow::cluster_flow_mod(name: string, match: ofp_match, flow_mod: ofp_flow_mod)
{
if ( name !in name_to_controller )
{
Reporter::error(fmt("OpenFlow controller %s not found in mapping on master", name));
return;
}
local c = name_to_controller[name];
if ( ! c$state$_activated )
return;
if ( c?$flow_mod )
c$flow_mod(c$state, match, flow_mod);
}
event OpenFlow::cluster_flow_clear(name: string)
{
if ( name !in name_to_controller )
{
Reporter::error(fmt("OpenFlow controller %s not found in mapping on master", name));
return;
}
local c = name_to_controller[name];
if ( ! c$state$_activated )
return;
if ( c?$flow_clear )
c$flow_clear(c$state);
}
@endif
function register_controller(tpe: OpenFlow::Plugin, name: string, controller: Controller)
{
controller$state$_name = cat(tpe, name);
controller$state$_plugin = tpe;
# we only run the init functions on the manager.
if ( Cluster::local_node_type() != Cluster::MANAGER )
return;
register_controller_impl(tpe, name, controller);
}
function unregister_controller(controller: Controller)
{
# we only run this on the manager.
if ( Cluster::local_node_type() != Cluster::MANAGER )
return;
unregister_controller_impl(controller);
}
function lookup_controller(name: string): vector of Controller
{
# we only run this on the manager. Otherwise we don't have a mapping or state -> return empty
if ( Cluster::local_node_type() != Cluster::MANAGER )
return vector();
# I am not quite sure if we can actually get away with this - in the
# current state, this means that the individual nodes cannot lookup
# a controller by name.
#
# This means that there can be no reactions to things on the actual
# worker nodes - because they cannot look up a name. On the other hand -
# currently we also do not even send the events to the worker nodes (at least
# not if we are using broker). Because of that I am not really feeling that
# badly about it...
return lookup_controller_impl(name);
}

View file

@ -0,0 +1,229 @@
##! Constants used by the OpenFlow framework.
# All types/constants not specific to OpenFlow will be defined here
# until they somehow get into Bro.
module OpenFlow;
# Some cookie specific constants.
# first 24 bits
const COOKIE_BID_SIZE = 16777216;
# start at bit 40 (1 << 40)
const COOKIE_BID_START = 1099511627776;
# bro specific cookie ID shall have bit 42 set (1 << 42)
const BRO_COOKIE_ID = 4;
# 8 bits group identifier
const COOKIE_GID_SIZE = 256;
# start at bit 32 (1 << 32)
const COOKIE_GID_START = 4294967296;
# 32 bits unique identifier
const COOKIE_UID_SIZE = 4294967296;
# start at bit 0 (1 << 0)
const COOKIE_UID_START = 0;
export {
# All ethertypes can be found at
# http://standards.ieee.org/develop/regauth/ethertype/eth.txt
# but are not interesting for us at this point
#type ethertype: enum {
# Internet protocol version 4
const ETH_IPv4 = 0x0800;
# Address resolution protocol
const ETH_ARP = 0x0806;
# Wake on LAN
const ETH_WOL = 0x0842;
# Reverse address resolution protocol
const ETH_RARP = 0x8035;
# Appletalk
const ETH_APPLETALK = 0x809B;
# Appletalk address resolution protocol
const ETH_APPLETALK_ARP = 0x80F3;
# IEEE 802.1q & IEEE 802.1aq
const ETH_VLAN = 0x8100;
# Novell IPX old
const ETH_IPX_OLD = 0x8137;
# Novell IPX
const ETH_IPX = 0x8138;
# Internet protocol version 6
const ETH_IPv6 = 0x86DD;
# IEEE 802.3x
const ETH_ETHER_FLOW_CONTROL = 0x8808;
# Multiprotocol Label Switching unicast
const ETH_MPLS_UNICAST = 0x8847;
# Multiprotocol Label Switching multicast
const ETH_MPLS_MULTICAST = 0x8848;
# Point-to-point protocol over Ethernet discovery phase (rfc2516)
const ETH_PPPOE_DISCOVERY = 0x8863;
# Point-to-point protocol over Ethernet session phase (rfc2516)
const ETH_PPPOE_SESSION = 0x8864;
# Jumbo frames
const ETH_JUMBO_FRAMES = 0x8870;
# IEEE 802.1X
const ETH_EAP_OVER_LAN = 0x888E;
# IEEE 802.1ad & IEEE 802.1aq
const ETH_PROVIDER_BRIDING = 0x88A8;
# IEEE 802.1ae
const ETH_MAC_SECURITY = 0x88E5;
# IEEE 802.1ad (QinQ)
const ETH_QINQ = 0x9100;
#};
# A list of ip protocol numbers can be found at
# http://en.wikipedia.org/wiki/List_of_IP_protocol_numbers
#type iptype: enum {
# IPv6 Hop-by-Hop Option (RFC2460)
const IP_HOPOPT = 0x00;
# Internet Control Message Protocol (RFC792)
const IP_ICMP = 0x01;
# Internet Group Management Protocol (RFC1112)
const IP_IGMP = 0x02;
# Gateway-to-Gateway Protocol (RFC823)
const IP_GGP = 0x03;
# IP-Within-IP (encapsulation) (RFC2003)
const IP_IPIP = 0x04;
# Internet Stream Protocol (RFC1190;RFC1819)
const IP_ST = 0x05;
# Transmission Control Protocol (RFC793)
const IP_TCP = 0x06;
# Core-based trees (RFC2189)
const IP_CBT = 0x07;
# Exterior Gateway Protocol (RFC888)
const IP_EGP = 0x08;
# Interior Gateway Protocol (any private interior
# gateway (used by Cisco for their IGRP))
const IP_IGP = 0x09;
# User Datagram Protocol (RFC768)
const IP_UDP = 0x11;
# Reliable Datagram Protocol (RFC908)
const IP_RDP = 0x1B;
# IPv6 Encapsulation (RFC2473)
const IP_IPv6 = 0x29;
# Resource Reservation Protocol (RFC2205)
const IP_RSVP = 0x2E;
# Generic Routing Encapsulation (RFC2784;RFC2890)
const IP_GRE = 0x2F;
# Open Shortest Path First (RFC1583)
const IP_OSPF = 0x59;
# Multicast Transport Protocol
const IP_MTP = 0x5C;
# IP-within-IP Encapsulation Protocol (RFC2003)
### error 0x5E;
# Ethernet-within-IP Encapsulation Protocol (RFC3378)
const IP_ETHERIP = 0x61;
# Layer Two Tunneling Protocol Version 3 (RFC3931)
const IP_L2TP = 0x73;
# Intermediate System to Intermediate System (IS-IS) Protocol over IPv4 (RFC1142;RFC1195)
const IP_ISIS = 0x7C;
# Fibre Channel
const IP_FC = 0x85;
# Multiprotocol Label Switching Encapsulated in IP (RFC4023)
const IP_MPLS = 0x89;
#};
## Return value for a cookie from a flow
## that was not added, modified or deleted
## by the Bro OpenFlow framework.
const INVALID_COOKIE = 0xffffffffffffffff;
# Openflow physical port definitions
## Send the packet out the input port. This
## virtual port must be explicitly used in
## order to send back out of the input port.
const OFPP_IN_PORT = 0xfffffff8;
## Perform actions in flow table.
## NB: This can only be the destination port
## for packet-out messages.
const OFPP_TABLE = 0xfffffff9;
## Process with normal L2/L3 switching.
const OFPP_NORMAL = 0xfffffffa;
## All physical ports except input port and
## those disabled by STP.
const OFPP_FLOOD = 0xfffffffb;
## All physical ports except input port.
const OFPP_ALL = 0xfffffffc;
## Send to controller.
const OFPP_CONTROLLER = 0xfffffffd;
## Local openflow "port".
const OFPP_LOCAL = 0xfffffffe;
## Wildcard port used only for flow mod (delete) and flow stats requests.
const OFPP_ANY = 0xffffffff;
# Openflow no buffer constant.
const OFP_NO_BUFFER = 0xffffffff;
## Send flow removed message when flow
## expires or is deleted.
const OFPFF_SEND_FLOW_REM = 0x1;
## Check for overlapping entries first.
const OFPFF_CHECK_OVERLAP = 0x2;
## Remark: this is for emergency mode.
## Flows added with this flag are only used
## when the controller is disconnected.
const OFPFF_EMERG = 0x4;
# Wildcard table used for table config,
# flow stats and flow deletes.
const OFPTT_ALL = 0xff;
## Openflow action_type definitions
##
## The openflow action type defines
## what actions openflow can take
## to modify a packet
type ofp_action_type: enum {
## Output to switch port.
OFPAT_OUTPUT = 0x0000,
## Set the 802.1q VLAN id.
OFPAT_SET_VLAN_VID = 0x0001,
## Set the 802.1q priority.
OFPAT_SET_VLAN_PCP = 0x0002,
## Strip the 802.1q header.
OFPAT_STRIP_VLAN = 0x0003,
## Ethernet source address.
OFPAT_SET_DL_SRC = 0x0004,
## Ethernet destination address.
OFPAT_SET_DL_DST = 0x0005,
## IP source address
OFPAT_SET_NW_SRC = 0x0006,
## IP destination address.
OFPAT_SET_NW_DST = 0x0007,
## IP ToS (DSCP field, 6 bits).
OFPAT_SET_NW_TOS = 0x0008,
## TCP/UDP source port.
OFPAT_SET_TP_SRC = 0x0009,
## TCP/UDP destination port.
OFPAT_SET_TP_DST = 0x000a,
## Output to queue.
OFPAT_ENQUEUE = 0x000b,
## Vendor specific
OFPAT_VENDOR = 0xffff,
};
## Openflow flow_mod_command definitions
##
## The openflow flow_mod_command describes
## what kind of action is to be performed.
type ofp_flow_mod_command: enum {
## New flow.
OFPFC_ADD = 0x0,
## Modify all matching flows.
OFPFC_MODIFY = 0x1,
## Modify entry strictly matching wildcards.
OFPFC_MODIFY_STRICT = 0x2,
## Delete all matching flows.
OFPFC_DELETE = 0x3,
## Delete entry strictly matching wildcards and priority.
OFPFC_DELETE_STRICT = 0x4,
};
## Openflow config flag definitions
##
## TODO: describe
type ofp_config_flags: enum {
## No special handling for fragments.
OFPC_FRAG_NORMAL = 0,
## Drop fragments.
OFPC_FRAG_DROP = 1,
## Reassemble (only if OFPC_IP_REASM set).
OFPC_FRAG_REASM = 2,
OFPC_FRAG_MASK = 3,
};
}

View file

@ -0,0 +1,289 @@
##! Bro's OpenFlow control framework
##!
##! This plugin-based framework allows controlling OpenFlow-capable
##! switches by implementing communication to an OpenFlow controller
##! via plugins. The framework has to be instantiated via the ``*_new``
##! function of one of the plugins. This framework only offers very low-level
##! functionality; if you want to use OpenFlow-capable switches, e.g.,
##! for shunting, please look at the NetControl framework, which provides
##! higher-level functions and can use the OpenFlow framework as a backend.
module OpenFlow;
@load ./consts
@load ./types
export {
## Global flow_mod function.
##
## controller: The controller which should execute the flow modification
##
## match: The ofp_match record which describes the flow to match.
##
## flow_mod: The openflow flow_mod record which describes the action to take.
##
## Returns: F on error or if the plugin does not support the operation, T when the operation was queued.
global flow_mod: function(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool;
## Clear the current flow table of the controller.
##
## controller: The controller which should execute the flow modification
##
## Returns: F on error or if the plugin does not support the operation, T when the operation was queued.
global flow_clear: function(controller: Controller): bool;
## Event confirming successful modification of a flow rule.
##
## name: The unique name of the OpenFlow controller from which this event originated.
##
## match: The ofp_match record which describes the flow to match.
##
## flow_mod: The openflow flow_mod record which describes the action to take.
##
## msg: An optional informational message by the plugin.
global flow_mod_success: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod, msg: string &default="");
## Reports an error while installing a flow Rule.
##
## name: The unique name of the OpenFlow controller from which this event originated.
##
## match: The ofp_match record which describes the flow to match.
##
## flow_mod: The openflow flow_mod record which describes the action to take.
##
## msg: Message to describe the event.
global flow_mod_failure: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod, msg: string &default="");
## Reports that a flow was removed by the switch because of either the hard or the idle timeout.
## This message is only generated by controllers that indicate that they support flow removal
## in supports_flow_removed.
##
## name: The unique name of the OpenFlow controller from which this event originated.
##
## match: The ofp_match record which was used to create the flow.
##
## cookie: The cookie that was specified when creating the flow.
##
## priority: The priority that was specified when creating the flow.
##
## reason: The reason for flow removal (OFPRR_*)
##
## duration_sec: duration of the flow in seconds
##
## packet_count: packet count of the flow
##
## byte_count: byte count of the flow
global flow_removed: event(name: string, match: ofp_match, cookie: count, priority: count, reason: count, duration_sec: count, idle_timeout: count, packet_count: count, byte_count: count);
## Convert a conn_id record into an ofp_match record that can be used to
## create match objects for OpenFlow.
##
## id: The conn_id record that describes the connection.
##
## reverse: reverse the sources and destinations when creating the match record (default F)
##
## Returns: ofp_match object for the conn_id record.
global match_conn: function(id: conn_id, reverse: bool &default=F): ofp_match;
# ###
# ### Low-level functions for cookie handling and plugin registration.
# ###
## Function to get the unique id out of a given cookie.
##
## cookie: The openflow match cookie.
##
## Returns: The cookie unique id.
global get_cookie_uid: function(cookie: count): count;
## Function to get the group id out of a given cookie.
##
## cookie: The openflow match cookie.
##
## Returns: The cookie group id.
global get_cookie_gid: function(cookie: count): count;
## Function to generate a new cookie using our group id.
##
## cookie: The openflow match cookie.
##
## Returns: The cookie group id.
global generate_cookie: function(cookie: count &default=0): count;
## Function to register a controller instance. This function
## is called automatically by the plugin _new functions.
##
## tpe: type of this plugin
##
## name: unique name of this controller instance.
##
## controller: The controller to register
global register_controller: function(tpe: OpenFlow::Plugin, name: string, controller: Controller);
## Function to unregister a controller instance. This function
## should be called when a specific controller should no longer
## be used.
##
## controller: The controller to unregister
global unregister_controller: function(controller: Controller);
## Function to signal that a controller finished activation and is
## ready to use. Will throw the ``OpenFlow::controller_activated``
## event.
global controller_init_done: function(controller: Controller);
## Event that is raised once a controller finishes initialization
## and is completely activated.
## name: unique name of this controller instance.
##
## controller: The controller that finished activation.
global OpenFlow::controller_activated: event(name: string, controller: Controller);
## Function to lookup a controller instance by name
##
## name: unique name of the controller to look up
##
## Returns: one element vector with controller, if found. Empty vector otherwise.
global lookup_controller: function(name: string): vector of Controller;
}
global name_to_controller: table[string] of Controller;
function match_conn(id: conn_id, reverse: bool &default=F): ofp_match
{
local dl_type = ETH_IPv4;
local proto = IP_TCP;
local orig_h: addr;
local orig_p: port;
local resp_h: addr;
local resp_p: port;
if ( reverse == F )
{
orig_h = id$orig_h;
orig_p = id$orig_p;
resp_h = id$resp_h;
resp_p = id$resp_p;
}
else
{
orig_h = id$resp_h;
orig_p = id$resp_p;
resp_h = id$orig_h;
resp_p = id$orig_p;
}
if ( is_v6_addr(orig_h) )
dl_type = ETH_IPv6;
if ( is_udp_port(orig_p) )
proto = IP_UDP;
else if ( is_icmp_port(orig_p) )
proto = IP_ICMP;
return ofp_match(
$dl_type=dl_type,
$nw_proto=proto,
$nw_src=addr_to_subnet(orig_h),
$tp_src=port_to_count(orig_p),
$nw_dst=addr_to_subnet(resp_h),
$tp_dst=port_to_count(resp_p)
);
}
# local function to forge a flow_mod cookie for this framework.
# all flow entries from the openflow framework should have
# bit 42 of the cookie set.
function generate_cookie(cookie: count &default=0): count
{
local c = BRO_COOKIE_ID * COOKIE_BID_START;
if ( cookie >= COOKIE_UID_SIZE )
Reporter::warning(fmt("The given cookie uid '%d' is > 32bit and will be discarded", cookie));
else
c += cookie;
return c;
}
# local function to check if a given flow_mod cookie is forged from this framework.
function is_valid_cookie(cookie: count): bool
{
if ( cookie / COOKIE_BID_START == BRO_COOKIE_ID )
return T;
Reporter::warning(fmt("The given Openflow cookie '%d' is not valid", cookie));
return F;
}
function get_cookie_uid(cookie: count): count
{
if( is_valid_cookie(cookie) )
return (cookie - ((cookie / COOKIE_GID_START) * COOKIE_GID_START));
return INVALID_COOKIE;
}
function get_cookie_gid(cookie: count): count
{
if( is_valid_cookie(cookie) )
return (
(cookie - (COOKIE_BID_START * BRO_COOKIE_ID) -
(cookie - ((cookie / COOKIE_GID_START) * COOKIE_GID_START))) /
COOKIE_GID_START
);
return INVALID_COOKIE;
}
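# Worked example of the cookie layout (illustrative, derived from the constants
# above): generate_cookie(5) yields
#   BRO_COOKIE_ID * COOKIE_BID_START + 5 = 4 * 2^40 + 5 = 4398046511109,
# i.e. bit 42 set plus the unique id in the low 32 bits. For that cookie,
# is_valid_cookie() holds because 4398046511109 / 2^40 == 4 == BRO_COOKIE_ID,
# get_cookie_uid() returns 5 (the remainder below the 2^32 group-id boundary),
# and get_cookie_gid() returns 0 since no group id was encoded.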
function controller_init_done(controller: Controller)
{
if ( controller$state$_name !in name_to_controller )
{
Reporter::error(fmt("Openflow initialized unknown plugin %s successfully?", controller$state$_name));
return;
}
controller$state$_activated = T;
event OpenFlow::controller_activated(controller$state$_name, controller);
}
# Functions that are called from cluster.bro and non-cluster.bro
function register_controller_impl(tpe: OpenFlow::Plugin, name: string, controller: Controller)
{
if ( controller$state$_name in name_to_controller )
{
Reporter::error(fmt("OpenFlow Controller %s was already registered. Ignored duplicate registration", controller$state$_name));
return;
}
name_to_controller[controller$state$_name] = controller;
if ( controller?$init )
controller$init(controller$state);
else
controller_init_done(controller);
}
function unregister_controller_impl(controller: Controller)
{
if ( controller$state$_name in name_to_controller )
delete name_to_controller[controller$state$_name];
else
Reporter::error("OpenFlow Controller %s was not registered in unregister.");
if ( controller?$destroy )
controller$destroy(controller$state);
}
function lookup_controller_impl(name: string): vector of Controller
{
if ( name in name_to_controller )
return vector(name_to_controller[name]);
else
return vector();
}

View file

@ -0,0 +1,44 @@
@load ./main
module OpenFlow;
# the flow_mod function wrapper
function flow_mod(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool
{
if ( ! controller$state$_activated )
return F;
if ( controller?$flow_mod )
return controller$flow_mod(controller$state, match, flow_mod);
else
return F;
}
function flow_clear(controller: Controller): bool
{
if ( ! controller$state$_activated )
return F;
if ( controller?$flow_clear )
return controller$flow_clear(controller$state);
else
return F;
}
function register_controller(tpe: OpenFlow::Plugin, name: string, controller: Controller)
{
controller$state$_name = cat(tpe, name);
controller$state$_plugin = tpe;
register_controller_impl(tpe, name, controller);
}
function unregister_controller(controller: Controller)
{
unregister_controller_impl(controller);
}
function lookup_controller(name: string): vector of Controller
{
return lookup_controller_impl(name);
}

View file

@ -0,0 +1,3 @@
@load ./ryu
@load ./log
@load ./broker

View file

@ -0,0 +1,98 @@
##! OpenFlow plugin for interfacing to controllers via Broker.
@load base/frameworks/openflow
@load base/frameworks/broker
module OpenFlow;
@ifdef ( Broker::__enable )
export {
redef enum Plugin += {
BROKER,
};
## Broker controller constructor.
##
## host: Controller ip.
##
## host_port: Controller listen port.
##
## topic: broker topic to send messages to.
##
## dpid: OpenFlow switch datapath id.
##
## Returns: OpenFlow::Controller record
global broker_new: function(name: string, host: addr, host_port: port, topic: string, dpid: count): OpenFlow::Controller;
redef record ControllerState += {
## Controller ip.
broker_host: addr &optional;
## Controller listen port.
broker_port: port &optional;
## OpenFlow switch datapath id.
broker_dpid: count &optional;
## Topic to send events for this controller to
broker_topic: string &optional;
};
global broker_flow_mod: event(name: string, dpid: count, match: ofp_match, flow_mod: ofp_flow_mod);
global broker_flow_clear: event(name: string, dpid: count);
}
global broker_peers: table[port, string] of Controller;
function broker_describe(state: ControllerState): string
{
return fmt("Broker-%s:%d-%d", state$broker_host, state$broker_port, state$broker_dpid);
}
function broker_flow_mod_fun(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
{
Broker::send_event(state$broker_topic, Broker::event_args(broker_flow_mod, state$_name, state$broker_dpid, match, flow_mod));
return T;
}
function broker_flow_clear_fun(state: OpenFlow::ControllerState): bool
{
Broker::send_event(state$broker_topic, Broker::event_args(broker_flow_clear, state$_name, state$broker_dpid));
return T;
}
function broker_init(state: OpenFlow::ControllerState)
{
Broker::enable();
Broker::connect(cat(state$broker_host), state$broker_port, 1sec);
Broker::subscribe_to_events(state$broker_topic); # openflow success and failure events are sent back directly by the peer plugin via Broker.
}
event Broker::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string)
{
if ( [peer_port, peer_address] !in broker_peers )
# ok, this one was none of ours...
return;
local p = broker_peers[peer_port, peer_address];
controller_init_done(p);
delete broker_peers[peer_port, peer_address];
}
# broker controller constructor
function broker_new(name: string, host: addr, host_port: port, topic: string, dpid: count): OpenFlow::Controller
{
local c = OpenFlow::Controller($state=OpenFlow::ControllerState($broker_host=host, $broker_port=host_port, $broker_dpid=dpid, $broker_topic=topic),
$flow_mod=broker_flow_mod_fun, $flow_clear=broker_flow_clear_fun, $describe=broker_describe, $supports_flow_removed=T, $init=broker_init);
register_controller(OpenFlow::BROKER, name, c);
if ( [host_port, cat(host)] in broker_peers )
Reporter::warning(fmt("Peer %s:%s was added to NetControl acld plugin twice.", host, host_port));
else
broker_peers[host_port, cat(host)] = c;
return c;
}
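# Illustrative usage sketch (not part of the original script): create a
# Broker-backed controller and hand it to the NetControl OpenFlow plugin shown
# earlier. Name, host, port, topic and datapath id are placeholders.
#
# event bro_init()
#     {
#     local c = OpenFlow::broker_new("of-broker", 127.0.0.1, 9999/tcp, "bro/openflow", 42);
#     # e.g. NetControl::activate(NetControl::create_openflow(c), 0);
#     }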
@endif

View file

@ -0,0 +1,76 @@
##! OpenFlow plugin that outputs flow-modification commands
##! to a Bro log file.
@load base/frameworks/openflow
@load base/frameworks/logging
module OpenFlow;
export {
redef enum Plugin += {
OFLOG,
};
redef enum Log::ID += { LOG };
## Log controller constructor.
##
## dpid: OpenFlow switch datapath id.
##
## success_event: If true, flow_mod_success is raised for each logged line.
##
## Returns: OpenFlow::Controller record
global log_new: function(dpid: count, success_event: bool &default=T): OpenFlow::Controller;
redef record ControllerState += {
## OpenFlow switch datapath id.
log_dpid: count &optional;
## Raise or do not raise success event
log_success_event: bool &optional;
};
## The record type which contains column fields of the OpenFlow log.
type Info: record {
## Network time
ts: time &log;
## OpenFlow switch datapath id
dpid: count &log;
## OpenFlow match fields
match: ofp_match &log;
## OpenFlow modify flow entry message
flow_mod: ofp_flow_mod &log;
};
## Event that can be handled to access the :bro:type:`OpenFlow::Info`
## record as it is sent on to the logging framework.
global log_openflow: event(rec: Info);
}
event bro_init() &priority=5
{
Log::create_stream(OpenFlow::LOG, [$columns=Info, $ev=log_openflow, $path="openflow"]);
}
function log_flow_mod(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
{
Log::write(OpenFlow::LOG, [$ts=network_time(), $dpid=state$log_dpid, $match=match, $flow_mod=flow_mod]);
if ( state$log_success_event )
event OpenFlow::flow_mod_success(state$_name, match, flow_mod);
return T;
}
function log_describe(state: ControllerState): string
{
return fmt("Log-%d", state$log_dpid);
}
function log_new(dpid: count, success_event: bool &default=T): OpenFlow::Controller
{
local c = OpenFlow::Controller($state=OpenFlow::ControllerState($log_dpid=dpid, $log_success_event=success_event),
$flow_mod=log_flow_mod, $describe=log_describe, $supports_flow_removed=F);
register_controller(OpenFlow::OFLOG, cat(dpid), c);
return c;
}
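# Illustrative usage sketch (not part of the original script): a log-only
# controller is handy for dry runs, since every flow_mod ends up in openflow.log
# and, by default, still raises flow_mod_success. The datapath id is a
# placeholder.
#
# event bro_init()
#     {
#     local c = OpenFlow::log_new(42);
#     # e.g. hand it to the NetControl OpenFlow plugin:
#     # NetControl::activate(NetControl::create_openflow(c), 0);
#     }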

View file

@ -0,0 +1,190 @@
##! OpenFlow plugin for the Ryu controller.
@load base/frameworks/openflow
@load base/utils/active-http
@load base/utils/exec
@load base/utils/json
module OpenFlow;
export {
redef enum Plugin += {
RYU,
};
## Ryu controller constructor.
##
## host: Controller ip.
##
## host_port: Controller listen port.
##
## dpid: OpenFlow switch datapath id.
##
## Returns: OpenFlow::Controller record
global ryu_new: function(host: addr, host_port: count, dpid: count): OpenFlow::Controller;
redef record ControllerState += {
## Controller ip.
ryu_host: addr &optional;
## Controller listen port.
ryu_port: count &optional;
## OpenFlow switch datapath id.
ryu_dpid: count &optional;
## Enable debug mode - output JSON to stdout; do not perform actions
ryu_debug: bool &default=F;
};
}
# Ryu ReST API flow_mod URL-path
const RYU_FLOWENTRY_PATH = "/stats/flowentry/";
# Ryu ReST API flow_stats URL-path
#const RYU_FLOWSTATS_PATH = "/stats/flow/";
# Ryu ReST API action_output type.
type ryu_flow_action: record {
# Ryu uses strings as its ReST API output action.
_type: string;
# The output port for type OUTPUT
_port: count &optional;
};
# The ReST API documentation can be found at
# https://media.readthedocs.org/pdf/ryu/latest/ryu.pdf
# Ryu ReST API flow_mod type.
type ryu_ofp_flow_mod: record {
dpid: count;
cookie: count &optional;
cookie_mask: count &optional;
table_id: count &optional;
idle_timeout: count &optional;
hard_timeout: count &optional;
priority: count &optional;
flags: count &optional;
match: OpenFlow::ofp_match;
actions: vector of ryu_flow_action;
out_port: count &optional;
out_group: count &optional;
};
# Mapping between ofp flow mod commands and ryu urls
const ryu_url: table[ofp_flow_mod_command] of string = {
[OFPFC_ADD] = "add",
[OFPFC_MODIFY] = "modify",
[OFPFC_MODIFY_STRICT] = "modify_strict",
[OFPFC_DELETE] = "delete",
[OFPFC_DELETE_STRICT] = "delete_strict",
};
# Ryu flow_mod function
function ryu_flow_mod(state: OpenFlow::ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
{
if ( state$_plugin != RYU )
{
Reporter::error("Ryu openflow plugin was called with state of non-ryu plugin");
return F;
}
# Generate ryu_flow_actions because their type differs (using strings as type).
local flow_actions: vector of ryu_flow_action = vector();
for ( i in flow_mod$actions$out_ports )
flow_actions[|flow_actions|] = ryu_flow_action($_type="OUTPUT", $_port=flow_mod$actions$out_ports[i]);
# Generate our ryu_flow_mod record for the ReST API call.
local mod: ryu_ofp_flow_mod = ryu_ofp_flow_mod(
$dpid=state$ryu_dpid,
$cookie=flow_mod$cookie,
$idle_timeout=flow_mod$idle_timeout,
$hard_timeout=flow_mod$hard_timeout,
$priority=flow_mod$priority,
$flags=flow_mod$flags,
$match=match,
$actions=flow_actions
);
if ( flow_mod?$out_port )
mod$out_port = flow_mod$out_port;
if ( flow_mod?$out_group )
mod$out_group = flow_mod$out_group;
# Type of the command
local command_type: string;
if ( flow_mod$command in ryu_url )
command_type = ryu_url[flow_mod$command];
else
{
Reporter::warning(fmt("The given OpenFlow command type '%s' is not available", cat(flow_mod$command)));
return F;
}
local url=cat("http://", cat(state$ryu_host), ":", cat(state$ryu_port), RYU_FLOWENTRY_PATH, command_type);
if ( state$ryu_debug )
{
print url;
print to_json(mod);
event OpenFlow::flow_mod_success(state$_name, match, flow_mod);
return T;
}
# Create the ActiveHTTP request and convert the record to a Ryu ReST API JSON string
local request: ActiveHTTP::Request = ActiveHTTP::Request(
$url=url,
$method="POST",
$client_data=to_json(mod)
);
# Execute call to Ryu's ReST API
when ( local result = ActiveHTTP::request(request) )
{
if ( result$code == 200 )
event OpenFlow::flow_mod_success(state$_name, match, flow_mod, result$body);
else
{
Reporter::warning(fmt("Flow modification failed with error: %s", result$body));
event OpenFlow::flow_mod_failure(state$_name, match, flow_mod, result$body);
return F;
}
}
return T;
}
function ryu_flow_clear(state: OpenFlow::ControllerState): bool
{
local url=cat("http://", cat(state$ryu_host), ":", cat(state$ryu_port), RYU_FLOWENTRY_PATH, "clear", "/", state$ryu_dpid);
if ( state$ryu_debug )
{
print url;
return T;
}
local request: ActiveHTTP::Request = ActiveHTTP::Request(
$url=url,
$method="DELETE"
);
when ( local result = ActiveHTTP::request(request) )
{
}
return T;
}
function ryu_describe(state: ControllerState): string
{
return fmt("Ryu-%d-http://%s:%d", state$ryu_dpid, state$ryu_host, state$ryu_port);
}
# Ryu controller constructor
function ryu_new(host: addr, host_port: count, dpid: count): OpenFlow::Controller
{
local c = OpenFlow::Controller($state=OpenFlow::ControllerState($ryu_host=host, $ryu_port=host_port, $ryu_dpid=dpid),
$flow_mod=ryu_flow_mod, $flow_clear=ryu_flow_clear, $describe=ryu_describe, $supports_flow_removed=F);
register_controller(OpenFlow::RYU, cat(host,host_port,dpid), c);
return c;
}
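A minimal instantiation sketch; the Ryu REST endpoint address, port, and datapath id below are placeholders.
event bro_init()
	{
	# Hypothetical Ryu REST endpoint and switch datapath id.
	local ryu = OpenFlow::ryu_new(127.0.0.1, 8080, 42);
	}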

View file

@ -0,0 +1,132 @@
##! Types used by the OpenFlow framework.
module OpenFlow;
@load ./consts
export {
## Available OpenFlow plugins.
type Plugin: enum {
## Internal placeholder plugin
INVALID,
};
## Controller related state.
## Can be redefined by plugins to
## add state.
type ControllerState: record {
## Internally set to the type of plugin used.
_plugin: Plugin &optional;
## Internally set to the unique name of the controller.
_name: string &optional;
## Internally set to true once the controller is activated
_activated: bool &default=F;
} &redef;
## OpenFlow match definition.
##
## The OpenFlow match record describes
## which packets match a specific
## rule in a flow table.
type ofp_match: record {
# Input switch port.
in_port: count &optional;
# Ethernet source address.
dl_src: string &optional;
# Ethernet destination address.
dl_dst: string &optional;
# Input VLAN id.
dl_vlan: count &optional;
# Input VLAN priority.
dl_vlan_pcp: count &optional;
# Ethernet frame type.
dl_type: count &optional;
# IP ToS (actually DSCP field, 6bits).
nw_tos: count &optional;
# IP protocol or lower 8 bits of ARP opcode.
nw_proto: count &optional;
# At the moment, we store both v4 and v6 in the same fields.
# This is not how OpenFlow does it; we might want to change that.
# IP source address.
nw_src: subnet &optional;
# IP destination address.
nw_dst: subnet &optional;
# TCP/UDP source port.
tp_src: count &optional;
# TCP/UDP destination port.
tp_dst: count &optional;
} &log;
## The actions that can be taken in a flow.
## (Separate record to make ofp_flow_mod less crowded.)
type ofp_flow_action: record {
## Output ports to send data to.
out_ports: vector of count &default=vector();
## Set the VLAN VID to this value.
vlan_vid: count &optional;
## Set the VLAN priority to this value.
vlan_pcp: count &optional;
## Strip the VLAN tag.
vlan_strip: bool &default=F;
## Set the Ethernet source address.
dl_src: string &optional;
## Set the Ethernet destination address.
dl_dst: string &optional;
## Set the IP ToS to this value.
nw_tos: count &optional;
## Set the source IP address to this value.
nw_src: addr &optional;
## Set the destination IP address to this value.
nw_dst: addr &optional;
## Set the TCP/UDP source port.
tp_src: count &optional;
## Set the TCP/UDP destination port.
tp_dst: count &optional;
} &log;
## OpenFlow flow_mod definition, describing the action to perform.
type ofp_flow_mod: record {
## Opaque controller-issued identifier.
# This is optional in the specification, but we force it
# so we can always identify our flows.
cookie: count; # &default=BRO_COOKIE_ID * COOKIE_BID_START;
# Flow actions
## Table to put the flow in. For delete commands, OFPTT_ALL can be
## used to delete flows from all matching tables.
table_id: count &optional;
## One of OFPFC_*.
command: ofp_flow_mod_command; # &default=OFPFC_ADD;
## Idle time before discarding (seconds).
idle_timeout: count &default=0;
## Max time before discarding (seconds).
hard_timeout: count &default=0;
## Priority level of flow entry.
priority: count &default=0;
## For OFPFC_DELETE* commands, require matching entries to include
## this as an output port/group. OFPP_ANY/OFPG_ANY means no restrictions.
out_port: count &optional;
out_group: count &optional;
## Bitmap of the OFPFF_* flags
flags: count &default=0;
## Actions to take on match
actions: ofp_flow_action &default=ofp_flow_action();
} &log;
## Controller record representing an OpenFlow controller.
type Controller: record {
## Controller related state.
state: ControllerState;
## Does the controller support the flow_removed event?
supports_flow_removed: bool;
## Function that describes the controller. Has to be implemented.
describe: function(state: ControllerState): string;
## One-time initialization function. If defined, controller_init_done has to be called once initialization finishes.
init: function (state: ControllerState) &optional;
## One-time destruction function.
destroy: function (state: ControllerState) &optional;
## The flow_mod function.
flow_mod: function(state: ControllerState, match: ofp_match, flow_mod: ofp_flow_mod): bool &optional;
## The flow_clear function.
flow_clear: function(state: ControllerState): bool &optional;
};
}

View file

@ -18,7 +18,7 @@ export {
event net_stats_update(last_stat: NetStats) event net_stats_update(last_stat: NetStats)
{ {
local ns = net_stats(); local ns = get_net_stats();
local new_dropped = ns$pkts_dropped - last_stat$pkts_dropped; local new_dropped = ns$pkts_dropped - last_stat$pkts_dropped;
if ( new_dropped > 0 ) if ( new_dropped > 0 )
{ {
@ -38,5 +38,5 @@ event bro_init()
# Since this currently only calculates packet drops, let's skip the stats # Since this currently only calculates packet drops, let's skip the stats
# collection if reading traces. # collection if reading traces.
if ( ! reading_traces() ) if ( ! reading_traces() )
schedule stats_collection_interval { net_stats_update(net_stats()) }; schedule stats_collection_interval { net_stats_update(get_net_stats()) };
} }
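For reference, a minimal sketch (not part of this diff) of calling the renamed BiF directly; it assumes get_net_stats() returns the NetStats record whose pkts_recvd/pkts_dropped fields are used above.
event bro_done()
	{
	local ns = get_net_stats();
	print fmt("packets received=%d dropped=%d", ns$pkts_recvd, ns$pkts_dropped);
	}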

View file

@ -5,7 +5,8 @@
module SumStats; module SumStats;
export { export {
## The various calculations are all defined as plugins. ## Type to represent the calculations that are available. The calculations
## are all defined as plugins.
type Calculation: enum { type Calculation: enum {
PLACEHOLDER PLACEHOLDER
}; };
@ -39,6 +40,7 @@ export {
str: string &optional; str: string &optional;
}; };
## Represents a reducer.
type Reducer: record { type Reducer: record {
## Observation stream identifier for the reducer ## Observation stream identifier for the reducer
## to attach to. ## to attach to.
@ -56,7 +58,7 @@ export {
normalize_key: function(key: SumStats::Key): Key &optional; normalize_key: function(key: SumStats::Key): Key &optional;
}; };
## Value calculated for an observation stream fed into a reducer. ## Result calculated for an observation stream fed into a reducer.
## Most of the fields are added by plugins. ## Most of the fields are added by plugins.
type ResultVal: record { type ResultVal: record {
## The time when the first observation was added to ## The time when the first observation was added to
@ -71,14 +73,15 @@ export {
num: count &default=0; num: count &default=0;
}; };
## Type to store results for multiple reducers. ## Type to store a table of results for multiple reducers indexed by
## observation stream identifier.
type Result: table[string] of ResultVal; type Result: table[string] of ResultVal;
## Type to store a table of sumstats results indexed by keys. ## Type to store a table of sumstats results indexed by keys.
type ResultTable: table[Key] of Result; type ResultTable: table[Key] of Result;
## SumStats represent an aggregation of reducers along with ## Represents a SumStat, which consists of an aggregation of reducers along
## mechanisms to handle various situations like the epoch ending ## with mechanisms to handle various situations like the epoch ending
## or thresholds being crossed. ## or thresholds being crossed.
## ##
## It's best to not access any global state outside ## It's best to not access any global state outside
@ -101,21 +104,28 @@ export {
## The reducers for the SumStat. ## The reducers for the SumStat.
reducers: set[Reducer]; reducers: set[Reducer];
## Provide a function to calculate a value from the ## A function that will be called once for each observation in order
## :bro:see:`SumStats::Result` structure which will be used ## to calculate a value from the :bro:see:`SumStats::Result` structure
## for thresholding. ## which will be used for thresholding.
## This is required if a *threshold* value is given. ## This function is required if a *threshold* value or
## a *threshold_series* is given.
threshold_val: function(key: SumStats::Key, result: SumStats::Result): double &optional; threshold_val: function(key: SumStats::Key, result: SumStats::Result): double &optional;
## The threshold value for calling the ## The threshold value for calling the *threshold_crossed* callback.
## *threshold_crossed* callback. ## If you need more than one threshold value, then use
## *threshold_series* instead.
threshold: double &optional; threshold: double &optional;
## A series of thresholds for calling the ## A series of thresholds for calling the *threshold_crossed*
## *threshold_crossed* callback. ## callback. These thresholds must be listed in ascending order,
## because a threshold is not checked until the preceding one has
## been crossed.
threshold_series: vector of double &optional; threshold_series: vector of double &optional;
## A callback that is called when a threshold is crossed. ## A callback that is called when a threshold is crossed.
## A threshold is crossed when the value returned from *threshold_val*
## is greater than or equal to the threshold value, but only the first
## time this happens within an epoch.
threshold_crossed: function(key: SumStats::Key, result: SumStats::Result) &optional; threshold_crossed: function(key: SumStats::Key, result: SumStats::Result) &optional;
## A callback that receives each of the results at the ## A callback that receives each of the results at the
@ -130,6 +140,8 @@ export {
}; };
## Create a summary statistic. ## Create a summary statistic.
##
## ss: The SumStat to create.
global create: function(ss: SumStats::SumStat); global create: function(ss: SumStats::SumStat);
## Add data into an observation stream. This should be ## Add data into an observation stream. This should be

View file

@ -1,3 +1,5 @@
##! Calculate the average.
@load ../main @load ../main
module SumStats; module SumStats;
@ -9,7 +11,7 @@ export {
}; };
redef record ResultVal += { redef record ResultVal += {
## For numeric data, this calculates the average of all values. ## For numeric data, this is the average of all values.
average: double &optional; average: double &optional;
}; };
} }

View file

@ -1,3 +1,5 @@
##! Calculate the number of unique values (using the HyperLogLog algorithm).
@load base/frameworks/sumstats @load base/frameworks/sumstats
module SumStats; module SumStats;

View file

@ -1,3 +1,5 @@
##! Keep the last X observations.
@load base/frameworks/sumstats @load base/frameworks/sumstats
@load base/utils/queue @load base/utils/queue

View file

@ -1,3 +1,5 @@
##! Find the maximum value.
@load ../main @load ../main
module SumStats; module SumStats;
@ -9,7 +11,7 @@ export {
}; };
redef record ResultVal += { redef record ResultVal += {
## For numeric data, this tracks the maximum value given. ## For numeric data, this tracks the maximum value.
max: double &optional; max: double &optional;
}; };
} }

View file

@ -1,3 +1,5 @@
##! Find the minimum value.
@load ../main @load ../main
module SumStats; module SumStats;
@ -9,7 +11,7 @@ export {
}; };
redef record ResultVal += { redef record ResultVal += {
## For numeric data, this tracks the minimum value given. ## For numeric data, this tracks the minimum value.
min: double &optional; min: double &optional;
}; };
} }

View file

@ -1,3 +1,5 @@
##! Keep a random sample of values.
@load base/frameworks/sumstats/main @load base/frameworks/sumstats/main
module SumStats; module SumStats;
@ -10,7 +12,7 @@ export {
}; };
redef record Reducer += { redef record Reducer += {
## A number of sample Observations to collect. ## The number of sample Observations to collect.
num_samples: count &default=0; num_samples: count &default=0;
}; };

View file

@ -1,3 +1,5 @@
##! Calculate the standard deviation.
@load ./variance @load ./variance
@load ../main @load ../main
@ -5,7 +7,7 @@ module SumStats;
export { export {
redef enum Calculation += { redef enum Calculation += {
## Find the standard deviation of the values. ## Calculate the standard deviation of the values.
STD_DEV STD_DEV
}; };

View file

@ -1,11 +1,13 @@
##! Calculate the sum.
@load ../main @load ../main
module SumStats; module SumStats;
export { export {
redef enum Calculation += { redef enum Calculation += {
## Sums the values given. For string values, ## Calculate the sum of the values. For string values,
## this will be the number of strings given. ## this will be the number of strings.
SUM SUM
}; };

View file

@ -1,3 +1,5 @@
##! Keep the top-k (i.e., most frequently occurring) observations.
@load base/frameworks/sumstats @load base/frameworks/sumstats
module SumStats; module SumStats;
@ -9,10 +11,13 @@ export {
}; };
redef enum Calculation += { redef enum Calculation += {
## Keep a top-k list of values.
TOPK TOPK
}; };
redef record ResultVal += { redef record ResultVal += {
## A handle which can be passed to some built-in functions to get
## the top-k results.
topk: opaque of topk &optional; topk: opaque of topk &optional;
}; };

View file

@ -1,10 +1,12 @@
##! Calculate the number of unique values.
@load ../main @load ../main
module SumStats; module SumStats;
export { export {
redef record Reducer += { redef record Reducer += {
## Maximum number of unique elements to store. ## Maximum number of unique values to store.
unique_max: count &optional; unique_max: count &optional;
}; };
@ -15,7 +17,7 @@ export {
redef record ResultVal += { redef record ResultVal += {
## If cardinality is being tracked, the number of unique ## If cardinality is being tracked, the number of unique
## items is tracked here. ## values is tracked here.
unique: count &default=0; unique: count &default=0;
}; };
} }

View file

@ -1,3 +1,5 @@
##! Calculate the variance.
@load ./average @load ./average
@load ../main @load ../main
@ -5,12 +7,12 @@ module SumStats;
export { export {
redef enum Calculation += { redef enum Calculation += {
## Find the variance of the values. ## Calculate the variance of the values.
VARIANCE VARIANCE
}; };
redef record ResultVal += { redef record ResultVal += {
## For numeric data, this calculates the variance. ## For numeric data, this is the variance.
variance: double &optional; variance: double &optional;
}; };
} }

View file

@ -39,6 +39,13 @@ type count_set: set[count];
## directly and then remove this alias. ## directly and then remove this alias.
type index_vec: vector of count; type index_vec: vector of count;
## A vector of subnets.
##
## .. todo:: We need this type definition only for declaring builtin functions
## via ``bifcl``. We should extend ``bifcl`` to understand composite types
## directly and then remove this alias.
type subnet_vec: vector of subnet;
## A vector of any, used by some builtin functions to store a list of varying ## A vector of any, used by some builtin functions to store a list of varying
## types. ## types.
## ##
@ -120,6 +127,18 @@ type conn_id: record {
resp_p: port; ##< The responder's port number. resp_p: port; ##< The responder's port number.
} &log; } &log;
## The identifying 4-tuple of a uni-directional flow.
##
## .. note:: It's actually a 5-tuple: the transport-layer protocol is stored as
## part of the port values, `src_p` and `dst_p`, and can be extracted from
## them with :bro:id:`get_port_transport_proto`.
type flow_id : record {
src_h: addr; ##< The source IP address.
src_p: port; ##< The source port number.
dst_h: addr; ##< The destination IP address.
dst_p: port; ##< The destination port number.
} &log;
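As the note above describes, the transport protocol can be recovered from either port; a tiny illustrative helper (the name flow_transport is hypothetical):
function flow_transport(f: flow_id): transport_proto
	{
	return get_port_transport_proto(f$src_p);
	}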
## Specifics about an ICMP conversation. ICMP events typically pass this in ## Specifics about an ICMP conversation. ICMP events typically pass this in
## addition to :bro:type:`conn_id`. ## addition to :bro:type:`conn_id`.
## ##
@ -310,6 +329,8 @@ type endpoint: record {
## The current IPv6 flow label that the connection endpoint is using. ## The current IPv6 flow label that the connection endpoint is using.
## Always 0 if the connection is over IPv4. ## Always 0 if the connection is over IPv4.
flow_label: count; flow_label: count;
## The link-layer address seen in the first packet (if available).
l2_addr: string &optional;
}; };
## A connection. This is Bro's basic connection type describing IP- and ## A connection. This is Bro's basic connection type describing IP- and
@ -346,10 +367,10 @@ type connection: record {
## handled and reassigns this field to the new encapsulation. ## handled and reassigns this field to the new encapsulation.
tunnel: EncapsulatingConnVector &optional; tunnel: EncapsulatingConnVector &optional;
## The outer VLAN, if applicable, for this connection. ## The outer VLAN, if applicable for this connection.
vlan: int &optional; vlan: int &optional;
## The inner VLAN, if applicable, for this connection. ## The inner VLAN, if applicable for this connection.
inner_vlan: int &optional; inner_vlan: int &optional;
}; };
@ -455,14 +476,38 @@ type NetStats: record {
bytes_recvd: count &default=0; ##< Bytes received by Bro. bytes_recvd: count &default=0; ##< Bytes received by Bro.
}; };
## Statistics about Bro's resource consumption. type ConnStats: record {
total_conns: count; ##<
current_conns: count; ##<
current_conns_extern: count; ##<
sess_current_conns: count; ##<
num_packets: count;
num_fragments: count;
max_fragments: count;
num_tcp_conns: count; ##< Current number of TCP connections in memory.
max_tcp_conns: count; ##< Maximum number of concurrent TCP connections so far.
cumulative_tcp_conns: count; ##< Total number of TCP connections so far.
num_udp_conns: count; ##< Current number of UDP flows in memory.
max_udp_conns: count; ##< Maximum number of concurrent UDP flows so far.
cumulative_udp_conns: count; ##< Total number of UDP flows so far.
num_icmp_conns: count; ##< Current number of ICMP flows in memory.
max_icmp_conns: count; ##< Maximum number of concurrent ICMP flows so far.
cumulative_icmp_conns: count; ##< Total number of ICMP flows so far.
killed_by_inactivity: count;
};
## Statistics about Bro's process.
## ##
## .. bro:see:: resource_usage ## .. bro:see:: get_proc_stats
## ##
## .. note:: All process-level values refer to Bro's main process only, not to ## .. note:: All process-level values refer to Bro's main process only, not to
## the child process it spawns for doing communication. ## the child process it spawns for doing communication.
type bro_resources: record { type ProcStats: record {
version: string; ##< Bro version string.
debug: bool; ##< True if compiled with --enable-debug. debug: bool; ##< True if compiled with --enable-debug.
start_time: time; ##< Start time of process. start_time: time; ##< Start time of process.
real_time: interval; ##< Elapsed real time since Bro started running. real_time: interval; ##< Elapsed real time since Bro started running.
@ -475,46 +520,85 @@ type bro_resources: record {
blocking_input: count; ##< Blocking input operations. blocking_input: count; ##< Blocking input operations.
blocking_output: count; ##< Blocking output operations. blocking_output: count; ##< Blocking output operations.
num_context: count; ##< Number of involuntary context switches. num_context: count; ##< Number of involuntary context switches.
};
num_TCP_conns: count; ##< Current number of TCP connections in memory. type EventStats: record {
num_UDP_conns: count; ##< Current number of UDP flows in memory. queued: count; ##< Total number of events queued so far.
num_ICMP_conns: count; ##< Current number of ICMP flows in memory. dispatched: count; ##< Total number of events dispatched so far.
num_fragments: count; ##< Current number of fragments pending reassembly.
num_packets: count; ##< Total number of packets processed to date.
num_timers: count; ##< Current number of pending timers.
num_events_queued: count; ##< Total number of events queued so far.
num_events_dispatched: count; ##< Total number of events dispatched so far.
max_TCP_conns: count; ##< Maximum number of concurrent TCP connections so far.
max_UDP_conns: count; ##< Maximum number of concurrent UDP connections so far.
max_ICMP_conns: count; ##< Maximum number of concurrent ICMP connections so far.
max_fragments: count; ##< Maximum number of concurrently buffered fragments so far.
max_timers: count; ##< Maximum number of concurrent timers pending so far.
}; };
## Summary statistics of all regular expression matchers. ## Summary statistics of all regular expression matchers.
## ##
## .. bro:see:: get_reassembler_stats
type ReassemblerStats: record {
file_size: count; ##< Byte size of File reassembly tracking.
frag_size: count; ##< Byte size of Fragment reassembly tracking.
tcp_size: count; ##< Byte size of TCP reassembly tracking.
unknown_size: count; ##< Byte size of reassembly tracking for unknown purposes.
};
## Statistics of all regular expression matchers.
##
## .. bro:see:: get_matcher_stats ## .. bro:see:: get_matcher_stats
type matcher_stats: record { type MatcherStats: record {
matchers: count; ##< Number of distinct RE matchers. matchers: count; ##< Number of distinct RE matchers.
nfa_states: count; ##< Number of NFA states across all matchers.
dfa_states: count; ##< Number of DFA states across all matchers. dfa_states: count; ##< Number of DFA states across all matchers.
computed: count; ##< Number of computed DFA state transitions. computed: count; ##< Number of computed DFA state transitions.
mem: count; ##< Number of bytes used by DFA states. mem: count; ##< Number of bytes used by DFA states.
hits: count; ##< Number of cache hits. hits: count; ##< Number of cache hits.
misses: count; ##< Number of cache misses. misses: count; ##< Number of cache misses.
avg_nfa_states: count; ##< Average number of NFA states across all matchers. };
## Statistics of timers.
##
## .. bro:see:: get_timer_stats
type TimerStats: record {
current: count; ##< Current number of pending timers.
max: count; ##< Maximum number of concurrent timers pending so far.
cumulative: count; ##< Cumulative number of timers scheduled.
};
## Statistics of file analysis.
##
## .. bro:see:: get_file_analysis_stats
type FileAnalysisStats: record {
current: count; ##< Current number of files being analyzed.
max: count; ##< Maximum number of concurrent files so far.
cumulative: count; ##< Cumulative number of files analyzed.
};
## Statistics related to Bro's active use of DNS. These numbers are
## about Bro performing DNS queries on its own, not traffic ## about Bro performing DNS queries on its own, not traffic
## being seen.
##
## .. bro:see:: get_dns_stats
type DNSStats: record {
requests: count; ##< Number of DNS requests made
successful: count; ##< Number of successful DNS replies.
failed: count; ##< Number of DNS reply failures.
pending: count; ##< Current pending queries.
cached_hosts: count; ##< Number of cached hosts.
cached_addresses: count; ##< Number of cached addresses.
}; };
## Statistics about number of gaps in TCP connections. ## Statistics about number of gaps in TCP connections.
## ##
## .. bro:see:: gap_report get_gap_summary ## .. bro:see:: get_gap_stats
type gap_info: record { type GapStats: record {
ack_events: count; ##< How many ack events *could* have had gaps. ack_events: count; ##< How many ack events *could* have had gaps.
ack_bytes: count; ##< How many bytes those covered. ack_bytes: count; ##< How many bytes those covered.
gap_events: count; ##< How many *did* have gaps. gap_events: count; ##< How many *did* have gaps.
gap_bytes: count; ##< How many bytes were missing in the gaps. gap_bytes: count; ##< How many bytes were missing in the gaps.
}; };
## Statistics about threads.
##
## .. bro:see:: get_thread_stats
type ThreadStats: record {
num_threads: count;
};
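A hedged sketch of reading a couple of these records through the get_proc_stats() and get_timer_stats() BiFs referenced in the doc comments above; the printed format is illustrative only.
event bro_done()
	{
	local ps = get_proc_stats();
	local ts = get_timer_stats();
	print fmt("real time %s, %d timers pending (max %d)", ps$real_time, ts$current, ts$max);
	}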
## Deprecated. ## Deprecated.
## ##
## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere ## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere
@ -774,71 +858,6 @@ type entropy_test_result: record {
serial_correlation: double; ##< Serial correlation coefficient. serial_correlation: double; ##< Serial correlation coefficient.
}; };
# Prototypes of Bro built-in functions.
@load base/bif/strings.bif
@load base/bif/bro.bif
@load base/bif/reporter.bif
## Deprecated. This is superseded by the new logging framework.
global log_file_name: function(tag: string): string &redef;
## Deprecated. This is superseded by the new logging framework.
global open_log_file: function(tag: string): file &redef;
## Specifies a directory for Bro to store its persistent state. All globals can
## be declared persistent via the :bro:attr:`&persistent` attribute.
const state_dir = ".state" &redef;
## Length of the delays inserted when storing state incrementally. To avoid
## dropping packets when serializing larger volumes of persistent state to
## disk, Bro interleaves the operation with continued packet processing.
const state_write_delay = 0.01 secs &redef;
global done_with_network = F;
event net_done(t: time) { done_with_network = T; }
function log_file_name(tag: string): string
{
local suffix = getenv("BRO_LOG_SUFFIX") == "" ? "log" : getenv("BRO_LOG_SUFFIX");
return fmt("%s.%s", tag, suffix);
}
function open_log_file(tag: string): file
{
return open(log_file_name(tag));
}
## Internal function.
function add_interface(iold: string, inew: string): string
{
if ( iold == "" )
return inew;
else
return fmt("%s %s", iold, inew);
}
## Network interfaces to listen on. Use ``redef interfaces += "eth0"`` to
## extend.
global interfaces = "" &add_func = add_interface;
## Internal function.
function add_signature_file(sold: string, snew: string): string
{
if ( sold == "" )
return snew;
else
return cat(sold, " ", snew);
}
## Signature files to read. Use ``redef signature_files += "foo.sig"`` to
## extend. Signature files added this way will be searched relative to
## ``BROPATH``. Using the ``@load-sigs`` directive instead is preferred
## since that can search paths relative to the current script.
global signature_files = "" &add_func = add_signature_file;
## ``p0f`` fingerprint file to use. Will be searched relative to ``BROPATH``.
const passive_fingerprint_file = "base/misc/p0f.fp" &redef;
# TCP values for :bro:see:`endpoint` *state* field. # TCP values for :bro:see:`endpoint` *state* field.
# todo:: these should go into an enum to make them autodoc'able. # todo:: these should go into an enum to make them autodoc'able.
const TCP_INACTIVE = 0; ##< Endpoint is still inactive. const TCP_INACTIVE = 0; ##< Endpoint is still inactive.
@ -1749,6 +1768,71 @@ type gtp_delete_pdp_ctx_response_elements: record {
ext: gtp_private_extension &optional; ext: gtp_private_extension &optional;
}; };
# Prototypes of Bro built-in functions.
@load base/bif/strings.bif
@load base/bif/bro.bif
@load base/bif/reporter.bif
## Deprecated. This is superseded by the new logging framework.
global log_file_name: function(tag: string): string &redef;
## Deprecated. This is superseded by the new logging framework.
global open_log_file: function(tag: string): file &redef;
## Specifies a directory for Bro to store its persistent state. All globals can
## be declared persistent via the :bro:attr:`&persistent` attribute.
const state_dir = ".state" &redef;
## Length of the delays inserted when storing state incrementally. To avoid
## dropping packets when serializing larger volumes of persistent state to
## disk, Bro interleaves the operation with continued packet processing.
const state_write_delay = 0.01 secs &redef;
global done_with_network = F;
event net_done(t: time) { done_with_network = T; }
function log_file_name(tag: string): string
{
local suffix = getenv("BRO_LOG_SUFFIX") == "" ? "log" : getenv("BRO_LOG_SUFFIX");
return fmt("%s.%s", tag, suffix);
}
function open_log_file(tag: string): file
{
return open(log_file_name(tag));
}
## Internal function.
function add_interface(iold: string, inew: string): string
{
if ( iold == "" )
return inew;
else
return fmt("%s %s", iold, inew);
}
## Network interfaces to listen on. Use ``redef interfaces += "eth0"`` to
## extend.
global interfaces = "" &add_func = add_interface;
## Internal function.
function add_signature_file(sold: string, snew: string): string
{
if ( sold == "" )
return snew;
else
return cat(sold, " ", snew);
}
## Signature files to read. Use ``redef signature_files += "foo.sig"`` to
## extend. Signature files added this way will be searched relative to
## ``BROPATH``. Using the ``@load-sigs`` directive instead is preferred
## since that can search paths relative to the current script.
global signature_files = "" &add_func = add_signature_file;
## ``p0f`` fingerprint file to use. Will be searched relative to ``BROPATH``.
const passive_fingerprint_file = "base/misc/p0f.fp" &redef;
## Definition of "secondary filters". A secondary filter is a BPF filter given ## Definition of "secondary filters". A secondary filter is a BPF filter given
## as index in this table. For each such filter, the corresponding event is ## as index in this table. For each such filter, the corresponding event is
## raised for all matching packets. ## raised for all matching packets.
@ -3416,23 +3500,17 @@ global pkt_profile_file: file &redef;
## .. bro:see:: load_sample ## .. bro:see:: load_sample
global load_sample_freq = 20 &redef; global load_sample_freq = 20 &redef;
## Rate at which to generate :bro:see:`gap_report` events assessing to what
## degree the measurement process appears to exhibit loss.
##
## .. bro:see:: gap_report
const gap_report_freq = 1.0 sec &redef;
## Whether to attempt to automatically detect SYN/FIN/RST-filtered trace ## Whether to attempt to automatically detect SYN/FIN/RST-filtered trace
## and not report missing segments for such connections. ## and not report missing segments for such connections.
## If this is enabled, then missing data at the end of connections may not ## If this is enabled, then missing data at the end of connections may not
## be reported via :bro:see:`content_gap`. ## be reported via :bro:see:`content_gap`.
const detect_filtered_trace = F &redef; const detect_filtered_trace = F &redef;
## Whether we want :bro:see:`content_gap` and :bro:see:`gap_report` for partial ## Whether we want :bro:see:`content_gap` and :bro:see:`get_gap_summary` for partial
## connections. A connection is partial if it is missing a full handshake. Note ## connections. A connection is partial if it is missing a full handshake. Note
## that gap reports for partial connections might not be reliable. ## that gap reports for partial connections might not be reliable.
## ##
## .. bro:see:: content_gap gap_report partial_connection ## .. bro:see:: content_gap get_gap_summary partial_connection
const report_gaps_for_partial = F &redef; const report_gaps_for_partial = F &redef;
## Flag to prevent Bro from exiting automatically when input is exhausted. ## Flag to prevent Bro from exiting automatically when input is exhausted.

View file

@ -13,6 +13,7 @@
@load base/utils/email @load base/utils/email
@load base/utils/exec @load base/utils/exec
@load base/utils/files @load base/utils/files
@load base/utils/geoip-distance
@load base/utils/numbers @load base/utils/numbers
@load base/utils/paths @load base/utils/paths
@load base/utils/patterns @load base/utils/patterns
@ -38,6 +39,8 @@
@load base/frameworks/reporter @load base/frameworks/reporter
@load base/frameworks/sumstats @load base/frameworks/sumstats
@load base/frameworks/tunnels @load base/frameworks/tunnels
@load base/frameworks/openflow
@load base/frameworks/netcontrol
@load base/protocols/conn @load base/protocols/conn
@load base/protocols/dhcp @load base/protocols/dhcp
@ -45,6 +48,7 @@
@load base/protocols/dns @load base/protocols/dns
@load base/protocols/ftp @load base/protocols/ftp
@load base/protocols/http @load base/protocols/http
@load base/protocols/imap
@load base/protocols/irc @load base/protocols/irc
@load base/protocols/krb @load base/protocols/krb
@load base/protocols/modbus @load base/protocols/modbus
@ -52,6 +56,7 @@
@load base/protocols/pop3 @load base/protocols/pop3
@load base/protocols/radius @load base/protocols/radius
@load base/protocols/rdp @load base/protocols/rdp
@load base/protocols/rfb
@load base/protocols/sip @load base/protocols/sip
@load base/protocols/snmp @load base/protocols/snmp
@load base/protocols/smtp @load base/protocols/smtp
@ -60,6 +65,7 @@
@load base/protocols/ssl @load base/protocols/ssl
@load base/protocols/syslog @load base/protocols/syslog
@load base/protocols/tunnels @load base/protocols/tunnels
@load base/protocols/xmpp
@load base/files/pe @load base/files/pe
@load base/files/hash @load base/files/hash

View file

@ -26,7 +26,7 @@ event ChecksumOffloading::check()
if ( done ) if ( done )
return; return;
local pkts_recvd = net_stats()$pkts_recvd; local pkts_recvd = get_net_stats()$pkts_recvd;
local bad_ip_checksum_pct = (pkts_recvd != 0) ? (bad_ip_checksums*1.0 / pkts_recvd*1.0) : 0; local bad_ip_checksum_pct = (pkts_recvd != 0) ? (bad_ip_checksums*1.0 / pkts_recvd*1.0) : 0;
local bad_tcp_checksum_pct = (pkts_recvd != 0) ? (bad_tcp_checksums*1.0 / pkts_recvd*1.0) : 0; local bad_tcp_checksum_pct = (pkts_recvd != 0) ? (bad_tcp_checksums*1.0 / pkts_recvd*1.0) : 0;
local bad_udp_checksum_pct = (pkts_recvd != 0) ? (bad_udp_checksums*1.0 / pkts_recvd*1.0) : 0; local bad_udp_checksum_pct = (pkts_recvd != 0) ? (bad_udp_checksums*1.0 / pkts_recvd*1.0) : 0;

View file

@ -47,7 +47,7 @@ export {
## S2 Connection established and close attempt by originator seen (but no reply from responder). ## S2 Connection established and close attempt by originator seen (but no reply from responder).
## S3 Connection established and close attempt by responder seen (but no reply from originator). ## S3 Connection established and close attempt by responder seen (but no reply from originator).
## RSTO Connection established, originator aborted (sent a RST). ## RSTO Connection established, originator aborted (sent a RST).
## RSTR Established, responder aborted. ## RSTR Responder sent a RST.
## RSTOS0 Originator sent a SYN followed by a RST, we never saw a SYN-ACK from the responder. ## RSTOS0 Originator sent a SYN followed by a RST, we never saw a SYN-ACK from the responder.
## RSTRH Responder sent a SYN ACK followed by a RST, we never saw a SYN from the (purported) originator. ## RSTRH Responder sent a SYN ACK followed by a RST, we never saw a SYN from the (purported) originator.
## SH Originator sent a SYN followed by a FIN, we never saw a SYN ACK from the responder (hence the connection was "half" open). ## SH Originator sent a SYN followed by a FIN, we never saw a SYN ACK from the responder (hence the connection was "half" open).

View file

@ -26,6 +26,7 @@ export {
[49] = "DHCID", [99] = "SPF", [100] = "DINFO", [101] = "UID", [49] = "DHCID", [99] = "SPF", [100] = "DINFO", [101] = "UID",
[102] = "GID", [103] = "UNSPEC", [249] = "TKEY", [250] = "TSIG", [102] = "GID", [103] = "UNSPEC", [249] = "TKEY", [250] = "TSIG",
[251] = "IXFR", [252] = "AXFR", [253] = "MAILB", [254] = "MAILA", [251] = "IXFR", [252] = "AXFR", [253] = "MAILB", [254] = "MAILA",
[257] = "CAA",
[32768] = "TA", [32769] = "DLV", [32768] = "TA", [32769] = "DLV",
[ANY] = "*", [ANY] = "*",
} &default = function(n: count): string { return fmt("query-%d", n); }; } &default = function(n: count): string { return fmt("query-%d", n); };

View file

@ -26,6 +26,10 @@ export {
## the DNS query. Also used in responses to match up replies to ## the DNS query. Also used in responses to match up replies to
## outstanding queries. ## outstanding queries.
trans_id: count &log &optional; trans_id: count &log &optional;
## Round trip time for the query and response. This indicates
## the delay between when the request was seen and when the
## answer started.
rtt: interval &log &optional;
## The domain name that is the subject of the DNS query. ## The domain name that is the subject of the DNS query.
query: string &log &optional; query: string &log &optional;
## The QCLASS value specifying the class of the query. ## The QCLASS value specifying the class of the query.
@ -52,7 +56,7 @@ export {
## The Recursion Available bit in a response message indicates ## The Recursion Available bit in a response message indicates
## that the name server supports recursive queries. ## that the name server supports recursive queries.
RA: bool &log &default=F; RA: bool &log &default=F;
## A reserved field that is currently supposed to be zero in all ## A reserved field that is usually zero in
## queries and responses. ## queries and responses.
Z: count &log &default=0; Z: count &log &default=0;
## The set of resource descriptions in the query answer. ## The set of resource descriptions in the query answer.
@ -311,6 +315,16 @@ hook DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
c$dns$AA = msg$AA; c$dns$AA = msg$AA;
c$dns$RA = msg$RA; c$dns$RA = msg$RA;
if ( ! c$dns?$rtt )
{
c$dns$rtt = network_time() - c$dns$ts;
# A zero RTT could mean that only a reply was seen, since
# we assume there must be some passage of time between
# request and response.
if ( c$dns$rtt == 0secs )
delete c$dns$rtt;
}
if ( reply != "" ) if ( reply != "" )
{ {
if ( ! c$dns?$answers ) if ( ! c$dns?$answers )
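A hypothetical consumer of the new rtt field, assuming the DNS::log_dns logging event declared in this script; the one-second cutoff is arbitrary.
event DNS::log_dns(rec: DNS::Info)
	{
	if ( rec?$rtt && rec?$query && rec$rtt > 1sec )
		print fmt("slow DNS answer for %s: %s", rec$query, rec$rtt);
	}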

View file

@ -213,7 +213,7 @@ event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &prior
# on a different file could be checked, but the file size will # on a different file could be checked, but the file size will
# be overwritten by the server response to the RETR command # be overwritten by the server response to the RETR command
# if that's given as well which would be more correct. # if that's given as well which would be more correct.
c$ftp$file_size = extract_count(msg); c$ftp$file_size = extract_count(msg, F);
} }
# PASV and EPSV processing # PASV and EPSV processing
@ -241,10 +241,10 @@ event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &prior
if ( [c$ftp$cmdarg$cmd, code] in directory_cmds ) if ( [c$ftp$cmdarg$cmd, code] in directory_cmds )
{ {
if ( c$ftp$cmdarg$cmd == "CWD" ) if ( c$ftp$cmdarg$cmd == "CWD" )
c$ftp$cwd = build_path(c$ftp$cwd, c$ftp$cmdarg$arg); c$ftp$cwd = build_path_compressed(c$ftp$cwd, c$ftp$cmdarg$arg);
else if ( c$ftp$cmdarg$cmd == "CDUP" ) else if ( c$ftp$cmdarg$cmd == "CDUP" )
c$ftp$cwd = cat(c$ftp$cwd, "/.."); c$ftp$cwd = build_path_compressed(c$ftp$cwd, "/..");
else if ( c$ftp$cmdarg$cmd == "PWD" || c$ftp$cmdarg$cmd == "XPWD" ) else if ( c$ftp$cmdarg$cmd == "PWD" || c$ftp$cmdarg$cmd == "XPWD" )
c$ftp$cwd = extract_path(msg); c$ftp$cwd = extract_path(msg);

View file

@ -17,12 +17,18 @@ export {
## An ordered vector of file unique IDs. ## An ordered vector of file unique IDs.
orig_fuids: vector of string &log &optional; orig_fuids: vector of string &log &optional;
## An ordered vector of filenames from the client.
orig_filenames: vector of string &log &optional;
## An ordered vector of mime types. ## An ordered vector of mime types.
orig_mime_types: vector of string &log &optional; orig_mime_types: vector of string &log &optional;
## An ordered vector of file unique IDs. ## An ordered vector of file unique IDs.
resp_fuids: vector of string &log &optional; resp_fuids: vector of string &log &optional;
## An ordered vector of filenames from the server.
resp_filenames: vector of string &log &optional;
## An ordered vector of mime types. ## An ordered vector of mime types.
resp_mime_types: vector of string &log &optional; resp_mime_types: vector of string &log &optional;
@ -82,13 +88,31 @@ event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priori
c$http$orig_fuids = string_vec(f$id); c$http$orig_fuids = string_vec(f$id);
else else
c$http$orig_fuids[|c$http$orig_fuids|] = f$id; c$http$orig_fuids[|c$http$orig_fuids|] = f$id;
if ( f$info?$filename )
{
if ( ! c$http?$orig_filenames )
c$http$orig_filenames = string_vec(f$info$filename);
else
c$http$orig_filenames[|c$http$orig_filenames|] = f$info$filename;
} }
}
else else
{ {
if ( ! c$http?$resp_fuids ) if ( ! c$http?$resp_fuids )
c$http$resp_fuids = string_vec(f$id); c$http$resp_fuids = string_vec(f$id);
else else
c$http$resp_fuids[|c$http$resp_fuids|] = f$id; c$http$resp_fuids[|c$http$resp_fuids|] = f$id;
if ( f$info?$filename )
{
if ( ! c$http?$resp_filenames )
c$http$resp_filenames = string_vec(f$info$filename);
else
c$http$resp_filenames[|c$http$resp_filenames|] = f$info$filename;
}
} }
} }
} }
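An illustrative reader of the new filename vectors, assuming the HTTP::log_http logging event; purely a sketch.
event HTTP::log_http(rec: HTTP::Info)
	{
	if ( rec?$resp_filenames )
		for ( i in rec$resp_filenames )
			print fmt("server supplied filename: %s", rec$resp_filenames[i]);
	}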

View file

@ -21,6 +21,7 @@ export {
## not. ## not.
const default_capture_password = F &redef; const default_capture_password = F &redef;
## The record type which contains the fields of the HTTP log.
type Info: record { type Info: record {
## Timestamp for when the request happened. ## Timestamp for when the request happened.
ts: time &log; ts: time &log;
@ -59,9 +60,6 @@ export {
info_code: count &log &optional; info_code: count &log &optional;
## Last seen 1xx informational reply message returned by the server. ## Last seen 1xx informational reply message returned by the server.
info_msg: string &log &optional; info_msg: string &log &optional;
## Filename given in the Content-Disposition header sent by the
## server.
filename: string &log &optional;
## A set of indicators of various attributes discovered and ## A set of indicators of various attributes discovered and
## related to a particular request/response pair. ## related to a particular request/response pair.
tags: set[Tags] &log; tags: set[Tags] &log;

View file

@ -0,0 +1,5 @@
Support for the Internet Message Access Protocol (IMAP).
Note that the IMAP analyzer currently only examines IMAP sessions up to the
point where they do or do not switch to TLS via StartTLS. Hence, we do not
extract mail from IMAP sessions, only X.509 certificates.

View file

@ -0,0 +1,2 @@
@load ./main

View file

@ -0,0 +1,11 @@
module IMAP;
const ports = { 143/tcp };
redef likely_server_ports += { ports };
event bro_init() &priority=5
{
Analyzer::register_for_ports(Analyzer::ANALYZER_IMAP, ports);
}

View file

@ -0,0 +1 @@
Support for Remote Framebuffer (RFB) analysis. This includes all VNC servers.

View file

@ -0,0 +1,3 @@
# Generated by binpac_quickstart
@load ./main
@load-sigs ./dpd.sig

View file

@ -0,0 +1,12 @@
signature dpd_rfb_server {
ip-proto == tcp
payload /^RFB/
requires-reverse-signature dpd_rfb_client
enable "rfb"
}
signature dpd_rfb_client {
ip-proto == tcp
payload /^RFB/
tcp-state originator
}

View file

@ -0,0 +1,165 @@
module RFB;
export {
redef enum Log::ID += { LOG };
## The record type which contains the fields of the RFB log.
type Info: record {
## Timestamp for when the event happened.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Major version of the client.
client_major_version: string &log &optional;
## Minor version of the client.
client_minor_version: string &log &optional;
## Major version of the server.
server_major_version: string &log &optional;
## Minor version of the server.
server_minor_version: string &log &optional;
## Identifier of authentication method used.
authentication_method: string &log &optional;
## Whether or not authentication was successful.
auth: bool &log &optional;
## Whether the client has an exclusive or a shared session.
share_flag: bool &log &optional;
## Name of the screen that is being shared.
desktop_name: string &log &optional;
## Width of the screen that is being shared.
width: count &log &optional;
## Height of the screen that is being shared.
height: count &log &optional;
## Internally used value to determine if this connection
## has already been logged.
done: bool &default=F;
};
global log_rfb: event(rec: Info);
}
function friendly_auth_name(auth: count): string
{
switch (auth) {
case 0:
return "Invalid";
case 1:
return "None";
case 2:
return "VNC";
case 16:
return "Tight";
case 17:
return "Ultra";
case 18:
return "TLS";
case 19:
return "VeNCrypt";
case 20:
return "GTK-VNC SASL";
case 21:
return "MD5 hash authentication";
case 22:
return "Colin Dean xvp";
case 30:
return "Apple Remote Desktop";
}
return "RealVNC";
}
redef record connection += {
rfb: Info &optional;
};
event bro_init() &priority=5
{
Log::create_stream(RFB::LOG, [$columns=Info, $ev=log_rfb, $path="rfb"]);
}
function write_log(c:connection)
{
local state = c$rfb;
if ( state$done )
{
return;
}
Log::write(RFB::LOG, c$rfb);
c$rfb$done = T;
}
function set_session(c: connection)
{
if ( ! c?$rfb )
{
local info: Info;
info$ts = network_time();
info$uid = c$uid;
info$id = c$id;
c$rfb = info;
}
}
event rfb_event(c: connection) &priority=5
{
set_session(c);
}
event rfb_client_version(c: connection, major_version: string, minor_version: string) &priority=5
{
set_session(c);
c$rfb$client_major_version = major_version;
c$rfb$client_minor_version = minor_version;
}
event rfb_server_version(c: connection, major_version: string, minor_version: string) &priority=5
{
set_session(c);
c$rfb$server_major_version = major_version;
c$rfb$server_minor_version = minor_version;
}
event rfb_authentication_type(c: connection, authtype: count) &priority=5
{
set_session(c);
c$rfb$authentication_method = friendly_auth_name(authtype);
}
event rfb_server_parameters(c: connection, name: string, width: count, height: count) &priority=5
{
set_session(c);
c$rfb$desktop_name = name;
c$rfb$width = width;
c$rfb$height = height;
}
event rfb_server_parameters(c: connection, name: string, width: count, height: count) &priority=-5
{
write_log(c);
}
event rfb_auth_result(c: connection, result: bool) &priority=5
{
c$rfb$auth = !result;
}
event rfb_share_flag(c: connection, flag: bool) &priority=5
{
c$rfb$share_flag = flag;
}
event connection_state_remove(c: connection) &priority=-5
{
if ( c?$rfb )
{
write_log(c);
}
}
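A small hypothetical consumer of the new log stream that flags failed authentications using the auth field documented above.
event RFB::log_rfb(rec: RFB::Info)
	{
	if ( rec?$auth && ! rec$auth )
		print fmt("failed RFB authentication on %s:%s", rec$id$resp_h, rec$id$resp_p);
	}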

Some files were not shown because too many files have changed in this diff.