Commit eacdffff90 (mirror of https://github.com/zeek/zeek.git)

Merge remote-tracking branch 'origin/master' into topic/bernhard/software

Conflicts:
	scripts/base/frameworks/software/main.bro
	scripts/policy/protocols/ftp/software.bro

262 changed files with 16869 additions and 5714 deletions
@@ -9,10 +9,10 @@ redef peer_description = Cluster::node;

# Add a cluster prefix.
@prefixes += cluster

## If this script isn't found anywhere, the cluster bombs out.
## Loading the cluster framework requires that a script by this name exists
## somewhere in the BROPATH. The only thing in the file should be the
## cluster definition in the :bro:id:`Cluster::nodes` variable.
# If this script isn't found anywhere, the cluster bombs out.
# Loading the cluster framework requires that a script by this name exists
# somewhere in the BROPATH. The only thing in the file should be the
# cluster definition in the :bro:id:`Cluster::nodes` variable.
@load cluster-layout

@if ( Cluster::node in Cluster::nodes )
@@ -1,21 +1,45 @@
##! A framework for establishing and controlling a cluster of Bro instances.
##! In order to use the cluster framework, a script named
##! ``cluster-layout.bro`` must exist somewhere in Bro's script search path
##! which has a cluster definition of the :bro:id:`Cluster::nodes` variable.
##! The ``CLUSTER_NODE`` environment variable or :bro:id:`Cluster::node`
##! must also be sent and the cluster framework loaded as a package like
##! ``@load base/frameworks/cluster``.

@load base/frameworks/control

module Cluster;

export {
	## The cluster logging stream identifier.
	redef enum Log::ID += { LOG };

	## The record type which contains the column fields of the cluster log.
	type Info: record {
		## The time at which a cluster message was generated.
		ts: time;
		## A message indicating information about the cluster's operation.
		message: string;
	} &log;

	## Types of nodes that are allowed to participate in the cluster
	## configuration.
	type NodeType: enum {
		## A dummy node type indicating the local node is not operating
		## within a cluster.
		NONE,
		## A node type which is allowed to view/manipulate the configuration
		## of other nodes in the cluster.
		CONTROL,
		## A node type responsible for log and policy management.
		MANAGER,
		## A node type for relaying worker node communication and synchronizing
		## worker node state.
		PROXY,
		## The node type doing all the actual traffic analysis.
		WORKER,
		## A node acting as a traffic recorder using the
		## `Time Machine <http://tracker.bro-ids.org/time-machine>`_ software.
		TIME_MACHINE,
	};
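# For illustration only (not part of this commit): a minimal ``cluster-layout.bro``
# along the lines documented above might look like this; the node names, addresses,
# and ports are hypothetical.
redef Cluster::nodes = {
	["manager"]  = [$node_type=Cluster::MANAGER, $ip=10.0.0.1, $p=47761/tcp,
	                $workers=set("worker-1")],
	["proxy-1"]  = [$node_type=Cluster::PROXY, $ip=10.0.0.2, $p=47762/tcp,
	                $manager="manager", $workers=set("worker-1")],
	["worker-1"] = [$node_type=Cluster::WORKER, $ip=10.0.0.3, $p=47763/tcp,
	                $interface="eth0", $manager="manager", $proxy="proxy-1"]
};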
@@ -49,30 +73,38 @@ export {

	## Record type to indicate a node in a cluster.
	type Node: record {
		## Identifies the type of cluster node in this node's configuration.
		node_type: NodeType;
		## The IP address of the cluster node.
		ip: addr;
		## The port to which the this local node can connect when
		## establishing communication.
		p: port;

		## Identifier for the interface a worker is sniffing.
		interface: string &optional;

		## Manager node this node uses. For workers and proxies.
		## Name of the manager node this node uses. For workers and proxies.
		manager: string &optional;
		## Proxy node this node uses. For workers and managers.
		## Name of the proxy node this node uses. For workers and managers.
		proxy: string &optional;
		## Worker nodes that this node connects with. For managers and proxies.
		## Names of worker nodes that this node connects with.
		## For managers and proxies.
		workers: set[string] &optional;
		## Name of a time machine node with which this node connects.
		time_machine: string &optional;
	};

	## This function can be called at any time to determine if the cluster
	## framework is being enabled for this run.
	##
	## Returns: True if :bro:id:`Cluster::node` has been set.
	global is_enabled: function(): bool;

	## This function can be called at any time to determine what type of
	## cluster node the current Bro instance is going to be acting as.
	## If :bro:id:`Cluster::is_enabled` returns false, then
	## :bro:enum:`Cluster::NONE` is returned.
	##
	## Returns: The :bro:type:`Cluster::NodeType` the calling node acts as.
	global local_node_type: function(): NodeType;

	## This gives the value for the number of workers currently connected to,
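# For illustration only (not part of this commit): cluster-aware scripts typically
# gate their logic on the two functions declared above, e.g.:
event bro_init()
	{
	if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
		print "this instance is acting as the cluster manager";
	}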
@@ -1,3 +1,7 @@
##! Redefines the options common to all proxy nodes within a Bro cluster.
##! In particular, proxies are not meant to produce logs locally and they
##! do not forward events anywhere, they mainly synchronize state between
##! worker nodes.

@prefixes += cluster-proxy
@@ -1,3 +1,7 @@
##! Redefines some options common to all worker nodes within a Bro cluster.
##! In particular, worker nodes do not produce logs locally, instead they
##! send them off to a manager node for processing.

@prefixes += cluster-worker

## Don't do any local logging.
@@ -1,3 +1,6 @@
##! This script establishes communication among all nodes in a cluster
##! as defined by :bro:id:`Cluster::nodes`.

@load ./main
@load base/frameworks/communication

@@ -41,7 +44,7 @@ event bro_init() &priority=9
	{
	if ( n$node_type == WORKER && n$proxy == node )
		Communication::nodes[i] =
			[$host=n$ip, $connect=F, $class=i, $events=worker2proxy_events];
			[$host=n$ip, $connect=F, $class=i, $sync=T, $auth=T, $events=worker2proxy_events];

	# accepts connections from the previous one.
	# (This is not ideal for setups with many proxies)
@@ -1,11 +1,13 @@
##! Connect to remote Bro or Broccoli instances to share state and/or transfer
##! events.
##! Facilitates connecting to remote Bro or Broccoli instances to share state
##! and/or transfer events.

@load base/frameworks/packet-filter

module Communication;

export {

	## The communication logging stream identifier.
	redef enum Log::ID += { LOG };

	## Which interface to listen on (0.0.0.0 for any interface).

@@ -21,14 +23,25 @@ export {
	## compression.
	global compression_level = 0 &redef;

	## A record type containing the column fields of the communication log.
	type Info: record {
		## The network time at which a communication event occurred.
		ts: time &log;
		## The peer name (if any) for which a communication event is concerned.
		peer: string &log &optional;
		## Where the communication event message originated from, that is,
		## either from the scripting layer or inside the Bro process.
		src_name: string &log &optional;
		## .. todo:: currently unused.
		connected_peer_desc: string &log &optional;
		## .. todo:: currently unused.
		connected_peer_addr: addr &log &optional;
		## .. todo:: currently unused.
		connected_peer_port: port &log &optional;
		## The severity of the communication event message.
		level: string &log &optional;
		## A message describing the communication event between Bro or
		## Broccoli instances.
		message: string &log;
	};

@@ -77,7 +90,7 @@ export {
		auth: bool &default = F;

		## If not set, no capture filter is sent.
		## If set to "", the default cature filter is sent.
		## If set to "", the default capture filter is sent.
		capture_filter: string &optional;

		## Whether to use SSL-based communication.

@@ -96,11 +109,25 @@ export {
	## The table of Bro or Broccoli nodes that Bro will initiate connections
	## to or respond to connections from.
	global nodes: table[string] of Node &redef;

	## A table of peer nodes for which this node issued a
	## :bro:id:`Communication::connect_peer` call but with which a connection
	## has not yet been established or with which a connection has been
	## closed and is currently in the process of retrying to establish.
	## When a connection is successfully established, the peer is removed
	## from the table.
	global pending_peers: table[peer_id] of Node;

	## A table of peer nodes for which this node has an established connection.
	## Peers are automatically removed if their connection is closed and
	## automatically added back if a connection is re-established later.
	global connected_peers: table[peer_id] of Node;

	## Connect to nodes[node], independent of its "connect" flag.
	## Connect to a node in :bro:id:`Communication::nodes` independent
	## of its "connect" flag.
	##
	## peer: the string used to index a particular node within the
	##       :bro:id:`Communication::nodes` table.
	global connect_peer: function(peer: string);
}
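# For illustration only (hypothetical peer name, address, and event pattern): with
# the API above, a peer can be registered in Communication::nodes and connected to
# on demand.
redef Communication::nodes += {
	["peer-1"] = [$host=192.168.1.10, $connect=F, $events=/my_event/]
};

event bro_init()
	{
	Communication::connect_peer("peer-1");
	}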
@@ -1,43 +1,30 @@
##! This is a utility script that sends the current values of all &redef'able
##! consts to a remote Bro then sends the :bro:id:`configuration_update` event
##! and terminates processing.
##!
##! Intended to be used from the command line like this when starting a controller::
##!
##!     bro <scripts> frameworks/control/controller Control::host=<host_addr> Control::port=<host_port> Control::cmd=<command> [Control::arg=<arg>]
##!
##! A controllee only needs to load the controllee script in addition
##! to the specific analysis scripts desired. It may also need a node
##! configured as a controller node in the communications nodes configuration::
##!
##!     bro <scripts> frameworks/control/controllee
##!
##! To use the framework as a controllee, it only needs to be loaded and
##! the controlled node need to accept all events in the "Control::" namespace
##! from the host where the control actions will be performed from along with
##! using the "control" class.
##! The control framework provides the foundation for providing "commands"
##! that can be taken remotely at runtime to modify a running Bro instance
##! or collect information from the running instance.

module Control;

export {
	## This is the address of the host that will be controlled.
	## The address of the host that will be controlled.
	const host = 0.0.0.0 &redef;

	## This is the port of the host that will be controlled.
	## The port of the host that will be controlled.
	const host_port = 0/tcp &redef;

	## This is the command that is being done. It's typically set on the
	## command line and influences whether this instance starts up as a
	## controller or controllee.
	## The command that is being done. It's typically set on the
	## command line.
	const cmd = "" &redef;

	## This can be used by commands that take an argument.
	const arg = "" &redef;

	## Events that need to be handled by controllers.
	const controller_events = /Control::.*_request/ &redef;

	## Events that need to be handled by controllees.
	const controllee_events = /Control::.*_response/ &redef;

	## These are the commands that can be given on the command line for
	## The commands that can currently be given on the command line for
	## remote control.
	const commands: set[string] = {
		"id_value",

@@ -45,15 +32,15 @@ export {
		"net_stats",
		"configuration_update",
		"shutdown",
	};
	} &redef;

	## Variable IDs that are to be ignored by the update process.
	const ignore_ids: set[string] = {
	};
	const ignore_ids: set[string] = { };

	## Event for requesting the value of an ID (a variable).
	global id_value_request: event(id: string);
	## Event for returning the value of an ID after an :bro:id:`id_request` event.
	## Event for returning the value of an ID after an
	## :bro:id:`Control::id_value_request` event.
	global id_value_response: event(id: string, val: string);

	## Requests the current communication status.

@@ -68,7 +55,8 @@ export {

	## Inform the remote Bro instance that it's configuration may have been updated.
	global configuration_update_request: event();
	## This event is a wrapper and alias for the :bro:id:`configuration_update_request` event.
	## This event is a wrapper and alias for the
	## :bro:id:`Control::configuration_update_request` event.
	## This event is also a primary hooking point for the control framework.
	global configuration_update: event();
	## Message in response to a configuration update request.
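# For illustration only (hypothetical controller address): a controllee would also
# authorize the controller host in its communication configuration, along the lines
# the documentation above describes, e.g.:
redef Communication::nodes += {
	["control"] = [$host=10.0.0.100, $connect=F, $class="control",
	               $events=Control::controller_events]
};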
@@ -80,15 +80,15 @@ signature irc_server_reply {
  tcp-state responder
}

signature irc_sig3 {
signature irc_server_to_server1 {
  ip-proto == tcp
  payload /(.*\x0a)*(\x20)*[Ss][Ee][Rr][Vv][Ee][Rr](\x20)+.+\x0a/
  payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
}

signature irc_sig4 {
signature irc_server_to_server2 {
  ip-proto == tcp
  payload /(.*\x0a)*(\x20)*[Ss][Ee][Rr][Vv][Ee][Rr](\x20)+.+\x0a/
  requires-reverse-signature irc_sig3
  payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
  requires-reverse-signature irc_server_to_server1
  enable "irc"
}
@@ -7,14 +7,16 @@ module DPD;
redef signature_files += "base/frameworks/dpd/dpd.sig";

export {
	## Add the DPD logging stream identifier.
	redef enum Log::ID += { LOG };

	## The record type defining the columns to log in the DPD logging stream.
	type Info: record {
		## Timestamp for when protocol analysis failed.
		ts: time &log;
		## Connection unique ID.
		uid: string &log;
		## Connection ID.
		## Connection ID containing the 4-tuple which identifies endpoints.
		id: conn_id &log;
		## Transport protocol for the violation.
		proto: transport_proto &log;
@@ -11,7 +11,7 @@
# user_name
# file_name
# file_md5
# x509_cert - DER encoded, not PEM (ascii armored)
# x509_md5

# Example tags:
# infrastructure

@@ -25,6 +25,7 @@
module Intel;

export {
	## The intel logging stream identifier.
	redef enum Log::ID += { LOG };

	redef enum Notice::Type += {

@@ -33,72 +34,117 @@ export {
		Detection,
	};

	## Record type used for logging information from the intelligence framework.
	## Primarily for problems or oddities with inserting and querying data.
	## This is important since the content of the intelligence framework can
	## change quite dramatically during runtime and problems may be introduced
	## into the data.
	type Info: record {
		## The current network time.
		ts: time &log;
		## Represents the severity of the message.
		## This value should be one of: "info", "warn", "error"
		level: string &log;
		## The message.
		message: string &log;
	};

	## Record to represent metadata associated with a single piece of
	## intelligence.
	type MetaData: record {
		## A description for the data.
		desc: string &optional;
		## A URL where more information may be found about the intelligence.
		url: string &optional;
		## The time at which the data was first declared to be intelligence.
		first_seen: time &optional;
		## When this data was most recent inserted into the framework.
		latest_seen: time &optional;
		## Arbitrary text tags for the data.
		tags: set[string];
	};

	## Record to represent a singular piece of intelligence.
	type Item: record {
		## If the data is an IP address, this hold the address.
		ip: addr &optional;
		## If the data is textual, this holds the text.
		str: string &optional;
		## If the data is numeric, this holds the number.
		num: int &optional;
		## The subtype of the data for when either the $str or $num fields are
		## given. If one of those fields are given, this field must be present.
		subtype: string &optional;

		## The next five fields are temporary until a better model for
		## attaching metadata to an intelligence item is created.
		desc: string &optional;
		url: string &optional;
		first_seen: time &optional;
		latest_seen: time &optional;
		tags: set[string];

		## These single string tags are throw away until pybroccoli supports sets
		## These single string tags are throw away until pybroccoli supports sets.
		tag1: string &optional;
		tag2: string &optional;
		tag3: string &optional;
	};

	## Record model used for constructing queries against the intelligence
	## framework.
	type QueryItem: record {
		ip: addr &optional;
		str: string &optional;
		num: int &optional;
		subtype: string &optional;
		## If an IP address is being queried for, this field should be given.
		ip: addr &optional;
		## If a string is being queried for, this field should be given.
		str: string &optional;
		## If numeric data is being queried for, this field should be given.
		num: int &optional;
		## If either a string or number is being queried for, this field should
		## indicate the subtype of the data.
		subtype: string &optional;

		or_tags: set[string] &optional;
		and_tags: set[string] &optional;
		## A set of tags where if a single metadata record attached to an item
		## has any one of the tags defined in this field, it will match.
		or_tags: set[string] &optional;
		## A set of tags where a single metadata record attached to an item
		## must have all of the tags defined in this field.
		and_tags: set[string] &optional;

		## The predicate can be given when searching for a match. It will
		## be tested against every :bro:type:`MetaData` item associated with
		## the data being matched on. If it returns T a single time, the
		## matcher will consider that the item has matched.
		pred: function(meta: Intel::MetaData): bool &optional;
		## be tested against every :bro:type:`Intel::MetaData` item associated
		## with the data being matched on. If it returns T a single time, the
		## matcher will consider that the item has matched. This field can
		## be used for constructing arbitrarily complex queries that may not
		## be possible with the $or_tags or $and_tags fields.
		pred: function(meta: Intel::MetaData): bool &optional;
	};

	## Function to insert data into the intelligence framework.
	##
	## item: The data item.
	##
	## Returns: T if the data was successfully inserted into the framework,
	##          otherwise it returns F.
	global insert: function(item: Item): bool;

	## A wrapper for the :bro:id:`Intel::insert` function. This is primarily
	## used as the external API for inserting data into the intelligence
	## using Broccoli.
	global insert_event: event(item: Item);

	## Function for matching data within the intelligence framework.
	global matcher: function(item: QueryItem): bool;

	type MetaDataStore: table[count] of MetaData;
	type DataStore: record {
		ip_data: table[addr] of MetaDataStore;
		## The first string is the actual value and the second string is the subtype.
		string_data: table[string, string] of MetaDataStore;
		int_data: table[int, string] of MetaDataStore;
	};
	global data_store: DataStore;

}

type MetaDataStore: table[count] of MetaData;
type DataStore: record {
	ip_data: table[addr] of MetaDataStore;
	# The first string is the actual value and the second string is the subtype.
	string_data: table[string, string] of MetaDataStore;
	int_data: table[int, string] of MetaDataStore;
};
global data_store: DataStore;

event bro_init()
	{
	Log::create_stream(Intel::LOG, [$columns=Info]);
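# For illustration only (hypothetical data and tag): inserting an item and later
# querying it through the API declared above might look like this.
event bro_init()
	{
	Intel::insert([$ip=203.0.113.50, $desc="host seen scanning the honeynet",
	               $tags=set("infrastructure")]);
	}

event connection_established(c: connection)
	{
	if ( Intel::matcher([$ip=c$id$orig_h, $or_tags=set("infrastructure")]) )
		print fmt("intelligence match for %s", c$id$orig_h);
	}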
@@ -1,16 +1,16 @@
##! The Bro logging interface.
##!
##! See XXX for a introduction to Bro's logging framework.
##! See :doc:`/logging` for a introduction to Bro's logging framework.

module Log;

# Log::ID and Log::Writer are defined in bro.init due to circular dependencies.
# Log::ID and Log::Writer are defined in types.bif due to circular dependencies.

export {
	## If true, is local logging is by default enabled for all filters.
	## If true, local logging is by default enabled for all filters.
	const enable_local_logging = T &redef;

	## If true, is remote logging is by default enabled for all filters.
	## If true, remote logging is by default enabled for all filters.
	const enable_remote_logging = T &redef;

	## Default writer to use if a filter does not specify

@@ -23,21 +23,24 @@ export {
		columns: any;

		## Event that will be raised once for each log entry.
		## The event receives a single same parameter, an instance of type ``columns``.
		## The event receives a single same parameter, an instance of type
		## ``columns``.
		ev: any &optional;
	};

	## Default function for building the path values for log filters if not
	## speficied otherwise by a filter. The default implementation uses ``id``
	## Builds the default path values for log filters if not otherwise
	## specified by a filter. The default implementation uses *id*
	## to derive a name.
	##
	## id: The log stream.
	## id: The ID associated with the log stream.
	##
	## path: A suggested path value, which may be either the filter's
	##       ``path`` if defined, else a previous result from the function.
	##       If no ``path`` is defined for the filter, then the first call
	##       to the function will contain an empty string.
	##
	## rec: An instance of the streams's ``columns`` type with its
	##      fields set to the values to logged.
	##      fields set to the values to be logged.
	##
	## Returns: The path to be used for the filter.
	global default_path_func: function(id: ID, path: string, rec: any) : string &redef;

@@ -46,7 +49,7 @@ export {

	## Information passed into rotation callback functions.
	type RotationInfo: record {
		writer: Writer;   ##< Writer.
		writer: Writer;   ##< The :bro:type:`Log::Writer` being used.
		fname: string;    ##< Full name of the rotated file.
		path: string;     ##< Original path value.
		open: time;       ##< Time when opened.

@@ -57,25 +60,26 @@ export {
	## Default rotation interval. Zero disables rotation.
	const default_rotation_interval = 0secs &redef;

	## Default naming format for timestamps embedded into filenames. Uses a strftime() style.
	## Default naming format for timestamps embedded into filenames.
	## Uses a ``strftime()`` style.
	const default_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef;

	## Default shell command to run on rotated files. Empty for none.
	const default_rotation_postprocessor_cmd = "" &redef;

	## Specifies the default postprocessor function per writer type. Entries in this
	## table are initialized by each writer type.
	## Specifies the default postprocessor function per writer type.
	## Entries in this table are initialized by each writer type.
	const default_rotation_postprocessors: table[Writer] of function(info: RotationInfo) : bool &redef;

	## Filter customizing logging.
	## A filter type describes how to customize logging streams.
	type Filter: record {
		## Descriptive name to reference this filter.
		name: string;

		## The writer to use.
		## The logging writer implementation to use.
		writer: Writer &default=default_writer;

		## Predicate indicating whether a log entry should be recorded.
		## Indicates whether a log entry should be recorded.
		## If not given, all entries are recorded.
		##
		## rec: An instance of the streams's ``columns`` type with its

@@ -101,13 +105,15 @@ export {
		## easy to flood the disk by returning a new string for each
		## connection ...
		##
		## id: The log stream.
		## id: The ID associated with the log stream.
		##
		## path: A suggested path value, which may be either the filter's
		##       ``path`` if defined, else a previous result from the function.
		##       If no ``path`` is defined for the filter, then the first call
		##       to the function will contain an empty string.
		##
		## rec: An instance of the streams's ``columns`` type with its
		##      fields set to the values to logged.
		##      fields set to the values to be logged.
		##
		## Returns: The path to be used for the filter.
		path_func: function(id: ID, path: string, rec: any): string &optional;

@@ -129,27 +135,183 @@ export {
		## Rotation interval.
		interv: interval &default=default_rotation_interval;

		## Callback function to trigger for rotated files. If not set,
		## the default comes out of default_rotation_postprocessors.
		## Callback function to trigger for rotated files. If not set, the
		## default comes out of :bro:id:`Log::default_rotation_postprocessors`.
		postprocessor: function(info: RotationInfo) : bool &optional;
	};

	## Sentinel value for indicating that a filter was not found when looked up.
	const no_filter: Filter = [$name="<not found>"]; # Sentinel.
	const no_filter: Filter = [$name="<not found>"];

	# TODO: Document.
	## Creates a new logging stream with the default filter.
	##
	## id: The ID enum to be associated with the new logging stream.
	##
	## stream: A record defining the content that the new stream will log.
	##
	## Returns: True if a new logging stream was successfully created and
	##          a default filter added to it.
	##
	## .. bro:see:: Log::add_default_filter Log::remove_default_filter
	global create_stream: function(id: ID, stream: Stream) : bool;

	## Enables a previously disabled logging stream. Disabled streams
	## will not be written to until they are enabled again. New streams
	## are enabled by default.
	##
	## id: The ID associated with the logging stream to enable.
	##
	## Returns: True if the stream is re-enabled or was not previously disabled.
	##
	## .. bro:see:: Log::disable_stream
	global enable_stream: function(id: ID) : bool;

	## Disables a currently enabled logging stream. Disabled streams
	## will not be written to until they are enabled again. New streams
	## are enabled by default.
	##
	## id: The ID associated with the logging stream to disable.
	##
	## Returns: True if the stream is now disabled or was already disabled.
	##
	## .. bro:see:: Log::enable_stream
	global disable_stream: function(id: ID) : bool;

	## Adds a custom filter to an existing logging stream. If a filter
	## with a matching ``name`` field already exists for the stream, it
	## is removed when the new filter is successfully added.
	##
	## id: The ID associated with the logging stream to filter.
	##
	## filter: A record describing the desired logging parameters.
	##
	## Returns: True if the filter was sucessfully added, false if
	##          the filter was not added or the *filter* argument was not
	##          the correct type.
	##
	## .. bro:see:: Log::remove_filter Log::add_default_filter
	##    Log::remove_default_filter
	global add_filter: function(id: ID, filter: Filter) : bool;

	## Removes a filter from an existing logging stream.
	##
	## id: The ID associated with the logging stream from which to
	##     remove a filter.
	##
	## name: A string to match against the ``name`` field of a
	##       :bro:type:`Log::Filter` for identification purposes.
	##
	## Returns: True if the logging stream's filter was removed or
	##          if no filter associated with *name* was found.
	##
	## .. bro:see:: Log::remove_filter Log::add_default_filter
	##    Log::remove_default_filter
	global remove_filter: function(id: ID, name: string) : bool;

	global get_filter: function(id: ID, name: string) : Filter; # Returns no_filter if not found.

	## Gets a filter associated with an existing logging stream.
	##
	## id: The ID associated with a logging stream from which to
	##     obtain one of its filters.
	##
	## name: A string to match against the ``name`` field of a
	##       :bro:type:`Log::Filter` for identification purposes.
	##
	## Returns: A filter attached to the logging stream *id* matching
	##          *name* or, if no matches are found returns the
	##          :bro:id:`Log::no_filter` sentinel value.
	##
	## .. bro:see:: Log::add_filter Log::remove_filter Log::add_default_filter
	##    Log::remove_default_filter
	global get_filter: function(id: ID, name: string) : Filter;

	## Writes a new log line/entry to a logging stream.
	##
	## id: The ID associated with a logging stream to be written to.
	##
	## columns: A record value describing the values of each field/column
	##          to write to the log stream.
	##
	## Returns: True if the stream was found and no error occurred in writing
	##          to it or if the stream was disabled and nothing was written.
	##          False if the stream was was not found, or the *columns*
	##          argument did not match what the stream was initially defined
	##          to handle, or one of the stream's filters has an invalid
	##          ``path_func``.
	##
	## .. bro:see: Log::enable_stream Log::disable_stream
	global write: function(id: ID, columns: any) : bool;

	## Sets the buffering status for all the writers of a given logging stream.
	## A given writer implementation may or may not support buffering and if it
	## doesn't then toggling buffering with this function has no effect.
	##
	## id: The ID associated with a logging stream for which to
	##     enable/disable buffering.
	##
	## buffered: Whether to enable or disable log buffering.
	##
	## Returns: True if buffering status was set, false if the logging stream
	##          does not exist.
	##
	## .. bro:see:: Log::flush
	global set_buf: function(id: ID, buffered: bool): bool;

	## Flushes any currently buffered output for all the writers of a given
	## logging stream.
	##
	## id: The ID associated with a logging stream for which to flush buffered
	##     data.
	##
	## Returns: True if all writers of a log stream were signalled to flush
	##          buffered data or if the logging stream is disabled,
	##          false if the logging stream does not exist.
	##
	## .. bro:see:: Log::set_buf Log::enable_stream Log::disable_stream
	global flush: function(id: ID): bool;

	## Adds a default :bro:type:`Log::Filter` record with ``name`` field
	## set as "default" to a given logging stream.
	##
	## id: The ID associated with a logging stream for which to add a default
	##     filter.
	##
	## Returns: The status of a call to :bro:id:`Log::add_filter` using a
	##          default :bro:type:`Log::Filter` argument with ``name`` field
	##          set to "default".
	##
	## .. bro:see:: Log::add_filter Log::remove_filter
	##    Log::remove_default_filter
	global add_default_filter: function(id: ID) : bool;

	## Removes the :bro:type:`Log::Filter` with ``name`` field equal to
	## "default".
	##
	## id: The ID associated with a logging stream from which to remove the
	##     default filter.
	##
	## Returns: The status of a call to :bro:id:`Log::remove_filter` using
	##          "default" as the argument.
	##
	## .. bro:see:: Log::add_filter Log::remove_filter Log::add_default_filter
	global remove_default_filter: function(id: ID) : bool;

	## Runs a command given by :bro:id:`Log::default_rotation_postprocessor_cmd`
	## on a rotated file. Meant to be called from postprocessor functions
	## that are added to :bro:id:`Log::default_rotation_postprocessors`.
	##
	## info: A record holding meta-information about the log being rotated.
	##
	## npath: The new path of the file (after already being rotated/processed
	##        by writer-specific postprocessor as defined in
	##        :bro:id:`Log::default_rotation_postprocessors`.
	##
	## Returns: True when :bro:id:`Log::default_rotation_postprocessor_cmd`
	##          is empty or the system command given by it has been invoked
	##          to postprocess a rotated log file.
	##
	## .. bro:see:: Log::default_rotation_date_format
	##    Log::default_rotation_postprocessor_cmd
	##    Log::default_rotation_postprocessors
	global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool;
}
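# For illustration only: a script can define its own stream against the API above;
# the module name, record fields, and message here are hypothetical.
module Example;

export {
	redef enum Log::ID += { LOG };

	type Info: record {
		ts: time &log;
		msg: string &log;
	};
}

event bro_init()
	{
	Log::create_stream(Example::LOG, [$columns=Info]);
	Log::write(Example::LOG, [$ts=network_time(), $msg="stream is ready"]);
	}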
@@ -1 +1,2 @@
@load ./scp
@load ./sftp
@@ -1,30 +1,56 @@
##! This script defines a postprocessing function that can be applied
##! to a logging filter in order to automatically SCP (secure copy)
##! a log stream (or a subset of it) to a remote host at configurable
##! rotation time intervals.
##! rotation time intervals. Generally, to use this functionality
##! you must handle the :bro:id:`bro_init` event and do the following
##! in your handler:
##!
##! 1) Create a new :bro:type:`Log::Filter` record that defines a name/path,
##!    rotation interval, and set the ``postprocessor`` to
##!    :bro:id:`Log::scp_postprocessor`.
##! 2) Add the filter to a logging stream using :bro:id:`Log::add_filter`.
##! 3) Add a table entry to :bro:id:`Log::scp_destinations` for the filter's
##!    writer/path pair which defines a set of :bro:type:`Log::SCPDestination`
##!    records.

module Log;

export {
	## This postprocessor SCP's the rotated-log to all the remote hosts
	## Secure-copies the rotated-log to all the remote hosts
	## defined in :bro:id:`Log::scp_destinations` and then deletes
	## the local copy of the rotated-log. It's not active when
	## reading from trace files.
	##
	## info: A record holding meta-information about the log file to be
	##       postprocessed.
	##
	## Returns: True if secure-copy system command was initiated or
	##          if no destination was configured for the log as described
	##          by *info*.
	global scp_postprocessor: function(info: Log::RotationInfo): bool;

	## A container that describes the remote destination for the SCP command
	## argument as ``user@host:path``.
	type SCPDestination: record {
		## The remote user to log in as. A trust mechanism should be
		## pre-established.
		user: string;
		## The remote host to which to transfer logs.
		host: string;
		## The path/directory on the remote host to send logs.
		path: string;
	};

	## A table indexed by a particular log writer and filter path, that yields
	## a set remote destinations. The :bro:id:`Log::scp_postprocessor`
	## function queries this table upon log rotation and performs a secure
	## copy of the rotated-log to each destination in the set.
	## copy of the rotated-log to each destination in the set. This
	## table can be modified at run-time.
	global scp_destinations: table[Writer, string] of set[SCPDestination];

	## Default naming format for timestamps embedded into log filenames
	## that use the SCP rotator.
	const scp_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef;
}

function scp_postprocessor(info: Log::RotationInfo): bool

@@ -34,7 +60,11 @@ function scp_postprocessor(info: Log::RotationInfo): bool

	local command = "";
	for ( d in scp_destinations[info$writer, info$path] )
		command += fmt("scp %s %s@%s:%s;", info$fname, d$user, d$host, d$path);
		{
		local dst = fmt("%s/%s.%s.log", d$path, info$path,
		                strftime(Log::scp_rotation_date_format, info$open));
		command += fmt("scp %s %s@%s:%s;", info$fname, d$user, d$host, dst);
		}

	command += fmt("/bin/rm %s", info$fname);
	system(command);
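# For illustration only (hypothetical stream, path, user, and host): the three
# steps described above might be wired up like this.
event bro_init()
	{
	local f: Log::Filter = [$name="conn-scp", $path="conn-scp", $interv=1hr,
	                        $postprocessor=Log::scp_postprocessor];
	Log::add_filter(Conn::LOG, f);
	Log::scp_destinations[Log::WRITER_ASCII, "conn-scp"] =
		set([$user="archiver", $host="logs.example.com", $path="/var/log/bro"]);
	}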
scripts/base/frameworks/logging/postprocessors/sftp.bro (new file, 73 lines)

@@ -0,0 +1,73 @@
##! This script defines a postprocessing function that can be applied
##! to a logging filter in order to automatically SFTP
##! a log stream (or a subset of it) to a remote host at configurable
##! rotation time intervals. Generally, to use this functionality
##! you must handle the :bro:id:`bro_init` event and do the following
##! in your handler:
##!
##! 1) Create a new :bro:type:`Log::Filter` record that defines a name/path,
##!    rotation interval, and set the ``postprocessor`` to
##!    :bro:id:`Log::sftp_postprocessor`.
##! 2) Add the filter to a logging stream using :bro:id:`Log::add_filter`.
##! 3) Add a table entry to :bro:id:`Log::sftp_destinations` for the filter's
##!    writer/path pair which defines a set of :bro:type:`Log::SFTPDestination`
##!    records.

module Log;

export {
	## Securely transfers the rotated-log to all the remote hosts
	## defined in :bro:id:`Log::sftp_destinations` and then deletes
	## the local copy of the rotated-log. It's not active when
	## reading from trace files.
	##
	## info: A record holding meta-information about the log file to be
	##       postprocessed.
	##
	## Returns: True if sftp system command was initiated or
	##          if no destination was configured for the log as described
	##          by *info*.
	global sftp_postprocessor: function(info: Log::RotationInfo): bool;

	## A container that describes the remote destination for the SFTP command,
	## comprised of the username, host, and path at which to upload the file.
	type SFTPDestination: record {
		## The remote user to log in as. A trust mechanism should be
		## pre-established.
		user: string;
		## The remote host to which to transfer logs.
		host: string;
		## The path/directory on the remote host to send logs.
		path: string;
	};

	## A table indexed by a particular log writer and filter path, that yields
	## a set remote destinations. The :bro:id:`Log::sftp_postprocessor`
	## function queries this table upon log rotation and performs a secure
	## transfer of the rotated-log to each destination in the set. This
	## table can be modified at run-time.
	global sftp_destinations: table[Writer, string] of set[SFTPDestination];

	## Default naming format for timestamps embedded into log filenames
	## that use the SFTP rotator.
	const sftp_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef;
}

function sftp_postprocessor(info: Log::RotationInfo): bool
	{
	if ( reading_traces() || [info$writer, info$path] !in sftp_destinations )
		return T;

	local command = "";
	for ( d in sftp_destinations[info$writer, info$path] )
		{
		local dst = fmt("%s/%s.%s.log", d$path, info$path,
		                strftime(Log::sftp_rotation_date_format, info$open));
		command += fmt("echo put %s %s | sftp -b - %s@%s;", info$fname, dst,
		               d$user, d$host);
		}

	command += fmt("/bin/rm %s", info$fname);
	system(command);
	return T;
	}
@@ -1,4 +1,5 @@
##! Interface for the ascii log writer.
##! Interface for the ASCII log writer. Redefinable options are available
##! to tweak the output format of ASCII logs.

module LogAscii;

@@ -7,7 +8,8 @@ export {
	## into files. This is primarily for debugging purposes.
	const output_to_stdout = F &redef;

	## If true, include a header line with column names.
	## If true, include a header line with column names and description
	## of the other ASCII logging options that were used.
	const include_header = T &redef;

	## Prefix for the header line if included.

@@ -19,8 +21,9 @@ export {
	## Separator between set elements.
	const set_separator = "," &redef;

	## String to use for empty fields.
	const empty_field = "-" &redef;
	## String to use for empty fields. This should be different from
	## *unset_field* to make the output non-ambigious.
	const empty_field = "(empty)" &redef;

	## String to use for an unset &optional field.
	const unset_field = "-" &redef;
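# For illustration only: the options above are meant to be tuned via redef, e.g.
# from a site's local.bro.
redef LogAscii::include_header = F;
redef LogAscii::set_separator = ";";
redef LogAscii::empty_field = "EMPTY";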
@@ -13,11 +13,11 @@
module Metrics;

export {
	## This value allows a user to decide how large of result groups the
	## workers should transmit values.
	## Allows a user to decide how large of result groups the
	## workers should transmit values for cluster metric aggregation.
	const cluster_send_in_groups_of = 50 &redef;

	## This is the percent of the full threshold value that needs to be met
	## The percent of the full threshold value that needs to be met
	## on a single worker for that worker to send the value to its manager in
	## order for it to request a global view for that value. There is no
	## requirement that the manager requests a global view for the index

@@ -25,11 +25,11 @@ export {
	## recently.
	const cluster_request_global_view_percent = 0.1 &redef;

	## This event is sent by the manager in a cluster to initiate the
	## Event sent by the manager in a cluster to initiate the
	## collection of metrics values for a filter.
	global cluster_filter_request: event(uid: string, id: ID, filter_name: string);

	## This event is sent by nodes that are collecting metrics after receiving
	## Event sent by nodes that are collecting metrics after receiving
	## a request for the metric filter from the manager.
	global cluster_filter_response: event(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool);

@@ -40,12 +40,12 @@ export {
	global cluster_index_request: event(uid: string, id: ID, filter_name: string, index: Index);

	## This event is sent by nodes in response to a
	## :bro:id:`cluster_index_request` event.
	## :bro:id:`Metrics::cluster_index_request` event.
	global cluster_index_response: event(uid: string, id: ID, filter_name: string, index: Index, val: count);

	## This is sent by workers to indicate that they crossed the percent of the
	## current threshold by the percentage defined globally in
	## :bro:id:`cluster_request_global_view_percent`
	## :bro:id:`Metrics::cluster_request_global_view_percent`
	global cluster_index_intermediate_response: event(id: Metrics::ID, filter_name: string, index: Metrics::Index, val: count);

	## This event is scheduled internally on workers to send result chunks.
@@ -1,13 +1,16 @@
##! This is the implementation of the metrics framework.
##! The metrics framework provides a way to count and measure data.

@load base/frameworks/notice

module Metrics;

export {
	## The metrics logging stream identifier.
	redef enum Log::ID += { LOG };

	## Identifiers for metrics to collect.
	type ID: enum {
		## Blank placeholder value.
		NOTHING,
	};

@@ -15,10 +18,13 @@ export {
	## current value to the logging stream.
	const default_break_interval = 15mins &redef;

	## This is the interval for how often notices will happen after they have
	## already fired.
	## This is the interval for how often threshold based notices will happen
	## after they have already fired.
	const renotice_interval = 1hr &redef;

	## Represents a thing which is having metrics collected for it. An instance
	## of this record type and a :bro:type:`Metrics::ID` together represent a
	## single measurement.
	type Index: record {
		## Host is the value to which this metric applies.
		host: addr &optional;

@@ -37,17 +43,30 @@ export {
		network: subnet &optional;
	} &log;

	## The record type that is used for logging metrics.
	type Info: record {
		## Timestamp at which the metric was "broken".
		ts: time &log;
		## What measurement the metric represents.
		metric_id: ID &log;
		## The name of the filter being logged. :bro:type:`Metrics::ID` values
		## can have multiple filters which represent different perspectives on
		## the data so this is necessary to understand the value.
		filter_name: string &log;
		## What the metric value applies to.
		index: Index &log;
		## The simple numeric value of the metric.
		value: count &log;
	};

	# TODO: configure a metrics filter logging stream to log the current
	# TODO: configure a metrics filter logging stream to log the current
	# metrics configuration in case someone is looking through
	# old logs and the configuration has changed since then.

	## Filters define how the data from a metric is aggregated and handled.
	## Filters can be used to set how often the measurements are cut or "broken"
	## and logged or how the data within them is aggregated. It's also
	## possible to disable logging and use filters for thresholding.
	type Filter: record {
		## The :bro:type:`Metrics::ID` that this filter applies to.
		id: ID &optional;

@@ -62,7 +81,7 @@ export {
		aggregation_mask: count &optional;
		## This is essentially a mapping table between addresses and subnets.
		aggregation_table: table[subnet] of subnet &optional;
		## The interval at which the metric should be "broken" and written
		## The interval at which this filter should be "broken" and written
		## to the logging stream. The counters are also reset to zero at
		## this time so any threshold based detection needs to be set to a
		## number that should be expected to happen within this period.

@@ -79,7 +98,7 @@ export {
		notice_threshold: count &optional;
		## A series of thresholds at which to generate notices.
		notice_thresholds: vector of count &optional;
		## How often this notice should be raised for this metric index. It
		## How often this notice should be raised for this filter. It
		## will be generated everytime it crosses a threshold, but if the
		## $break_interval is set to 5mins and this is set to 1hr the notice
		## only be generated once per hour even if something crosses the

@@ -87,15 +106,43 @@ export {
		notice_freq: interval &optional;
	};

	## Function to associate a metric filter with a metric ID.
	##
	## id: The metric ID that the filter should be associated with.
	##
	## filter: The record representing the filter configuration.
	global add_filter: function(id: ID, filter: Filter);

	## Add data into a :bro:type:`Metrics::ID`. This should be called when
	## a script has measured some point value and is ready to increment the
	## counters.
	##
	## id: The metric ID that the data represents.
	##
	## index: The metric index that the value is to be added to.
	##
	## increment: How much to increment the counter by.
	global add_data: function(id: ID, index: Index, increment: count);

	## Helper function to represent a :bro:type:`Metrics::Index` value as
	## a simple string
	##
	## index: The metric index that is to be converted into a string.
	##
	## Returns: A string reprentation of the metric index.
	global index2str: function(index: Index): string;

	# This is the event that is used to "finish" metrics and adapt the metrics
	# framework for clustered or non-clustered usage.
	## Event that is used to "finish" metrics and adapt the metrics
	## framework for clustered or non-clustered usage.
	##
	## ..note: This is primarily intended for internal use.
	global log_it: event(filter: Filter);

	## Event to access metrics records as they are passed to the logging framework.
	global log_metrics: event(rec: Info);

	## Type to store a table of metrics values. Interal use only!
	type MetricTable: table[Index] of count &default=0;
}

redef record Notice::Info += {

@@ -105,7 +152,6 @@ redef record Notice::Info += {
global metric_filters: table[ID] of vector of Filter = table();
global filter_store: table[ID, string] of Filter = table();

type MetricTable: table[Index] of count &default=0;
# This is indexed by metric ID and stream filter name.
global store: table[ID, string] of MetricTable = table() &default=table();
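# For illustration only (the metric name, filter settings, and interval are
# hypothetical): a script would typically define a metric, attach a filter, and
# feed it data along these lines.
redef enum Metrics::ID += { CONNS_BY_HOST };

event bro_init()
	{
	Metrics::add_filter(CONNS_BY_HOST, [$break_interval=5mins]);
	}

event connection_established(c: connection)
	{
	Metrics::add_data(CONNS_BY_HOST, [$host=c$id$orig_h], 1);
	}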
@@ -31,6 +31,7 @@ export {
	## Add a helper to the notice policy for looking up GeoIP data.
	redef Notice::policy += {
		[$pred(n: Notice::Info) = { return (n$note in Notice::lookup_location_types); },
		 $action = ACTION_ADD_GEODATA,
		 $priority = 10],
	};
}
@@ -1,3 +1,8 @@
##! Adds a new notice action type which can be used to email notices
##! to the administrators of a particular address space as set by
##! :bro:id:`Site::local_admins` if the notice contains a source
##! or destination address that lies within their space.

@load ../main
@load base/utils/site

@@ -6,8 +11,8 @@ module Notice;
export {
	redef enum Action += {
		## Indicate that the generated email should be addressed to the
		## appropriate email addresses as found in the
		## :bro:id:`Site::addr_to_emails` variable based on the relevant
		## appropriate email addresses as found by the
		## :bro:id:`Site::get_emails` function based on the relevant
		## address or addresses indicated in the notice.
		ACTION_EMAIL_ADMIN
	};
@@ -1,3 +1,5 @@
##! Allows configuration of a pager email address to which notices can be sent.

@load ../main

module Notice;

@@ -5,7 +7,7 @@ module Notice;
export {
	redef enum Action += {
		## Indicates that the notice should be sent to the pager email address
		## configured in the :bro:id:`mail_page_dest` variable.
		## configured in the :bro:id:`Notice::mail_page_dest` variable.
		ACTION_PAGE
	};
@@ -1,6 +1,6 @@
#! Notice extension that mails out a pretty-printed version of alarm.log
#! in regular intervals, formatted for better human readability. If activated,
#! that replaces the default summary mail having the raw log output.
##! Notice extension that mails out a pretty-printed version of alarm.log
##! in regular intervals, formatted for better human readability. If activated,
##! that replaces the default summary mail having the raw log output.

@load base/frameworks/cluster
@load ../main

@@ -10,18 +10,21 @@ module Notice;
export {
	## Activate pretty-printed alarm summaries.
	const pretty_print_alarms = T &redef;

	## Address to send the pretty-printed reports to. Default if not set is
	## :bro:id:`Notice::mail_dest`.
	const mail_dest_pretty_printed = "" &redef;

	## If an address from one of these networks is reported, we mark
	## the entry with an addition quote symbol (i.e., ">"). Many MUAs
	## If an address from one of these networks is reported, we mark
	## the entry with an additional quote symbol (i.e., ">"). Many MUAs
	## then highlight such lines differently.
	global flag_nets: set[subnet] &redef;

	## Function that renders a single alarm. Can be overidden.
	global pretty_print_alarm: function(out: file, n: Info) &redef;

	## Force generating mail file, even if reading from traces or no mail
	## destination is defined. This is mainly for testing.
	global force_email_summaries = F &redef;
}

# We maintain an old-style file recording the pretty-printed alarms.
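# For illustration only (hypothetical destination and network): the options
# exported above are meant to be redefined at a site, e.g.:
redef Notice::mail_dest_pretty_printed = "secteam@example.com";
redef Notice::flag_nets += { 192.168.1.0/24 };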
@@ -32,6 +35,9 @@ global pp_alarms_open: bool = F;
# Returns True if pretty-printed alarm summaries are activated.
function want_pp() : bool
	{
	if ( force_email_summaries )
		return T;

	return (pretty_print_alarms && ! reading_traces()
		&& (mail_dest != "" || mail_dest_pretty_printed != ""));
	}

@@ -41,38 +47,49 @@ function pp_open()
	{
	if ( pp_alarms_open )
		return;

	pp_alarms_open = T;
	pp_alarms = open(pp_alarms_name);

	local dest = mail_dest_pretty_printed != "" ? mail_dest_pretty_printed
		: mail_dest;

	local headers = email_headers("Alarm summary", dest);
	write_file(pp_alarms, headers + "\n");
	}

# Closes and mails out the current output file.
function pp_send()
function pp_send(rinfo: Log::RotationInfo)
	{
	if ( ! pp_alarms_open )
		return;

	write_file(pp_alarms, "\n\n--\n[Automatically generated]\n\n");
	close(pp_alarms);

	system(fmt("/bin/cat %s | %s -t -oi && /bin/rm %s",
		pp_alarms_name, sendmail, pp_alarms_name));

	pp_alarms_open = F;

	local from = strftime("%H:%M:%S", rinfo$open);
	local to = strftime("%H:%M:%S", rinfo$close);
	local subject = fmt("Alarm summary from %s-%s", from, to);
	local dest = mail_dest_pretty_printed != "" ? mail_dest_pretty_printed
		: mail_dest;

	if ( dest == "" )
		# No mail destination configured, just leave the file alone. This is mainly for
		# testing.
		return;

	local headers = email_headers(subject, dest);

	local header_name = pp_alarms_name + ".tmp";
	local header = open(header_name);
	write_file(header, headers + "\n");
	close(header);

	system(fmt("/bin/cat %s %s | %s -t -oi && /bin/rm -f %s %s",
		header_name, pp_alarms_name, sendmail, header_name, pp_alarms_name));
	}

# Postprocessor function that triggers the email.
function pp_postprocessor(info: Log::RotationInfo): bool
	{
	if ( want_pp() )
		pp_send();
		pp_send(info);

	return T;
	}

@@ -80,7 +97,7 @@ event bro_init()
	{
	if ( ! want_pp() )
		return;

	# This replaces the standard non-pretty-printing filter.
	Log::add_filter(Notice::ALARM_LOG,
		[$name="alarm-mail", $writer=Log::WRITER_NONE,

@@ -92,13 +109,13 @@ event notice(n: Notice::Info) &priority=-5
	{
	if ( ! want_pp() )
		return;

	if ( ACTION_LOG !in n$actions )

	if ( ACTION_ALARM !in n$actions )
		return;

	if ( ! pp_alarms_open )
		pp_open();

	pretty_print_alarm(pp_alarms, n);
	}

@@ -108,12 +125,12 @@ function do_msg(out: file, n: Info, line1: string, line2: string, line3: string,
	@ifdef ( Notice::ACTION_ADD_GEODATA ) # Make tests happy, cyclic dependency.
	if ( n?$remote_location && n$remote_location?$country_code )
		country = fmt(" (remote location %s)", n$remote_location$country_code);
	@endif

	@endif

	line1 = cat(line1, country);

	local resolved = "";

	if ( host1 != 0.0.0.0 )
		resolved = fmt("%s # %s = %s", resolved, host1, name1);

@@ -133,64 +150,64 @@ function do_msg(out: file, n: Info, line1: string, line2: string, line3: string,
function pretty_print_alarm(out: file, n: Info)
	{
	local pdescr = "";

@if ( Cluster::is_enabled() )
	pdescr = "local";

	if ( n?$src_peer )
		pdescr = n$src_peer?$descr ? n$src_peer$descr : fmt("%s", n$src_peer$host);

	pdescr = fmt("<%s> ", pdescr);
@endif

	local msg = fmt( "%s%s", pdescr, n$msg);

	local who = "";
	local h1 = 0.0.0.0;
	local h2 = 0.0.0.0;

	local orig_p = "";
	local resp_p = "";

	if ( n?$id )
		{
		orig_p = fmt(":%s", n$id$orig_p);
		resp_p = fmt(":%s", n$id$resp_p);
		h1 = n$id$orig_h;
		h2 = n$id$resp_h;
		who = fmt("%s:%s -> %s:%s", h1, n$id$orig_p, h2, n$id$resp_p);
		}

	if ( n?$src && n?$dst )
	else if ( n?$src && n?$dst )
		{
		h1 = n$src;
		h2 = n$dst;
		who = fmt("%s%s -> %s%s", h1, orig_p, h2, resp_p);

		if ( n?$uid )
			who = fmt("%s (uid %s)", who, n$uid );
		who = fmt("%s -> %s", h1, h2);
		}

	else if ( n?$src )
		{
		local p = "";

		if ( n?$p )
			p = fmt(":%s", n$p);

		h1 = n$src;
		who = fmt("%s%s", h1, p);
		who = fmt("%s%s", h1, (n?$p ? fmt(":%s", n$p) : ""));
		}

	if ( n?$uid )
		who = fmt("%s (uid %s)", who, n$uid );

	local flag = (h1 in flag_nets || h2 in flag_nets);

	local line1 = fmt(">%s %D %s %s", (flag ? ">" : " "), network_time(), n$note, who);
	local line2 = fmt(" %s", msg);
	local line3 = n?$sub ? fmt(" %s", n$sub) : "";
||||
if ( h1 == 0.0.0.0 )
|
||||
{
|
||||
do_msg(out, n, line1, line2, line3, h1, "", h2, "");
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
if ( reading_traces() )
|
||||
{
|
||||
do_msg(out, n, line1, line2, line3, h1, "<skipped>", h2, "<skipped>");
|
||||
return;
|
||||
}
|
||||
|
||||
when ( local h1name = lookup_addr(h1) )
|
||||
{
|
||||
if ( h2 == 0.0.0.0 )
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
##! Implements notice functionality across clusters.
|
||||
##! Implements notice functionality across clusters. Worker nodes
|
||||
##! will disable notice/alarm logging streams and forward notice
|
||||
##! events to the manager node for logging/processing.
|
||||
|
||||
@load ./main
|
||||
@load base/frameworks/cluster
|
||||
|
@ -7,10 +9,15 @@ module Notice;
|
|||
|
||||
export {
|
||||
## This is the event used to transport notices on the cluster.
|
||||
##
|
||||
## n: The notice information to be sent to the cluster manager for
|
||||
## further processing.
|
||||
global cluster_notice: event(n: Notice::Info);
|
||||
}
|
||||
|
||||
## Manager can communicate notice suppression to workers.
|
||||
redef Cluster::manager2worker_events += /Notice::begin_suppression/;
|
||||
## Workers need the ability to forward notices to the manager.
|
||||
redef Cluster::worker2manager_events += /Notice::cluster_notice/;
|
||||
|
||||
@if ( Cluster::local_node_type() != Cluster::MANAGER )
|
||||
|
|
|
@ -1,32 +1,52 @@
|
|||
##! Loading this script extends the :bro:enum:`Notice::ACTION_EMAIL` action
|
||||
##! by appending to the email the hostnames associated with
|
||||
##! :bro:type:`Notice::Info`'s *src* and *dst* fields as determined by a
|
||||
##! DNS lookup.
|
||||
|
||||
@load ../main
|
||||
|
||||
module Notice;
|
||||
|
||||
# This probably doesn't actually work due to the async lookup_addr.
|
||||
# We have to store references to the notices here because the when statement
|
||||
# clones the frame which doesn't give us access to modify values outside
|
||||
# of its execution scope. (we get a clone of the notice instead of a
|
||||
# reference to the original notice)
|
||||
global tmp_notice_storage: table[string] of Notice::Info &create_expire=max_email_delay+10secs;
|
||||
|
||||
event Notice::notice(n: Notice::Info) &priority=10
|
||||
{
|
||||
if ( ! n?$src && ! n?$dst )
|
||||
return;
|
||||
|
||||
|
||||
# This should only be done for notices that are being sent to email.
|
||||
if ( ACTION_EMAIL !in n$actions )
|
||||
return;
|
||||
|
||||
|
||||
# I'm not recovering gracefully from the when statements because I want
|
||||
# the notice framework to detect that something has exceeded the maximum
|
||||
# allowed email delay and tell the user.
|
||||
local uid = unique_id("");
|
||||
tmp_notice_storage[uid] = n;
|
||||
|
||||
local output = "";
|
||||
if ( n?$src )
|
||||
{
|
||||
add n$email_delay_tokens["hostnames-src"];
|
||||
when ( local src_name = lookup_addr(n$src) )
|
||||
{
|
||||
output = string_cat("orig_h/src hostname: ", src_name, "\n");
|
||||
n$email_body_sections[|n$email_body_sections|] = output;
|
||||
output = string_cat("orig/src hostname: ", src_name, "\n");
|
||||
tmp_notice_storage[uid]$email_body_sections[|tmp_notice_storage[uid]$email_body_sections|] = output;
|
||||
delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-src"];
|
||||
}
|
||||
}
|
||||
if ( n?$dst )
|
||||
{
|
||||
add n$email_delay_tokens["hostnames-dst"];
|
||||
when ( local dst_name = lookup_addr(n$dst) )
|
||||
{
|
||||
output = string_cat("resp_h/dst hostname: ", dst_name, "\n");
|
||||
n$email_body_sections[|n$email_body_sections|] = output;
|
||||
output = string_cat("resp/dst hostname: ", dst_name, "\n");
|
||||
tmp_notice_storage[uid]$email_body_sections[|tmp_notice_storage[uid]$email_body_sections|] = output;
|
||||
delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-dst"];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,15 +2,14 @@
|
|||
##! are odd or potentially bad. Decisions of the meaning of various notices
|
||||
##! need to be done per site because Bro does not ship with assumptions about
|
||||
##! what is bad activity for sites. More extensive documentation about using
|
||||
##! the notice framework can be found in the documentation section of the
|
||||
##! http://www.bro-ids.org/ website.
|
||||
##! the notice framework can be found in :doc:`/notice`.
|
||||
|
||||
module Notice;
|
||||
|
||||
export {
|
||||
redef enum Log::ID += {
|
||||
redef enum Log::ID += {
|
||||
## This is the primary logging stream for notices.
|
||||
LOG,
|
||||
LOG,
|
||||
## This is the notice policy auditing log. It records what the current
|
||||
## notice policy is at Bro init time.
|
||||
POLICY_LOG,
|
||||
|
@ -18,25 +17,25 @@ export {
|
|||
ALARM_LOG,
|
||||
};
|
||||
|
||||
## Scripts creating new notices need to redef this enum to add their own
|
||||
## Scripts creating new notices need to redef this enum to add their own
|
||||
## specific notice types which would then get used when they call the
|
||||
## :bro:id:`NOTICE` function. The convention is to give a general category
|
||||
## along with the specific notice separating words with underscores and using
|
||||
## leading capitals on each word except for abbreviations which are kept in
|
||||
## all capitals. For example, SSH::Login is for heuristically guessed
|
||||
## successful SSH logins.
|
||||
## along with the specific notice separating words with underscores and
|
||||
## using leading capitals on each word except for abbreviations which are
|
||||
## kept in all capitals. For example, SSH::Login is for heuristically
|
||||
## guessed successful SSH logins.
|
||||
type Type: enum {
|
||||
## Notice reporting a count of how often a notice occurred.
|
||||
Tally,
|
||||
};
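As a sketch of the naming convention described above (the module, event, and message are made up purely for illustration), a detection script adds its own type and later raises it through the global NOTICE function:

    module SSH;
    export {
        redef enum Notice::Type += {
            ## Heuristically determined successful SSH login.
            Login,
        };
    }
    # ... and later, inside whatever event handler implements the heuristic
    # for connection c:
    #   NOTICE([$note=SSH::Login, $msg="Heuristically detected login", $conn=c]);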
|
||||
|
||||
|
||||
## These are values representing actions that can be taken with notices.
|
||||
type Action: enum {
|
||||
## Indicates that there is no action to be taken.
|
||||
ACTION_NONE,
|
||||
## Indicates that the notice should be sent to the notice logging stream.
|
||||
ACTION_LOG,
|
||||
## Indicates that the notice should be sent to the email address(es)
|
||||
## Indicates that the notice should be sent to the email address(es)
|
||||
## configured in the :bro:id:`Notice::mail_dest` variable.
|
||||
ACTION_EMAIL,
|
||||
## Indicates that the notice should be alarmed. A readable ASCII
|
||||
|
@ -47,30 +46,45 @@ export {
|
|||
## duplicate notice suppression that the notice framework does.
|
||||
ACTION_NO_SUPPRESS,
|
||||
};
|
||||
|
||||
## The notice framework is able to do automatic notice supression by
|
||||
## utilizing the $identifier field in :bro:type:`Info` records.
|
||||
|
||||
## The notice framework is able to do automatic notice suppression by
|
||||
## utilizing the $identifier field in :bro:type:`Notice::Info` records.
|
||||
## Set this to "0secs" to completely disable automated notice suppression.
|
||||
const default_suppression_interval = 1hrs &redef;
|
||||
|
||||
|
||||
type Info: record {
|
||||
## An absolute time indicating when the notice occurred, defaults
|
||||
## to the current network time.
|
||||
ts: time &log &optional;
|
||||
|
||||
## A connection UID which uniquely identifies the endpoints
|
||||
## concerned with the notice.
|
||||
uid: string &log &optional;
|
||||
|
||||
## A connection 4-tuple identifying the endpoints concerned with the
|
||||
## notice.
|
||||
id: conn_id &log &optional;
|
||||
|
||||
## These are shorthand ways of giving the uid and id to a notice. The
|
||||
## A shorthand way of giving the uid and id to a notice. The
|
||||
## reference to the actual connection will be deleted after applying
|
||||
## the notice policy.
|
||||
conn: connection &optional;
|
||||
## A shorthand way of giving the uid and id to a notice. The
|
||||
## reference to the actual connection will be deleted after applying
|
||||
## the notice policy.
|
||||
iconn: icmp_conn &optional;
|
||||
|
||||
## The :bro:enum:`Notice::Type` of the notice.
|
||||
|
||||
## The transport protocol. Filled automatically when either conn, iconn
|
||||
## or p is specified.
|
||||
proto: transport_proto &log &optional;
|
||||
|
||||
## The :bro:type:`Notice::Type` of the notice.
|
||||
note: Type &log;
|
||||
## The human readable message for the notice.
|
||||
msg: string &log &optional;
|
||||
## The human readable sub-message.
|
||||
sub: string &log &optional;
|
||||
|
||||
|
||||
## Source address, if we don't have a :bro:type:`conn_id`.
|
||||
src: addr &log &optional;
|
||||
## Destination address.
|
||||
|
@ -79,33 +93,39 @@ export {
|
|||
p: port &log &optional;
|
||||
## Associated count, or perhaps a status code.
|
||||
n: count &log &optional;
|
||||
|
||||
|
||||
## Peer that raised this notice.
|
||||
src_peer: event_peer &optional;
|
||||
## Textual description for the peer that raised this notice.
|
||||
peer_descr: string &log &optional;
|
||||
|
||||
|
||||
## The actions which have been applied to this notice.
|
||||
actions: set[Notice::Action] &log &optional;
|
||||
|
||||
|
||||
## These are policy items that returned T and applied their action
|
||||
## to the notice.
|
||||
policy_items: set[count] &log &optional;
|
||||
|
||||
|
||||
## By adding chunks of text into this element, other scripts can
|
||||
## expand on notices that are being emailed. The normal way to add text
|
||||
## is to extend the vector by handling the :bro:id:`Notice::notice`
|
||||
## event and modifying the notice in place.
|
||||
email_body_sections: vector of string &default=vector();
|
||||
|
||||
email_body_sections: vector of string &optional;
|
||||
|
||||
## Adding a string "token" to this set will cause the notice framework's
|
||||
## built-in emailing functionality to delay sending the email until
|
||||
## either the token has been removed or the email has been delayed
|
||||
## for :bro:id:`Notice::max_email_delay`.
|
||||
email_delay_tokens: set[string] &optional;
|
||||
|
||||
## This field is to be provided when a notice is generated for the
|
||||
## purpose of deduplicating notices. The identifier string should
|
||||
## be unique for a single instance of the notice. This field should be
|
||||
## filled out in almost all cases when generating notices to define
|
||||
## be unique for a single instance of the notice. This field should be
|
||||
## filled out in almost all cases when generating notices to define
|
||||
## when a notice is conceptually a duplicate of a previous notice.
|
||||
##
|
||||
## For example, an SSL certificate that is going to expire soon should
|
||||
## always have the same identifier no matter the client IP address
|
||||
##
|
||||
## For example, an SSL certificate that is going to expire soon should
|
||||
## always have the same identifier no matter the client IP address
|
||||
## that connected and resulted in the certificate being exposed. In
|
||||
## this case, the resp_h, resp_p, and hash of the certificate would be
|
||||
## used to create this value. The hash of the cert is included
|
||||
|
@ -114,19 +134,19 @@ export {
|
|||
## Another example might be a host downloading a file which triggered
|
||||
## a notice because the MD5 sum of the file it downloaded was known
|
||||
## by some set of intelligence. In that case, the orig_h (client)
|
||||
## and MD5 sum would be used in this field to dedup because if the
|
||||
## and MD5 sum would be used in this field to dedup because if the
|
||||
## same file is downloaded over and over again you really only want to
|
||||
## know about it a single time. This makes it possible to send those
|
||||
## notices to email without worrying so much about sending thousands
|
||||
## of emails.
|
||||
identifier: string &optional;
|
||||
|
||||
|
||||
## This field indicates the length of time that this
|
||||
## unique notice should be suppressed. This field is automatically
|
||||
## unique notice should be suppressed. This field is automatically
|
||||
## filled out and should not be written to by any other script.
|
||||
suppress_for: interval &log &optional;
|
||||
};
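A minimal sketch of the deduplication idea described for the *identifier* field (the notice type and the cert_hash value are hypothetical; the call belongs inside some event handler):

    # Same certificate on the same server => same identifier => one notice.
    NOTICE([$note=SSL::Certificate_Expiring,        # hypothetical notice type
            $msg="Certificate is about to expire",
            $conn=c,
            $identifier=cat(c$id$resp_h, c$id$resp_p, cert_hash)]);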
|
||||
|
||||
|
||||
## Ignored notice types.
|
||||
const ignored_types: set[Notice::Type] = {} &redef;
|
||||
## Emailed notice types.
|
||||
|
@ -135,27 +155,28 @@ export {
|
|||
const alarmed_types: set[Notice::Type] = {} &redef;
|
||||
## Types that should be suppressed for the default suppression interval.
|
||||
const not_suppressed_types: set[Notice::Type] = {} &redef;
|
||||
## This table can be used as a shorthand way to modify suppression
|
||||
## This table can be used as a shorthand way to modify suppression
|
||||
## intervals for entire notice types.
|
||||
const type_suppression_intervals: table[Notice::Type] of interval = {} &redef;
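For instance, repeated Tally notices could be suppressed for a full day (interval chosen purely for illustration):

    redef Notice::type_suppression_intervals += {
        [Notice::Tally] = 1day,
    };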
|
||||
|
||||
|
||||
## This is the record that defines the items that make up the notice policy.
|
||||
type PolicyItem: record {
|
||||
## This is the exact positional order in which the :bro:type:`PolicyItem`
|
||||
## records are checked. This is set internally by the notice framework.
|
||||
## This is the exact positional order in which the
|
||||
## :bro:type:`Notice::PolicyItem` records are checked.
|
||||
## This is set internally by the notice framework.
|
||||
position: count &log &optional;
|
||||
## Define the priority for this check. Items are checked in order
|
||||
## from highest value (10) to lowest value (0).
|
||||
priority: count &log &default=5;
|
||||
## An action given to the notice if the predicate returns true.
|
||||
action: Notice::Action &log &default=ACTION_NONE;
|
||||
## The pred (predicate) field is a function that returns a boolean T
|
||||
## or F value. If the predicate function return true, the action in
|
||||
## this record is applied to the notice that is given as an argument
|
||||
## to the predicate function. If no predicate is supplied, it's
|
||||
## The pred (predicate) field is a function that returns a boolean T
|
||||
## or F value. If the predicate function returns true, the action in
|
||||
## this record is applied to the notice that is given as an argument
|
||||
## to the predicate function. If no predicate is supplied, it's
|
||||
## assumed that the PolicyItem always applies.
|
||||
pred: function(n: Notice::Info): bool &log &optional;
|
||||
## Indicates this item should terminate policy processing if the
|
||||
## Indicates this item should terminate policy processing if the
|
||||
## predicate returns T.
|
||||
halt: bool &log &default=F;
|
||||
## This defines the length of time that this particular notice should
|
||||
|
@ -163,8 +184,8 @@ export {
|
|||
suppress_for: interval &log &optional;
|
||||
};
|
||||
|
||||
## This is the where the :bro:id:`Notice::policy` is defined. All notice
|
||||
## processing is done through this variable.
|
||||
## Defines a notice policy that is extensible on a per-site basis.
|
||||
## All notice processing is done through this variable.
|
||||
const policy: set[PolicyItem] = {
|
||||
[$pred(n: Notice::Info) = { return (n$note in Notice::ignored_types); },
|
||||
$halt=T, $priority = 9],
|
||||
|
@ -177,84 +198,118 @@ export {
|
|||
[$pred(n: Notice::Info) = { return (n$note in Notice::emailed_types); },
|
||||
$action = ACTION_EMAIL,
|
||||
$priority = 8],
|
||||
[$pred(n: Notice::Info) = {
|
||||
if (n$note in Notice::type_suppression_intervals)
|
||||
[$pred(n: Notice::Info) = {
|
||||
if (n$note in Notice::type_suppression_intervals)
|
||||
{
|
||||
n$suppress_for=Notice::type_suppression_intervals[n$note];
|
||||
return T;
|
||||
}
|
||||
return F;
|
||||
return F;
|
||||
},
|
||||
$action = ACTION_NONE,
|
||||
$priority = 8],
|
||||
[$action = ACTION_LOG,
|
||||
$priority = 0],
|
||||
} &redef;
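A site-local script might then append its own item, for example to email every weird-activity notice (the referenced notice type comes from the weird script further below; priorities must stay within 0 and 10):

    redef Notice::policy += {
        [$pred(n: Notice::Info) = { return n$note == Weird::Activity; },
         $action = Notice::ACTION_EMAIL,
         $priority = 6],
    };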
|
||||
|
||||
|
||||
## Local system sendmail program.
|
||||
const sendmail = "/usr/sbin/sendmail" &redef;
|
||||
## Email address to send notices with the :bro:enum:`ACTION_EMAIL` action
|
||||
## or to send bulk alarm logs on rotation with :bro:enum:`ACTION_ALARM`.
|
||||
## Email address to send notices with the :bro:enum:`Notice::ACTION_EMAIL`
|
||||
## action or to send bulk alarm logs on rotation with
|
||||
## :bro:enum:`Notice::ACTION_ALARM`.
|
||||
const mail_dest = "" &redef;
|
||||
|
||||
|
||||
## Address that emails will be from.
|
||||
const mail_from = "Big Brother <bro@localhost>" &redef;
|
||||
## Reply-to address used in outbound email.
|
||||
const reply_to = "" &redef;
|
||||
## Text string prefixed to the subject of all emails sent out.
|
||||
const mail_subject_prefix = "[Bro]" &redef;
|
||||
## The maximum amount of time a plugin can delay email from being sent.
|
||||
const max_email_delay = 15secs &redef;
|
||||
|
||||
## A log postprocessing function that implements emailing the contents
|
||||
## of a log upon rotation to any configured :bro:id:`Notice::mail_dest`.
|
||||
## The rotated log is removed upon being sent.
|
||||
##
|
||||
## info: A record containing the rotated log file information.
|
||||
##
|
||||
## Returns: True.
|
||||
global log_mailing_postprocessor: function(info: Log::RotationInfo): bool;
|
||||
|
||||
## This is the event that is called as the entry point to the
|
||||
## notice framework by the global :bro:id:`NOTICE` function. By the time
|
||||
## This is the event that is called as the entry point to the
|
||||
## notice framework by the global :bro:id:`NOTICE` function. By the time
|
||||
## this event is generated, default values have already been filled out in
|
||||
## the :bro:type:`Notice::Info` record and synchronous functions in the
|
||||
## :bro:id:`Notice:sync_functions` have already been called. The notice
|
||||
## :bro:id:`Notice::sync_functions` have already been called. The notice
|
||||
## policy has also been applied.
|
||||
##
|
||||
## n: The record containing notice data.
|
||||
global notice: event(n: Info);
|
||||
|
||||
## This is a set of functions that provide a synchronous way for scripts
|
||||
## This is a set of functions that provide a synchronous way for scripts
|
||||
## extending the notice framework to run before the normal event based
|
||||
## notice pathway that most of the notice framework takes. This is helpful
|
||||
## in cases where an action against a notice needs to happen immediately
|
||||
## and can't wait the short time for the event to bubble up to the top of
|
||||
## the event queue. An example is the IP address dropping script that
|
||||
## can block IP addresses that have notices generated because it
|
||||
## the event queue. An example is the IP address dropping script that
|
||||
## can block IP addresses that have notices generated because it
|
||||
## needs to operate closer to real time than the event queue allows it to.
|
||||
## Normally the event based extension model using the
|
||||
## Normally the event based extension model using the
|
||||
## :bro:id:`Notice::notice` event will work fine if there aren't harder
|
||||
## real time constraints.
|
||||
const sync_functions: set[function(n: Notice::Info)] = set() &redef;
|
||||
|
||||
|
||||
## This event is generated when a notice begins to be suppressed.
|
||||
##
|
||||
## n: The record containing notice data regarding the notice type
|
||||
## about to be suppressed.
|
||||
global begin_suppression: event(n: Notice::Info);
|
||||
|
||||
## This event is generated on each occurrence of an event being suppressed.
|
||||
##
|
||||
## n: The record containing notice data regarding the notice type
|
||||
## being suppressed.
|
||||
global suppressed: event(n: Notice::Info);
|
||||
|
||||
## This event is generated when a notice stops being suppressed.
|
||||
##
|
||||
## n: The record containing notice data regarding the notice type
|
||||
## that was being suppressed.
|
||||
global end_suppression: event(n: Notice::Info);
|
||||
|
||||
|
||||
## Call this function to send a notice in an email. It is already used
|
||||
## by default with the built in :bro:enum:`ACTION_EMAIL` and
|
||||
## :bro:enum:`ACTION_PAGE` actions.
|
||||
## by default with the built in :bro:enum:`Notice::ACTION_EMAIL` and
|
||||
## :bro:enum:`Notice::ACTION_PAGE` actions.
|
||||
##
|
||||
## n: The record of notice data to email.
|
||||
##
|
||||
## dest: The intended recipient of the notice email.
|
||||
##
|
||||
## extend: Whether to extend the email using the ``email_body_sections``
|
||||
## field of *n*.
|
||||
global email_notice_to: function(n: Info, dest: string, extend: bool);
|
||||
|
||||
## Constructs mail headers to which an email body can be appended for
|
||||
## sending with sendmail.
|
||||
##
|
||||
## subject_desc: a subject string to use for the mail
|
||||
##
|
||||
## dest: recipient string to use for the mail
|
||||
##
|
||||
## Returns: a string of mail headers to which an email body can be appended
|
||||
global email_headers: function(subject_desc: string, dest: string): string;
|
||||
|
||||
## This event can be handled to access the :bro:type:`Info`
|
||||
## This event can be handled to access the :bro:type:`Notice::Info`
|
||||
## record as it is sent on to the logging framework.
|
||||
##
|
||||
## rec: The record containing notice data before it is logged.
|
||||
global log_notice: event(rec: Info);
|
||||
|
||||
## This is an internal wrapper for the global NOTICE function. Please
|
||||
## This is an internal wrapper for the global :bro:id:`NOTICE` function;
|
||||
## disregard.
|
||||
##
|
||||
## n: The record of notice data.
|
||||
global internal_NOTICE: function(n: Notice::Info);
|
||||
}
|
||||
|
||||
|
@ -264,22 +319,22 @@ function per_notice_suppression_interval(t: table[Notice::Type, string] of Notic
|
|||
local n: Notice::Type;
|
||||
local s: string;
|
||||
[n,s] = idx;
|
||||
|
||||
|
||||
local suppress_time = t[n,s]$suppress_for - (network_time() - t[n,s]$ts);
|
||||
if ( suppress_time < 0secs )
|
||||
suppress_time = 0secs;
|
||||
|
||||
|
||||
# If there is no more suppression time left, the notice needs to be sent
|
||||
# to the end_suppression event.
|
||||
if ( suppress_time == 0secs )
|
||||
event Notice::end_suppression(t[n,s]);
|
||||
|
||||
|
||||
return suppress_time;
|
||||
}
|
||||
|
||||
# This is the internally maintained notice suppression table. It's
|
||||
# This is the internally maintained notice suppression table. It's
|
||||
# indexed on the Notice::Type and the $identifier field from the notice.
|
||||
global suppressing: table[Type, string] of Notice::Info = {}
|
||||
global suppressing: table[Type, string] of Notice::Info = {}
|
||||
&create_expire=0secs
|
||||
&expire_func=per_notice_suppression_interval;
|
||||
|
||||
|
@ -306,7 +361,7 @@ function log_mailing_postprocessor(info: Log::RotationInfo): bool
|
|||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(Notice::LOG, [$columns=Info, $ev=log_notice]);
|
||||
|
||||
|
||||
Log::create_stream(Notice::ALARM_LOG, [$columns=Notice::Info]);
|
||||
# If Bro is configured for mailing notices, set up mailing for alarms.
|
||||
# Make sure that this alarm log is also output as text so that it can
|
||||
|
@ -347,25 +402,49 @@ function email_headers(subject_desc: string, dest: string): string
|
|||
return header_text;
|
||||
}
|
||||
|
||||
event delay_sending_email(n: Notice::Info, dest: string, extend: bool)
|
||||
{
|
||||
email_notice_to(n, dest, extend);
|
||||
}
|
||||
|
||||
function email_notice_to(n: Notice::Info, dest: string, extend: bool)
|
||||
{
|
||||
if ( reading_traces() || dest == "" )
|
||||
return;
|
||||
|
||||
|
||||
if ( extend )
|
||||
{
|
||||
if ( |n$email_delay_tokens| > 0 )
|
||||
{
|
||||
# If we still are within the max_email_delay, keep delaying.
|
||||
if ( n$ts + max_email_delay > network_time() )
|
||||
{
|
||||
schedule 1sec { delay_sending_email(n, dest, extend) };
|
||||
return;
|
||||
}
|
||||
else
|
||||
{
|
||||
event reporter_info(network_time(),
|
||||
fmt("Notice email delay tokens weren't released in time (%s).", n$email_delay_tokens),
|
||||
"");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
local email_text = email_headers(fmt("%s", n$note), dest);
|
||||
|
||||
|
||||
# First off, finish the headers and include the human readable messages
|
||||
# then leave a blank line after the message.
|
||||
email_text = string_cat(email_text, "\nMessage: ", n$msg);
|
||||
if ( n?$sub )
|
||||
email_text = string_cat(email_text, "\nSub-message: ", n$sub);
|
||||
|
||||
|
||||
email_text = string_cat(email_text, "\n\n");
|
||||
|
||||
|
||||
# Next, add information about the connection if it exists.
|
||||
if ( n?$id )
|
||||
{
|
||||
email_text = string_cat(email_text, "Connection: ",
|
||||
email_text = string_cat(email_text, "Connection: ",
|
||||
fmt("%s", n$id$orig_h), ":", fmt("%d", n$id$orig_p), " -> ",
|
||||
fmt("%s", n$id$resp_h), ":", fmt("%d", n$id$resp_p), "\n");
|
||||
if ( n?$uid )
|
||||
|
@ -373,17 +452,18 @@ function email_notice_to(n: Notice::Info, dest: string, extend: bool)
|
|||
}
|
||||
else if ( n?$src )
|
||||
email_text = string_cat(email_text, "Address: ", fmt("%s", n$src), "\n");
|
||||
|
||||
|
||||
# Add the extended information if it's requested.
|
||||
if ( extend )
|
||||
{
|
||||
email_text = string_cat(email_text, "\nEmail Extensions\n");
|
||||
email_text = string_cat(email_text, "----------------\n");
|
||||
for ( i in n$email_body_sections )
|
||||
{
|
||||
email_text = string_cat(email_text, "******************\n");
|
||||
email_text = string_cat(email_text, n$email_body_sections[i], "\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
email_text = string_cat(email_text, "\n\n--\n[Automatically generated]\n\n");
|
||||
piped_exec(fmt("%s -t -oi", sendmail), email_text);
|
||||
}
|
||||
|
@ -396,10 +476,10 @@ event notice(n: Notice::Info) &priority=-5
|
|||
Log::write(Notice::LOG, n);
|
||||
if ( ACTION_ALARM in n$actions )
|
||||
Log::write(Notice::ALARM_LOG, n);
|
||||
|
||||
|
||||
# Normally suppress further notices like this one unless directed not to.
|
||||
# n$identifier *must* be specified for suppression to function at all.
|
||||
if ( n?$identifier &&
|
||||
if ( n?$identifier &&
|
||||
ACTION_NO_SUPPRESS !in n$actions &&
|
||||
[n$note, n$identifier] !in suppressing &&
|
||||
n$suppress_for != 0secs )
|
||||
|
@ -410,7 +490,8 @@ event notice(n: Notice::Info) &priority=-5
|
|||
}
|
||||
|
||||
## This determines if a notice is being suppressed. It is only used
|
||||
## internally as part of the mechanics for the global NOTICE function.
|
||||
## internally as part of the mechanics for the global :bro:id:`NOTICE`
|
||||
## function.
|
||||
function is_being_suppressed(n: Notice::Info): bool
|
||||
{
|
||||
if ( n?$identifier && [n$note, n$identifier] in suppressing )
|
||||
|
@ -421,7 +502,7 @@ function is_being_suppressed(n: Notice::Info): bool
|
|||
else
|
||||
return F;
|
||||
}
|
||||
|
||||
|
||||
# Executes a script with all of the notice fields put into the
|
||||
# new process' environment as "BRO_ARG_<field>" variables.
|
||||
function execute_with_notice(cmd: string, n: Notice::Info)
|
||||
|
@ -430,9 +511,9 @@ function execute_with_notice(cmd: string, n: Notice::Info)
|
|||
#local tgs = tags(n);
|
||||
#system_env(cmd, tags);
|
||||
}
|
||||
|
||||
# This is run synchronously as a function before all of the other
|
||||
# notice related functions and events. It also modifies the
|
||||
|
||||
# This is run synchronously as a function before all of the other
|
||||
# notice related functions and events. It also modifies the
|
||||
# :bro:type:`Notice::Info` record in place.
|
||||
function apply_policy(n: Notice::Info)
|
||||
{
|
||||
|
@ -447,7 +528,7 @@ function apply_policy(n: Notice::Info)
|
|||
if ( ! n?$uid )
|
||||
n$uid = n$conn$uid;
|
||||
}
|
||||
|
||||
|
||||
if ( n?$id )
|
||||
{
|
||||
if ( ! n?$src )
|
||||
|
@ -458,8 +539,12 @@ function apply_policy(n: Notice::Info)
|
|||
n$p = n$id$resp_p;
|
||||
}
|
||||
|
||||
if ( n?$p )
|
||||
n$proto = get_port_transport_proto(n$p);
|
||||
|
||||
if ( n?$iconn )
|
||||
{
|
||||
n$proto = icmp;
|
||||
if ( ! n?$src )
|
||||
n$src = n$iconn$orig_h;
|
||||
if ( ! n?$dst )
|
||||
|
@ -469,15 +554,20 @@ function apply_policy(n: Notice::Info)
|
|||
if ( ! n?$src_peer )
|
||||
n$src_peer = get_event_peer();
|
||||
if ( ! n?$peer_descr )
|
||||
n$peer_descr = n$src_peer?$descr ?
|
||||
n$peer_descr = n$src_peer?$descr ?
|
||||
n$src_peer$descr : fmt("%s", n$src_peer$host);
|
||||
|
||||
|
||||
if ( ! n?$actions )
|
||||
n$actions = set();
|
||||
|
||||
|
||||
if ( ! n?$email_body_sections )
|
||||
n$email_body_sections = vector();
|
||||
if ( ! n?$email_delay_tokens )
|
||||
n$email_delay_tokens = set();
|
||||
|
||||
if ( ! n?$policy_items )
|
||||
n$policy_items = set();
|
||||
|
||||
|
||||
for ( i in ordered_policy )
|
||||
{
|
||||
# If there's no predicate or the predicate returns F.
|
||||
|
@ -485,51 +575,51 @@ function apply_policy(n: Notice::Info)
|
|||
{
|
||||
add n$actions[ordered_policy[i]$action];
|
||||
add n$policy_items[int_to_count(i)];
|
||||
|
||||
# If the predicate matched and there was a suppression interval,
|
||||
|
||||
# If the predicate matched and there was a suppression interval,
|
||||
# apply it to the notice now.
|
||||
if ( ordered_policy[i]?$suppress_for )
|
||||
n$suppress_for = ordered_policy[i]$suppress_for;
|
||||
|
||||
|
||||
# If the policy item wants to halt policy processing, do it now!
|
||||
if ( ordered_policy[i]$halt )
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# Apply the suppression time after applying the policy so that policy
|
||||
# items can give custom suppression intervals. If there is no
|
||||
# items can give custom suppression intervals. If there is no
|
||||
# suppression interval given yet, the default is applied.
|
||||
if ( ! n?$suppress_for )
|
||||
n$suppress_for = default_suppression_interval;
|
||||
|
||||
|
||||
# Delete the connection record if it's there so we aren't sending that
|
||||
# to remote machines. It can cause problems due to the size of the
|
||||
# to remote machines. It can cause problems due to the size of the
|
||||
# connection record.
|
||||
if ( n?$conn )
|
||||
delete n$conn;
|
||||
if ( n?$iconn )
|
||||
delete n$iconn;
|
||||
}
|
||||
|
||||
# Create the ordered notice policy automatically which will be used at runtime
|
||||
|
||||
# Create the ordered notice policy automatically which will be used at runtime
|
||||
# for prioritized matching of the notice policy.
|
||||
event bro_init() &priority=10
|
||||
{
|
||||
# Create the policy log here because it's only written to in this handler.
|
||||
Log::create_stream(Notice::POLICY_LOG, [$columns=PolicyItem]);
|
||||
|
||||
|
||||
local tmp: table[count] of set[PolicyItem] = table();
|
||||
for ( pi in policy )
|
||||
{
|
||||
if ( pi$priority < 0 || pi$priority > 10 )
|
||||
Reporter::fatal("All Notice::PolicyItem priorities must be within 0 and 10");
|
||||
|
||||
|
||||
if ( pi$priority !in tmp )
|
||||
tmp[pi$priority] = set();
|
||||
add tmp[pi$priority][pi];
|
||||
}
|
||||
|
||||
|
||||
local rev_count = vector(10,9,8,7,6,5,4,3,2,1,0);
|
||||
for ( i in rev_count )
|
||||
{
|
||||
|
@ -545,7 +635,7 @@ event bro_init() &priority=10
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function internal_NOTICE(n: Notice::Info)
|
||||
{
|
||||
# Suppress this notice if necessary.
|
||||
|
|
|
@ -1,3 +1,12 @@
|
|||
##! This script provides a default set of actions to take for "weird activity"
|
||||
##! events generated from Bro's event engine. Weird activity is defined as
|
||||
##! unusual or exceptional activity that can indicate malformed connections,
|
||||
##! traffic that doesn't conform to a particular protocol, malfunctioning
|
||||
##! or misconfigured hardware, or even an attacker attempting to avoid/confuse
|
||||
##! a sensor. Without context, it's hard to judge whether a particular
|
||||
##! category of weird activity is interesting, but this script provides
|
||||
##! a starting point for the user.
|
||||
|
||||
@load base/utils/conn-ids
|
||||
@load base/utils/site
|
||||
@load ./main
|
||||
|
@ -5,6 +14,7 @@
|
|||
module Weird;
|
||||
|
||||
export {
|
||||
## The weird logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
redef enum Notice::Type += {
|
||||
|
@ -12,6 +22,7 @@ export {
|
|||
Activity,
|
||||
};
|
||||
|
||||
## The record type which contains the column fields of the weird log.
|
||||
type Info: record {
|
||||
## The time when the weird occurred.
|
||||
ts: time &log;
|
||||
|
@ -32,19 +43,32 @@ export {
|
|||
peer: string &log &optional;
|
||||
};
|
||||
|
||||
## Types of actions that may be taken when handling weird activity events.
|
||||
type Action: enum {
|
||||
## A dummy action indicating the user does not care what internal
|
||||
## decision is made regarding a given type of weird.
|
||||
ACTION_UNSPECIFIED,
|
||||
## No action is to be taken.
|
||||
ACTION_IGNORE,
|
||||
## Log the weird event every time it occurs.
|
||||
ACTION_LOG,
|
||||
## Log the weird event only once.
|
||||
ACTION_LOG_ONCE,
|
||||
## Log the weird event once per connection.
|
||||
ACTION_LOG_PER_CONN,
|
||||
## Log the weird event once per originator host.
|
||||
ACTION_LOG_PER_ORIG,
|
||||
## Always generate a notice associated with the weird event.
|
||||
ACTION_NOTICE,
|
||||
## Generate a notice associated with the weird event only once.
|
||||
ACTION_NOTICE_ONCE,
|
||||
## Generate a notice for the weird event once per connection.
|
||||
ACTION_NOTICE_PER_CONN,
|
||||
## Generate a notice for the weird event once per originator host.
|
||||
ACTION_NOTICE_PER_ORIG,
|
||||
};
|
||||
|
||||
## A table specifying default/recommended actions per weird type.
|
||||
const actions: table[string] of Action = {
|
||||
["unsolicited_SYN_response"] = ACTION_IGNORE,
|
||||
["above_hole_data_without_any_acks"] = ACTION_LOG,
|
||||
|
@ -201,7 +225,7 @@ export {
|
|||
["fragment_overlap"] = ACTION_LOG_PER_ORIG,
|
||||
["fragment_protocol_inconsistency"] = ACTION_LOG,
|
||||
["fragment_size_inconsistency"] = ACTION_LOG_PER_ORIG,
|
||||
## These do indeed happen!
|
||||
# These do indeed happen!
|
||||
["fragment_with_DF"] = ACTION_LOG,
|
||||
["incompletely_captured_fragment"] = ACTION_LOG,
|
||||
["bad_IP_checksum"] = ACTION_LOG_PER_ORIG,
|
||||
|
@ -215,8 +239,8 @@ export {
|
|||
## and weird name into this set.
|
||||
const ignore_hosts: set[addr, string] &redef;
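For example, to silence checksum weirds from a single known-broken host (the address is illustrative):

    redef Weird::ignore_hosts += { [10.0.0.1, "bad_TCP_checksum"] };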
|
||||
|
||||
# But don't ignore these (for the weird file), it's handy keeping
|
||||
# track of clustered checksum errors.
|
||||
## Don't ignore repeats for weirds in this set. For example,
|
||||
## it's handy keeping track of clustered checksum errors.
|
||||
const weird_do_not_ignore_repeats = {
|
||||
"bad_IP_checksum", "bad_TCP_checksum", "bad_UDP_checksum",
|
||||
"bad_ICMP_checksum",
|
||||
|
@ -236,7 +260,11 @@ export {
|
|||
## A state set which tracks unique weirds solely by the name to reduce
|
||||
## duplicate notices from being raised.
|
||||
global did_notice: set[string, string] &create_expire=1day &redef;
|
||||
|
||||
|
||||
## Handlers of this event are invoked one per write to the weird
|
||||
## logging stream before the data is actually written.
|
||||
##
|
||||
## rec: The weird columns about to be logged to the weird stream.
|
||||
global log_weird: event(rec: Info);
|
||||
}
|
||||
|
||||
|
|
|
@ -9,17 +9,22 @@
|
|||
module PacketFilter;
|
||||
|
||||
export {
|
||||
## Add the packet filter logging stream.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
|
||||
## Add notice types related to packet filter errors.
|
||||
redef enum Notice::Type += {
|
||||
## This notice is generated if a packet filter is unable to be compiled.
|
||||
Compile_Failure,
|
||||
|
||||
## This notice is generated if a packet filter is unable to be installed.
|
||||
## This notice is generated if a packet filter fails to install.
|
||||
Install_Failure,
|
||||
};
|
||||
|
||||
|
||||
## The record type defining columns to be logged in the packet filter
|
||||
## logging stream.
|
||||
type Info: record {
|
||||
## The time at which the packet filter installation attempt was made.
|
||||
ts: time &log;
|
||||
|
||||
## This is a string representation of the node that applied this
|
||||
|
@ -40,7 +45,7 @@ export {
|
|||
## By default, Bro will examine all packets. If this is set to false,
|
||||
## it will dynamically build a BPF filter that only selects protocols
|
||||
## for which the user has loaded a corresponding analysis script.
|
||||
## The latter used to be default for Bro versions < 1.6. That has now
|
||||
## The latter used to be default for Bro versions < 2.0. That has now
|
||||
## changed however to enable port-independent protocol analysis.
|
||||
const all_packets = T &redef;
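A site that only wants to capture traffic for the protocols it actually analyzes can flip this in local.bro:

    redef PacketFilter::all_packets = F;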
|
||||
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
##! This script reports on packet loss from the various packet sources.
|
||||
##! When Bro is reading input from trace files, this script will not
|
||||
##! report any packet loss statistics.
|
||||
|
||||
@load base/frameworks/notice
|
||||
|
||||
|
@ -6,7 +8,7 @@ module PacketFilter;
|
|||
|
||||
export {
|
||||
redef enum Notice::Type += {
|
||||
## Bro reported packets dropped by the packet filter.
|
||||
## Indicates packets were dropped by the packet filter.
|
||||
Dropped_Packets,
|
||||
};
|
||||
|
||||
|
|
|
@ -1,21 +1,36 @@
|
|||
##! This framework is intended to create an output and filtering path for
|
||||
##! internal messages/warnings/errors. It should typically be loaded to
|
||||
##! avoid Bro spewing internal messages to standard error.
|
||||
##! avoid Bro spewing internal messages to standard error and instead log
|
||||
##! them to a file in a standard way. Note that this framework deals with
|
||||
##! the handling of internally-generated reporter messages, for the
|
||||
##! interface into actually creating reporter messages from the scripting
|
||||
##! layer, use the built-in functions in :doc:`/scripts/base/reporter.bif`.
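For instance, a script can emit a warning into this stream from the scripting layer (a sketch, assuming the standard BiF names from reporter.bif):

    event bro_init()
        {
        Reporter::warning("example warning generated from the scripting layer");
        }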
|
||||
|
||||
module Reporter;
|
||||
|
||||
export {
|
||||
## The reporter logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
|
||||
## An indicator of reporter message severity.
|
||||
type Level: enum {
|
||||
## Informational, not needing specific attention.
|
||||
INFO,
|
||||
## Warning of a potential problem.
|
||||
WARNING,
|
||||
## A non-fatal error that should be addressed, but doesn't
|
||||
## terminate program execution.
|
||||
ERROR
|
||||
};
|
||||
|
||||
|
||||
## The record type which contains the column fields of the reporter log.
|
||||
type Info: record {
|
||||
## The network time at which the reporter event was generated.
|
||||
ts: time &log;
|
||||
## The severity of the reporter message.
|
||||
level: Level &log;
|
||||
## An info/warning/error message that could have either been
|
||||
## generated from the internal Bro core or at the scripting-layer.
|
||||
message: string &log;
|
||||
## This is the location in a Bro script where the message originated.
|
||||
## Not all reporter messages will have locations in them though.
|
||||
|
|
|
@ -1,30 +1,36 @@
|
|||
##! Script level signature support.
|
||||
##! Script level signature support. See the
|
||||
##! :doc:`signature documentation </signatures>` for more information about
|
||||
##! Bro's signature engine.
|
||||
|
||||
@load base/frameworks/notice
|
||||
|
||||
module Signatures;
|
||||
|
||||
export {
|
||||
## Add various signature-related notice types.
|
||||
redef enum Notice::Type += {
|
||||
## Generic for alarm-worthy
|
||||
## Generic notice type for notice-worthy signature matches.
|
||||
Sensitive_Signature,
|
||||
## Host has triggered many signatures on the same host. The number of
|
||||
## signatures is defined by the :bro:id:`vert_scan_thresholds` variable.
|
||||
## signatures is defined by the
|
||||
## :bro:id:`Signatures::vert_scan_thresholds` variable.
|
||||
Multiple_Signatures,
|
||||
## Host has triggered the same signature on multiple hosts as defined by the
|
||||
## :bro:id:`horiz_scan_thresholds` variable.
|
||||
## Host has triggered the same signature on multiple hosts as defined
|
||||
## by the :bro:id:`Signatures::horiz_scan_thresholds` variable.
|
||||
Multiple_Sig_Responders,
|
||||
## The same signature has triggered multiple times for a host. The number
|
||||
## of times the signature has be trigger is defined by the
|
||||
## :bro:id:`count_thresholds` variable. To generate this notice, the
|
||||
## :bro:enum:`SIG_COUNT_PER_RESP` action must be set for the signature.
|
||||
## The same signature has triggered multiple times for a host. The
|
||||
## number of times the signature has been triggered is defined by the
|
||||
## :bro:id:`Signatures::count_thresholds` variable. To generate this
|
||||
## notice, the :bro:enum:`Signatures::SIG_COUNT_PER_RESP` action must
|
||||
## be set for the signature.
|
||||
Count_Signature,
|
||||
## Summarize the number of times a host triggered a signature. The
|
||||
## interval between summaries is defined by the :bro:id:`summary_interval`
|
||||
## variable.
|
||||
## interval between summaries is defined by the
|
||||
## :bro:id:`Signatures::summary_interval` variable.
|
||||
Signature_Summary,
|
||||
};
|
||||
|
||||
## The signature logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
## These are the default actions you can apply to signature matches.
|
||||
|
@ -39,8 +45,8 @@ export {
|
|||
SIG_QUIET,
|
||||
## Generate a notice.
|
||||
SIG_LOG,
|
||||
## The same as :bro:enum:`SIG_FILE`, but ignore for aggregate/scan
|
||||
## processing.
|
||||
## The same as :bro:enum:`Signatures::SIG_LOG`, but ignore for
|
||||
## aggregate/scan processing.
|
||||
SIG_FILE_BUT_NO_SCAN,
|
||||
## Generate a notice and set it to be alarmed upon.
|
||||
SIG_ALARM,
|
||||
|
@ -49,22 +55,33 @@ export {
|
|||
## Alarm once and then never again.
|
||||
SIG_ALARM_ONCE,
|
||||
## Count signatures per responder host and alarm with the
|
||||
## :bro:enum:`Count_Signature` notice if a threshold defined by
|
||||
## :bro:id:`count_thresholds` is reached.
|
||||
## :bro:enum:`Signatures::Count_Signature` notice if a threshold
|
||||
## defined by :bro:id:`Signatures::count_thresholds` is reached.
|
||||
SIG_COUNT_PER_RESP,
|
||||
## Don't alarm, but generate per-orig summary.
|
||||
SIG_SUMMARY,
|
||||
};
|
||||
|
||||
|
||||
## The record type which contains the column fields of the signature log.
|
||||
type Info: record {
|
||||
## The network time at which a signature matching type of event to
|
||||
## be logged has occurred.
|
||||
ts: time &log;
|
||||
## The host which triggered the signature match event.
|
||||
src_addr: addr &log &optional;
|
||||
## The host port on which the signature-matching activity occurred.
|
||||
src_port: port &log &optional;
|
||||
## The destination host which was sent the payload that triggered the
|
||||
## signature match.
|
||||
dst_addr: addr &log &optional;
|
||||
## The destination host port which was sent the payload that triggered
|
||||
## the signature match.
|
||||
dst_port: port &log &optional;
|
||||
## Notice associated with signature event
|
||||
note: Notice::Type &log;
|
||||
## The name of the signature that matched.
|
||||
sig_id: string &log &optional;
|
||||
## A more descriptive message of the signature-matching event.
|
||||
event_msg: string &log &optional;
|
||||
## Extracted payload data or extra message.
|
||||
sub_msg: string &log &optional;
|
||||
|
@ -82,22 +99,26 @@ export {
|
|||
## Signature IDs that should always be ignored.
|
||||
const ignored_ids = /NO_DEFAULT_MATCHES/ &redef;
|
||||
|
||||
## Alarm if, for a pair [orig, signature], the number of different
|
||||
## responders has reached one of the thresholds.
|
||||
## Generate a notice if, for a pair [orig, signature], the number of
|
||||
## different responders has reached one of the thresholds.
|
||||
const horiz_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef;
|
||||
|
||||
## Alarm if, for a pair [orig, resp], the number of different signature
|
||||
## matches has reached one of the thresholds.
|
||||
## Generate a notice if, for a pair [orig, resp], the number of different
|
||||
## signature matches has reached one of the thresholds.
|
||||
const vert_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef;
|
||||
|
||||
## Alarm if a :bro:enum:`SIG_COUNT_PER_RESP` signature is triggered as
|
||||
## often as given by one of these thresholds.
|
||||
## Generate a notice if a :bro:enum:`Signatures::SIG_COUNT_PER_RESP`
|
||||
## signature is triggered as often as given by one of these thresholds.
|
||||
const count_thresholds = { 5, 10, 50, 100, 500, 1000, 10000, 1000000, } &redef;
|
||||
|
||||
## The interval between when :bro:id:`Signature_Summary` notices are
|
||||
## generated.
|
||||
## The interval between when :bro:enum:`Signatures::Signature_Summary`
|
||||
## notices are generated.
|
||||
const summary_interval = 1 day &redef;
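These knobs are all redef-able; for example, a site could tighten the horizontal scan thresholds and summarize more often (the values are illustrative only):

    redef Signatures::horiz_scan_thresholds = { 3, 5, 10, 50, 100 };
    redef Signatures::summary_interval = 6 hrs;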
|
||||
|
||||
|
||||
## This event can be handled to access/alter data about to be logged
|
||||
## to the signature logging stream.
|
||||
##
|
||||
## rec: The record of signature data about to be logged.
|
||||
global log_signature: event(rec: Info);
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
##! This script provides the framework for software version detection and
|
||||
##! parsing, but doesn't actually do any detection on it's own. It relys on
|
||||
##! parsing but doesn't actually do any detection on its own. It relies on
|
||||
##! other protocol specific scripts to parse out software from the protocols
|
||||
##! that they analyze. The entry point for providing new software detections
|
||||
##! to this framework is through the :bro:id:`Software::found` function.
|
||||
|
@ -10,24 +10,27 @@
|
|||
module Software;
|
||||
|
||||
export {
|
||||
|
||||
## The software logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
|
||||
## Scripts detecting new types of software need to redef this enum to add
|
||||
## their own specific software types which would then be used when they
|
||||
## create :bro:type:`Software::Info` records.
|
||||
type Type: enum {
|
||||
## A placeholder type for when the type of software is not known.
|
||||
UNKNOWN,
|
||||
OPERATING_SYSTEM,
|
||||
DATABASE_SERVER,
|
||||
# There are a number of ways to detect printers on the
|
||||
# network, we just need to codify them in a script and move
|
||||
# this out of here. It isn't currently used for anything.
|
||||
PRINTER,
|
||||
};
|
||||
|
||||
|
||||
## A structure to represent the numeric version of software.
|
||||
type Version: record {
|
||||
major: count &optional; ##< Major version number
|
||||
minor: count &optional; ##< Minor version number
|
||||
minor2: count &optional; ##< Minor subversion number
|
||||
addl: string &optional; ##< Additional version string (e.g. "beta42")
|
||||
## Major version number
|
||||
major: count &optional;
|
||||
## Minor version number
|
||||
minor: count &optional;
|
||||
## Minor subversion number
|
||||
minor2: count &optional;
|
||||
## Additional version string (e.g. "beta42")
|
||||
addl: string &optional;
|
||||
} &log;
|
||||
|
||||
type SoftwareDescription: record {
|
||||
|
@ -36,24 +39,24 @@ export {
|
|||
unparsed_version: string;
|
||||
};
|
||||
|
||||
## Record that is used to add and log software information.
|
||||
|
||||
## The record type that is used for representing and logging software.
|
||||
type Info: record {
|
||||
## The time at which the software was first detected.
|
||||
ts: time &log &optional;
|
||||
## The time at which the software was detected.
|
||||
ts: time &log;
|
||||
## The IP address detected running the software.
|
||||
host: addr &log;
|
||||
host: addr &log;
|
||||
## The port on which the software is running. Only sensible for server software.
|
||||
host_p: port &log &optional;
|
||||
## The transport protocol that is being used. Only sensible for server software.
|
||||
proto: transport_proto &log &optional;
|
||||
## The type of software detected (e.g. WEB_SERVER)
|
||||
## The type of software detected (e.g. :bro:enum:`HTTP::SERVER`).
|
||||
software_type: Type &log &default=UNKNOWN;
|
||||
## Name of the software (e.g. Apache)
|
||||
name: string &log &optional;
|
||||
## Version of the software
|
||||
version: Version &log &optional;
|
||||
## Name of the software (e.g. Apache).
|
||||
name: string &log;
|
||||
## Version of the software.
|
||||
version: Version &log;
|
||||
## The full unparsed version string found because the version parsing
|
||||
## doesn't work 100% reliably and this acts as a fall back in the logs.
|
||||
## doesn't always work reliably in all cases and this acts as a
|
||||
## fallback in the logs.
|
||||
unparsed_version: string &log &optional;
|
||||
|
||||
## This can indicate that this software being detected should
|
||||
|
@ -65,36 +68,43 @@ export {
|
|||
## needs to happen in a specific way to the software.
|
||||
force_log: bool &default=F;
|
||||
};
|
||||
|
||||
## The hosts whose software should be detected and tracked.
|
||||
|
||||
## Hosts whose software should be detected and tracked.
|
||||
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS
|
||||
const asset_tracking = LOCAL_HOSTS &redef;
|
||||
|
||||
|
||||
## Other scripts should call this function when they detect software.
|
||||
## unparsed_version: This is the full string from which the
|
||||
## :bro:type:`Software::Info` was extracted.
|
||||
##
|
||||
## id: The connection id where the software was discovered.
|
||||
##
|
||||
## info: A record representing the software discovered.
|
||||
##
|
||||
## Returns: T if the software was logged, F otherwise.
|
||||
global found: function(id: conn_id, info: Info): bool;
|
||||
|
||||
## This function can take many software version strings and parse them
|
||||
## Take many common software version strings and parse them
|
||||
## into a sensible :bro:type:`Software::Version` record. There are
|
||||
## still many cases where scripts may have to have their own specific
|
||||
## version parsing though.
|
||||
##
|
||||
## unparsed_version: The raw version string.
|
||||
##
|
||||
## Returns: A complete record ready for the :bro:id:`Software::found` function.
|
||||
global parse: function(unparsed_version: string): SoftwareDescription;
|
||||
|
||||
## Compare two versions.
|
||||
|
||||
## Compare two version records.
|
||||
##
|
||||
## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2.
|
||||
## If the numerical version numbers match, the addl string
|
||||
## is compared lexicographically.
|
||||
global cmp_versions: function(v1: Version, v2: Version): int;
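A small sketch of comparing two versions with this function (the version values are made up):

    event bro_init()
        {
        local older: Software::Version = [$major=2, $minor=2, $minor2=3];
        local newer: Software::Version = [$major=2, $minor=2, $minor2=14];
        if ( Software::cmp_versions(older, newer) < 0 )
            print "2.2.3 sorts before 2.2.14";
        }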
|
||||
|
||||
## This type represents a set of software. It's used by the
|
||||
## :bro:id:`tracked` variable to store all known pieces of software
|
||||
## for a particular host. It's indexed with the name of a piece of
|
||||
## software such as "Firefox" and it yields a
|
||||
## :bro:type:`Software::Info` record with more information about the
|
||||
## software.
|
||||
## Type to represent a collection of :bro:type:`Software::Info` records.
|
||||
## It's indexed with the name of a piece of software such as "Firefox"
|
||||
## and it yields a :bro:type:`Software::Info` record with more information
|
||||
## about the software.
|
||||
type SoftwareSet: table[string] of Info;
|
||||
|
||||
## The set of software associated with an address. Data expires from
|
||||
|
|