Hopefully the last major script reorganization.

- policy/ renamed to scripts/

- By default BROPATH now contains:
	- scripts/
	- scripts/policy
	- scripts/site

- *Nearly* all tests pass.

- All of scripts/base/ is loaded by main.cc
	- Can be disabled by setting $BRO_NO_BASE_SCRIPTS
	- Scripts in scripts/base/ don't use relative-path loading, which makes BRO_NO_BASE_SCRIPTS easier to use: the @load lines from scripts/base/all.bro can simply be copied and pasted into your own script (see the sketch after the commit metadata below).

- The scripts in scripts/base/protocols/ do (or soon will do) only logging and state building.

- The scripts in scripts/base/frameworks/ add functionality without causing any additional overhead.

- All "detection" activity happens through scripts in scripts/policy/.

- The communication framework temporarily requires an environment variable (ENABLE_COMMUNICATION=1) to actually enable it.
	- This is so the communications framework can be loaded as part
	  of the base without causing trouble when it's not needed.
	- This will be removed once a resolution to ticket #540 is reached.
Author: Seth Hall
Date:   2011-08-05 23:09:53 -04:00
Commit: 597a4d6704  (parent 68171cf179)
257 changed files with 1311 additions and 1225 deletions
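
For illustration, a minimal local script under the new layout might look like the
sketch below (the file name and the particular selection of @load lines are
hypothetical). BRO_NO_BASE_SCRIPTS would be set in the environment before starting
Bro, and for now ENABLE_COMMUNICATION=1 is additionally required if the
communication framework should actually be active.

# minimal-local.bro -- hypothetical example.  Run with BRO_NO_BASE_SCRIPTS set in
# the environment so that scripts/base/all.bro loads nothing, then pull in only
# the pieces that are actually needed.  The @load lines are copied from
# scripts/base/all.bro; base scripts avoid relative paths, so they can be loaded
# from any script.
@load base/utils/site
@load base/frameworks/notice
@load base/protocols/http

# Detection-oriented scripts from scripts/policy/ can be @loaded on top as desired.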

scripts/CMakeLists.txt (new file)
@@ -0,0 +1,18 @@
include(InstallPackageConfigFile)
install(DIRECTORY ./ DESTINATION ${POLICYDIR} FILES_MATCHING
PATTERN "all.bro" EXCLUDE
PATTERN "site/local.bro" EXCLUDE
PATTERN "bro.init"
PATTERN "*.bro"
PATTERN "*.sig"
PATTERN "*.osf"
)
# Install as a config file since the local.bro script is meant to be
# user modifiable.
InstallPackageConfigFile(
${CMAKE_CURRENT_SOURCE_DIR}/site/local.bro
${POLICYDIR}/site
local.bro)

scripts/all.bro (new file)
@@ -0,0 +1,23 @@
##! This script only aims at loading all of the base analysis scripts.
#@load protocols/conn
#@load protocols/dns
#@load protocols/ftp
#@load protocols/http
#@load protocols/irc
#@load protocols/mime
#@load protocols/smtp
#@load protocols/ssh
@load protocols/ssl
#@load protocols/syslog
@load frameworks/metrics
@load frameworks/notice
@load frameworks/signatures
@load frameworks/software
@load frameworks/reporter
@load frameworks/cluster
@load tuning/defaults
@load misc/loaded-scripts

scripts/base/all.bro (new file)
@@ -0,0 +1,44 @@
##! This script loads everything in the base/ script directory. If you want
##! to run Bro without all of these scripts loaded by default, you can define
##! the BRO_NO_BASE_SCRIPTS environment variable to any value. You can also
##! copy the "@load" lines from this script to your own script to load only
##! the scripts that you actually want.
@if ( getenv("BRO_NO_BASE_SCRIPTS") == "" )
@load base/utils/site
@load base/utils/addrs
@load base/utils/conn-ids
@load base/utils/directions-and-hosts
@load base/utils/files
@load base/utils/numbers
@load base/utils/paths
@load base/utils/patterns
@load base/utils/strings
@load base/utils/thresholds
# This has some weird interplay between types and BiFs so it's loaded in bro.init
#@load base/frameworks/logging
@load base/frameworks/notice
@load base/frameworks/dpd
@load base/frameworks/signatures
@load base/frameworks/packet-filter
@load base/frameworks/software
@load base/frameworks/intel
@load base/frameworks/metrics
@load base/frameworks/communication
@load base/frameworks/control
@load base/frameworks/cluster
@load base/frameworks/reporter
@load base/protocols/conn
@load base/protocols/dns
@load base/protocols/ftp
@load base/protocols/http
@load base/protocols/irc
@load base/protocols/smtp
@load base/protocols/ssh
@load base/protocols/ssl
@load base/protocols/syslog
@endif

scripts/base/bro.init (new file, 1488 lines -- diff omitted because it is too large)

@@ -0,0 +1,46 @@
# Load the core cluster support.
@load ./main
@if ( Cluster::node != "" )
# Give the node being started up its peer name.
redef peer_description = Cluster::node;
# Add a cluster prefix.
@prefixes += cluster
# Make this a controllable node since all cluster nodes are inherently
# controllable.
@load frameworks/control/controllee
## If this script isn't found anywhere, the cluster bombs out.
## Loading the cluster framework requires that a script by this name exists
## somewhere in the BROPATH. The only thing in the file should be the
## cluster definition in the :bro:id:`Cluster::nodes` variable.
@load cluster-layout
@if ( Cluster::node in Cluster::nodes )
@load ./setup-connections
# Don't start the listening process until we're a bit more sure that the
# cluster framework is actually being enabled.
@load frameworks/communication/listen-clear
## Set the port that this node is supposed to listen on.
redef Communication::listen_port_clear = Cluster::nodes[Cluster::node]$p;
@if ( Cluster::nodes[Cluster::node]$node_type == Cluster::MANAGER )
@load ./nodes/manager
@endif
@if ( Cluster::nodes[Cluster::node]$node_type == Cluster::PROXY )
@load ./nodes/proxy
@endif
@if ( Cluster::nodes[Cluster::node]$node_type == Cluster::WORKER )
@load ./nodes/worker
@endif
@endif
@endif

@@ -0,0 +1,68 @@
module Cluster;
export {
redef enum Log::ID += { CLUSTER };
type Info: record {
ts: time;
message: string;
} &log;
type NodeType: enum {
CONTROL,
MANAGER,
PROXY,
WORKER,
TIME_MACHINE,
};
## Events raised by the manager and handled by the workers.
const manager_events = /Drop::.*/ &redef;
## Events raised by the proxies and handled by the manager.
const proxy_events = /Notice::notice/ &redef;
## Events raised by workers and handled by the manager.
const worker_events = /(Notice::notice|TimeMachine::command|Drop::.*)/ &redef;
## Events sent by the control host (i.e. BroControl) when dynamically
## connecting to a running instance to update settings or request data.
const control_events = Control::controller_events &redef;
## Record type to indicate a node in a cluster.
type Node: record {
node_type: NodeType;
ip: addr;
p: port;
## Identifier for the interface a worker is sniffing.
interface: string &optional;
## Manager node this node uses. For workers and proxies.
manager: string &optional;
## Proxy node this node uses. For workers and managers.
proxy: string &optional;
## Worker nodes that this node connects with. For managers and proxies.
workers: set[string] &optional;
time_machine: string &optional;
};
const nodes: table[string] of Node = {} &redef;
## This is usually supplied on the command line for each instance
## of the cluster that is started up.
const node = getenv("CLUSTER_NODE") &redef;
}
event bro_init()
{
# If a node is given, but it's an unknown name we need to fail.
if ( node != "" && node !in nodes )
{
local msg = "You didn't supply a valid node in the Cluster::nodes configuration.";
event reporter_error(current_time(), msg, "");
terminate();
}
Log::create_stream(CLUSTER, [$columns=Info]);
}
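
As noted in the cluster loader above, a script named cluster-layout must exist
somewhere in BROPATH, containing nothing but the Cluster::nodes definition. A
minimal sketch of such a layout using the Node record just defined (node names,
addresses, and ports below are invented):

# cluster-layout.bro -- hypothetical example; adjust names, addresses, and ports.
redef Cluster::nodes = {
    ["manager"]  = [$node_type=Cluster::MANAGER, $ip=10.0.0.1, $p=47761/tcp,
                    $workers=set("worker-1")],
    ["proxy-1"]  = [$node_type=Cluster::PROXY,   $ip=10.0.0.1, $p=47771/tcp,
                    $manager="manager", $workers=set("worker-1")],
    ["worker-1"] = [$node_type=Cluster::WORKER,  $ip=10.0.0.2, $p=47781/tcp,
                    $interface="eth0", $manager="manager", $proxy="proxy-1"],
};

Each instance is then started with the CLUSTER_NODE environment variable set to one
of these keys, which is what the getenv("CLUSTER_NODE") default above picks up.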

@@ -0,0 +1,27 @@
##! This is the core Bro script to support the notion of a cluster manager.
##!
##! The manager is passive (the workers connect to us), and once connected
##! the manager registers for the events on the workers that are needed
##! to get the desired data from the workers. This script will be
##! automatically loaded if necessary based on the type of node being started.
##! This is where the cluster manager sets its specific settings for other
##! frameworks and in the core.
@prefixes += cluster-manager
## Turn off remote logging since this is the manager and should only log here.
redef Log::enable_remote_logging = F;
## Use the cluster's archive logging script.
redef Log::default_rotation_postprocessor = "archive-log";
## We're processing essentially *only* remote events.
redef max_remote_events_processed = 10000;
# Reraise remote notices locally.
event Notice::notice(n: Notice::Info)
{
if ( is_remote_event() )
NOTICE(n);
}

@@ -0,0 +1,16 @@
@prefixes += cluster-proxy
## The proxy only syncs state; does not forward events.
redef forward_remote_events = F;
redef forward_remote_state_changes = T;
## Don't do any local logging.
redef Log::enable_local_logging = F;
## Make sure that remote logging is enabled.
redef Log::enable_remote_logging = T;
## Use the cluster's delete-log script.
redef Log::default_rotation_postprocessor = "delete-log";

@@ -0,0 +1,27 @@
@prefixes += cluster-worker
## Don't do any local logging.
redef Log::enable_local_logging = F;
## Make sure that remote logging is enabled.
redef Log::enable_remote_logging = T;
## Use the cluster's delete-log script.
redef Log::default_rotation_postprocessor = "delete-log";
## Record all packets into trace file.
# TODO: should we really be setting this to T?
redef record_all_packets = T;
# Workers need to have a filter for the notice log which doesn't
# do remote logging since we forward the notice event directly.
event bro_init()
{
Log::add_filter(Notice::NOTICE,
[
$name="cluster-worker",
$pred=function(rec: Notice::Info): bool { return F; }
]
);
}

@@ -0,0 +1,79 @@
module Cluster;
event bro_init() &priority=9
{
local me = nodes[node];
for ( i in Cluster::nodes )
{
local n = nodes[i];
# Connections from the control node for runtime control and update events.
# Every node in a cluster is eligible for control from this host.
if ( n$node_type == CONTROL )
Communication::nodes["control"] = [$host=n$ip, $connect=F,
$class="control", $events=control_events];
if ( me$node_type == MANAGER )
{
if ( n$node_type == WORKER && n$manager == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F,
$class=i, $events=worker_events, $request_logs=T];
if ( n$node_type == PROXY && n$manager == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F,
$class=i, $events=proxy_events, $request_logs=T];
if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i )
Communication::nodes["time-machine"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
$connect=T, $retry=1min];
}
else if ( me$node_type == PROXY )
{
if ( n$node_type == WORKER && n$proxy == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F, $class=i, $events=worker_events];
# accepts connections from the previous one.
# (This is not ideal for setups with many proxies)
# FIXME: Once we're using multiple proxies, we should also figure out some $class scheme ...
if ( n$node_type == PROXY )
{
if ( n?$proxy )
Communication::nodes[i]
= [$host=n$ip, $p=n$p,
$connect=T, $auth=F, $sync=T, $retry=1mins];
else if ( me?$proxy && me$proxy == i )
Communication::nodes[me$proxy]
= [$host=nodes[i]$ip, $connect=F, $auth=T, $sync=T];
}
# Finally the manager, to send it status updates.
if ( n$node_type == MANAGER && me$manager == i )
Communication::nodes["manager"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
$connect=T, $retry=1mins,
$class=node];
}
else if ( me$node_type == WORKER )
{
if ( n$node_type == MANAGER && me$manager == i )
Communication::nodes["manager"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
$connect=T, $retry=1mins,
$class=node];
if ( n$node_type == PROXY && me$proxy == i )
Communication::nodes["proxy"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
$connect=T, $retry=1mins,
$class=node];
if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i )
Communication::nodes["time-machine"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
$connect=T, $retry=1min];
}
}
}

@@ -0,0 +1,5 @@
# TODO: get rid of this as soon as the Expr.cc hack is changed.
@if ( getenv("ENABLE_COMMUNICATION") != "" )
@load ./main
@endif

@@ -0,0 +1,289 @@
##! Connect to remote Bro or Broccoli instances to share state and/or transfer
##! events.
module Communication;
export {
redef enum Log::ID += { COMMUNICATION };
const default_port_ssl = 47756/tcp &redef;
const default_port_clear = 47757/tcp &redef;
## Default compression level. Compression level is 0-9, with 0 = no
## compression.
global default_compression = 0 &redef;
type Info: record {
ts: time &log;
peer: string &log &optional;
src_name: string &log &optional;
connected_peer_desc: string &log &optional;
connected_peer_addr: addr &log &optional;
connected_peer_port: port &log &optional;
level: string &log &optional;
message: string &log;
};
## A remote peer to which we would like to talk.
## If there's no entry for a peer, it may still connect
## and request state, but not send us any.
type Node: record {
## Remote address.
host: addr;
## Port of the remote Bro communication endpoint if we are initiating
## the connection based on the :bro:id:`connect` field.
p: port &optional;
## When accepting a connection, the configuration only
## applies if the class matches the one transmitted by
## the peer.
##
## When initiating a connection, the class is sent to
## the other side.
class: string &optional;
## Events requested from remote side.
events: pattern &optional;
## Whether we are going to connect (rather than waiting
## for the other side to connect to us).
connect: bool &default = F;
## If disconnected, reconnect after this many seconds.
retry: interval &default = 0 secs;
## Whether to accept remote events.
accept_input: bool &default = T;
## Whether to perform state synchronization with peer.
sync: bool &default = F;
## Whether to request logs from the peer.
request_logs: bool &default = F;
## When performing state synchronization, whether we consider
## our state to be authoritative. If so, we will send the peer
## our current set when the connection is set up.
## (Only one side can be authoritative)
auth: bool &default = F;
## If not set, no capture filter is sent.
## If set to "", the default capture filter is sent.
capture_filter: string &optional;
## Whether to use SSL-based communication.
ssl: bool &default = F;
## Take-over state from this host (activated by loading hand-over.bro)
hand_over: bool &default = F;
## Compression level is 0-9, with 0 = no compression.
compression: count &default = default_compression;
## The remote peer.
peer: event_peer &optional;
## Indicates the status of the node.
connected: bool &default = F;
};
## The table of Bro or Broccoli nodes that Bro will initiate connections
## to or respond to connections from.
global nodes: table[string] of Node &redef;
global pending_peers: table[peer_id] of Node;
global connected_peers: table[peer_id] of Node;
## Connect to nodes[node], independent of its "connect" flag.
global connect_peer: function(peer: string);
}
const src_names = {
[REMOTE_SRC_CHILD] = "child",
[REMOTE_SRC_PARENT] = "parent",
[REMOTE_SRC_SCRIPT] = "script",
};
event bro_init()
{
Log::create_stream(COMMUNICATION, [$columns=Info]);
}
function do_script_log_common(level: count, src: count, msg: string)
{
Log::write(COMMUNICATION, [$ts = network_time(),
$level = (level == REMOTE_LOG_INFO ? "info" : "error"),
$src_name = src_names[src],
$peer = get_event_peer()$descr,
$message = msg]);
}
# This is a core generated event.
event remote_log(level: count, src: count, msg: string)
{
do_script_log_common(level, src, msg);
}
function do_script_log(p: event_peer, msg: string)
{
do_script_log_common(REMOTE_LOG_INFO, REMOTE_SRC_SCRIPT, msg);
}
function connect_peer(peer: string)
{
local node = nodes[peer];
local p = node$ssl ? default_port_ssl : default_port_clear;
if ( node?$p )
p = node$p;
local class = node?$class ? node$class : "";
local id = connect(node$host, p, class, node$retry, node$ssl);
if ( id == PEER_ID_NONE )
Log::write(COMMUNICATION, [$ts = network_time(),
$peer = get_event_peer()$descr,
$message = "can't trigger connect"]);
pending_peers[id] = node;
}
function setup_peer(p: event_peer, node: Node)
{
if ( node?$events )
{
do_script_log(p, fmt("requesting events matching %s", node$events));
request_remote_events(p, node$events);
}
if ( node?$capture_filter )
{
local filter = node$capture_filter;
if ( filter == "" )
filter = PacketFilter::default_filter;
do_script_log(p, fmt("sending capture_filter: %s", filter));
send_capture_filter(p, filter);
}
if ( node$accept_input )
{
do_script_log(p, "accepting state");
set_accept_state(p, T);
}
set_compression_level(p, node$compression);
if ( node$sync )
{
do_script_log(p, "requesting synchronized state");
request_remote_sync(p, node$auth);
}
if ( node$request_logs )
{
do_script_log(p, "requesting logs");
request_remote_logs(p);
}
node$peer = p;
node$connected = T;
connected_peers[p$id] = node;
}
event remote_connection_established(p: event_peer)
{
if ( is_remote_event() )
return;
do_script_log(p, "connection established");
if ( p$id in pending_peers )
{
# We issued the connect.
local node = pending_peers[p$id];
setup_peer(p, node);
delete pending_peers[p$id];
}
else
{ # The other side connected to us.
local found = F;
for ( i in nodes )
{
node = nodes[i];
if ( node$host == p$host )
{
local c = 0;
# See if classes match = either both have
# the same class, or neither of them has
# a class.
if ( p?$class && p$class != "" )
++c;
if ( node?$class && node$class != "" )
++c;
if ( c == 1 ||
(c == 2 && p$class != node$class) )
next;
found = T;
setup_peer(p, node);
break;
}
}
if ( ! found )
set_compression_level(p, default_compression);
}
complete_handshake(p);
}
event remote_connection_closed(p: event_peer)
{
if ( is_remote_event() )
return;
do_script_log(p, "connection closed");
if ( p$id in connected_peers )
{
local node = connected_peers[p$id];
node$connected = F;
delete connected_peers[p$id];
if ( node$retry != 0secs )
# The core will retry.
pending_peers[p$id] = node;
}
}
event remote_state_inconsistency(operation: string, id: string,
expected_old: string, real_old: string)
{
if ( is_remote_event() )
return;
local msg = fmt("state inconsistency: %s should be %s but is %s before %s",
id, expected_old, real_old, operation);
Log::write(COMMUNICATION, [$ts = network_time(),
$peer = get_event_peer()$descr,
$message = msg]);
}
# Actually initiate the connections that need to be established.
event bro_init() &priority = -10 # let others modify nodes
{
for ( tag in nodes )
{
if ( ! nodes[tag]$connect )
next;
connect_peer(tag);
}
}
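
As a sketch of how the Node record above can be used outside the cluster scripts
(the peer name, address, and requested event pattern are invented), an outbound
peering could be registered as follows; because $connect=T, the bro_init handler
above will initiate the connection:

# Hypothetical peering: connect out to another Bro and request Notice events.
# (For now, ENABLE_COMMUNICATION=1 must also be set in the environment so the
# communication framework is loaded at all.)
redef Communication::nodes += {
    ["peer-1"] = [$host=192.168.1.10, $p=Communication::default_port_clear,
                  $connect=T, $retry=1min, $events=/Notice::notice/],
};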

@@ -0,0 +1 @@
@load ./main

@@ -0,0 +1,87 @@
##! This is a utility script that sends the current values of all &redef'able
##! consts to a remote Bro then sends the :bro:id:`configuration_update` event
##! and terminates processing.
##!
##! Intended to be used from the command line like this when starting a controller:
##! bro <scripts> frameworks/control/controller Control::host=<host_addr> Control::port=<host_port> Control::cmd=<command> [Control::arg=<arg>]
##!
##! A controllee only needs to load the controllee script in addition
##! to the specific analysis scripts desired. It may also need a node
##! configured as a controller node in the communications nodes configuration.
##! bro <scripts> frameworks/control/controllee
##!
##! To use the framework as a controllee, it only needs to be loaded, and
##! the controlled node needs to accept all events in the "Control::" namespace
##! from the host where the control actions will be performed, along with
##! using the "control" class.
module Control;
export {
## This is the address of the host that will be controlled.
const host = 0.0.0.0 &redef;
## This is the port of the host that will be controlled.
const host_port = 0/tcp &redef;
## This is the command that is being done. It's typically set on the
## command line and influences whether this instance starts up as a
## controller or controllee.
const cmd = "" &redef;
## This can be used by commands that take an argument.
const arg = "" &redef;
const controller_events = /Control::.*_request/ &redef;
const controllee_events = /Control::.*_response/ &redef;
## These are the commands that can be given on the command line for
## remote control.
const commands: set[string] = {
"id_value",
"peer_status",
"net_stats",
"configuration_update",
"shutdown",
};
## Variable IDs that are to be ignored by the update process.
const ignore_ids: set[string] = {
# FIXME: Bro crashes if it tries to send this ID.
"Log::rotation_control",
};
## Event for requesting the value of an ID (a variable).
global id_value_request: event(id: string);
## Event for returning the value of an ID after an :bro:id:`id_value_request` event.
global id_value_response: event(id: string, val: string);
## Requests the current communication status.
global peer_status_request: event();
## Returns the current communication status.
global peer_status_response: event(s: string);
## Requests the current net_stats.
global net_stats_request: event();
## Returns the current net_stats.
global net_stats_response: event(s: string);
## Inform the remote Bro instance that its configuration may have been updated.
global configuration_update_request: event();
## This event is a wrapper and alias for the :bro:id:`configuration_update_request` event.
## This event is also a primary hooking point for the control framework.
global configuration_update: event();
## Message in response to a configuration update request.
global configuration_update_response: event();
## Requests that the Bro instance begins shutting down.
global shutdown_request: event();
## Message in response to a shutdown request.
global shutdown_response: event();
}
event terminate_event()
{
terminate_communication();
}
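
For the controllee side described above, the required Communication::nodes entry
essentially mirrors what the cluster framework sets up for its control node (the
controller address below is invented):

# Hypothetical controllee configuration: accept Control:: events from the
# controller host under the "control" class.
@load frameworks/control/controllee

redef Communication::nodes += {
    ["control"] = [$host=192.168.1.5, $connect=F, $class="control",
                   $events=Control::controller_events],
};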

@@ -0,0 +1 @@
@load ./main

@@ -0,0 +1,151 @@
# Signatures to initiate dynamic protocol detection.
signature dpd_ftp_client {
ip-proto == tcp
payload /(|.*[\n\r]) *[uU][sS][eE][rR] /
tcp-state originator
}
# Match for server greeting (220, 120) and for login or passwd
# required (230, 331).
signature dpd_ftp_server {
ip-proto == tcp
payload /[\n\r ]*(120|220)[^0-9].*[\n\r] *(230|331)[^0-9]/
tcp-state responder
requires-reverse-signature dpd_ftp_client
enable "ftp"
}
signature dpd_http_client {
ip-proto == tcp
payload /^[[:space:]]*(GET|HEAD|POST)[[:space:]]*/
tcp-state originator
}
signature dpd_http_server {
ip-proto == tcp
payload /^HTTP\/[0-9]/
tcp-state responder
requires-reverse-signature dpd_http_client
enable "http"
}
signature dpd_bittorrenttracker_client {
ip-proto == tcp
payload /^.*\/announce\?.*info_hash/
tcp-state originator
}
signature dpd_bittorrenttracker_server {
ip-proto == tcp
payload /^HTTP\/[0-9]/
tcp-state responder
requires-reverse-signature dpd_bittorrenttracker_client
enable "bittorrenttracker"
}
signature dpd_bittorrent_peer1 {
ip-proto == tcp
payload /^\x13BitTorrent protocol/
tcp-state originator
}
signature dpd_bittorrent_peer2 {
ip-proto == tcp
payload /^\x13BitTorrent protocol/
tcp-state responder
requires-reverse-signature dpd_bittorrent_peer1
enable "bittorrent"
}
signature irc_client1 {
ip-proto == tcp
payload /(|.*[\r\n]) *[Uu][Ss][Ee][Rr] +.+[\n\r]+ *[Nn][Ii][Cc][Kk] +.*[\r\n]/
requires-reverse-signature irc_server_reply
tcp-state originator
enable "irc"
}
signature irc_client2 {
ip-proto == tcp
payload /(|.*[\r\n]) *[Nn][Ii][Cc][Kk] +.+[\r\n]+ *[Uu][Ss][Ee][Rr] +.+[\r\n]/
requires-reverse-signature irc_server_reply
tcp-state originator
enable "irc"
}
signature irc_server_reply {
ip-proto == tcp
payload /^(|.*[\n\r])(:[^ \n\r]+ )?[0-9][0-9][0-9] /
tcp-state responder
}
signature irc_sig3 {
ip-proto == tcp
payload /(.*\x0a)*(\x20)*[Ss][Ee][Rr][Vv][Ee][Rr](\x20)+.+\x0a/
}
signature irc_sig4 {
ip-proto == tcp
payload /(.*\x0a)*(\x20)*[Ss][Ee][Rr][Vv][Ee][Rr](\x20)+.+\x0a/
requires-reverse-signature irc_sig3
enable "irc"
}
signature dpd_smtp_client {
ip-proto == tcp
payload /(|.*[\n\r])[[:space:]]*([hH][eE][lL][oO]|[eE][hH][lL][oO])/
requires-reverse-signature dpd_smtp_server
enable "smtp"
tcp-state originator
}
signature dpd_smtp_server {
ip-proto == tcp
payload /^[[:space:]]*220[[:space:]-]/
tcp-state responder
}
signature dpd_ssh_client {
ip-proto == tcp
payload /^[sS][sS][hH]-/
requires-reverse-signature dpd_ssh_server
enable "ssh"
tcp-state originator
}
signature dpd_ssh_server {
ip-proto == tcp
payload /^[sS][sS][hH]-/
tcp-state responder
}
signature dpd_pop3_server {
ip-proto == tcp
payload /^\+OK/
requires-reverse-signature dpd_pop3_client
enable "pop3"
tcp-state responder
}
signature dpd_pop3_client {
ip-proto == tcp
payload /(|.*[\r\n])[[:space:]]*([uU][sS][eE][rR][[:space:]]|[aA][pP][oO][pP][[:space:]]|[cC][aA][pP][aA]|[aA][uU][tT][hH])/
tcp-state originator
}
signature dpd_ssl_server {
ip-proto == tcp
# Server hello.
payload /^(\x16\x03[\x00\x01\x02]..\x02...\x03[\x00\x01\x02]|...?\x04..\x00\x02).*/
requires-reverse-signature dpd_ssl_client
enable "ssl"
tcp-state responder
}
signature dpd_ssl_client {
ip-proto == tcp
# Client hello.
payload /^(\x16\x03[\x00\x01\x02]..\x01...\x03[\x00\x01\x02]|...?\x01[\x00\x01\x02][\x02\x03]).*/
tcp-state originator
}

@@ -0,0 +1,108 @@
##! Activates port-independent protocol detection and selectively disables
##! analyzers if protocol violations occur.
module DPD;
## Add the DPD signatures to the signature framework.
redef signature_files += "base/frameworks/dpd/dpd.sig";
export {
redef enum Log::ID += { DPD };
type Info: record {
## Timestamp for when protocol analysis failed.
ts: time &log;
## Connection unique ID.
uid: string &log;
## Connection ID.
id: conn_id &log;
## Transport protocol for the violation.
proto: transport_proto &log;
## The analyzer that generated the violation.
analyzer: string &log;
## The textual reason for the analysis failure.
failure_reason: string &log;
## Disabled analyzer IDs. This is only for internal tracking
## so as to not attempt to disable analyzers multiple times.
# TODO: This is waiting on ticket #460 to remove the '0'.
disabled_aids: set[count] &default=set(0);
};
## Ignore violations which go this many bytes into the connection.
## Set to 0 to never ignore protocol violations.
const ignore_violations_after = 10 * 1024 &redef;
}
redef record connection += {
dpd: Info &optional;
};
event bro_init()
{
Log::create_stream(DPD, [$columns=Info]);
# Populate the internal DPD analysis variable.
for ( a in dpd_config )
{
for ( p in dpd_config[a]$ports )
{
if ( p !in dpd_analyzer_ports )
dpd_analyzer_ports[p] = set();
add dpd_analyzer_ports[p][a];
}
}
}
event protocol_confirmation(c: connection, atype: count, aid: count) &priority=10
{
local analyzer = analyzer_name(atype);
if ( fmt("-%s",analyzer) in c$service )
delete c$service[fmt("-%s", analyzer)];
add c$service[analyzer];
}
event protocol_violation(c: connection, atype: count, aid: count,
reason: string) &priority=10
{
local analyzer = analyzer_name(atype);
# If the service hasn't been confirmed yet, don't generate a log message
# for the protocol violation.
if ( analyzer !in c$service )
return;
delete c$service[analyzer];
add c$service[fmt("-%s", analyzer)];
local info: Info;
info$ts=network_time();
info$uid=c$uid;
info$id=c$id;
info$proto=get_conn_transport_proto(c$id);
info$analyzer=analyzer;
info$failure_reason=reason;
c$dpd = info;
}
event protocol_violation(c: connection, atype: count, aid: count, reason: string) &priority=5
{
if ( !c?$dpd || aid in c$dpd$disabled_aids )
return;
local size = c$orig$size + c$resp$size;
if ( ignore_violations_after > 0 && size > ignore_violations_after )
return;
# Disable the analyzer that raised the last core-generated event.
disable_analyzer(c$id, aid);
add c$dpd$disabled_aids[aid];
}
event protocol_violation(c: connection, atype: count, aid: count,
reason: string) &priority=-5
{
if ( c?$dpd )
Log::write(DPD, c$dpd);
}
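
The byte threshold above is a tunable; a site that wants to act on every violation
could redef it from a local script, for example:

# Hypothetical site tuning: never ignore protocol violations, regardless of how
# far into the connection they occur.
redef DPD::ignore_violations_after = 0;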

@@ -0,0 +1 @@
@load ./main

@@ -0,0 +1,275 @@
##! The intelligence framework provides a way to store and query IP addresses,
##! strings (with a subtype), and numeric (with a subtype) data. Metadata can
##! also be associated with the intelligence like tags which are arbitrary
##! strings, time values, and longer descriptive strings.
# Example string subtypes:
# url
# email
# domain
# software
# user_name
# file_name
# file_md5
# x509_cert - DER encoded, not PEM (ascii armored)
# Example tags:
# infrastructure
# malicious
# sensitive
# canary
# friend
module Intel;
export {
redef enum Log::ID += { INTEL };
redef enum Notice::Type += {
## This notice should be used in all detector scripts to indicate
## an intelligence based detection.
Detection,
};
type Info: record {
ts: time &log;
## This value should be one of: "info", "warn", "error"
level: string &log;
message: string &log;
};
type MetaData: record {
desc: string &optional;
url: string &optional;
first_seen: time &optional;
latest_seen: time &optional;
tags: set[string];
};
type Item: record {
ip: addr &optional;
str: string &optional;
num: int &optional;
subtype: string &optional;
desc: string &optional;
url: string &optional;
first_seen: time &optional;
latest_seen: time &optional;
tags: set[string];
## These single string tags are throwaway until pybroccoli supports sets
tag1: string &optional;
tag2: string &optional;
tag3: string &optional;
};
type QueryItem: record {
ip: addr &optional;
str: string &optional;
num: int &optional;
subtype: string &optional;
or_tags: set[string] &optional;
and_tags: set[string] &optional;
## The predicate can be given when searching for a match. It will
## be tested against every :bro:type:`MetaData` item associated with
## the data being matched on. If it returns T a single time, the
## matcher will consider that the item has matched.
pred: function(meta: Intel::MetaData): bool &optional;
};
global insert: function(item: Item): bool;
global insert_event: event(item: Item);
global matcher: function(item: QueryItem): bool;
type MetaDataStore: table[count] of MetaData;
type DataStore: record {
ip_data: table[addr] of MetaDataStore;
## The first string is the actual value and the second string is the subtype.
string_data: table[string, string] of MetaDataStore;
int_data: table[int, string] of MetaDataStore;
};
global data_store: DataStore;
}
event bro_init()
{
Log::create_stream(INTEL, [$columns=Info]);
}
function insert(item: Item): bool
{
local err_msg = "";
if ( (item?$str || item?$num) && ! item?$subtype )
err_msg = "You must provide a subtype to insert_sync or this item doesn't make sense.";
if ( err_msg == "" )
{
# Create and fill out the meta data item.
local meta: MetaData;
if ( item?$first_seen )
meta$first_seen = item$first_seen;
if ( item?$latest_seen )
meta$latest_seen = item$latest_seen;
if ( item?$tags )
meta$tags = item$tags;
if ( item?$desc )
meta$desc = item$desc;
if ( item?$url )
meta$url = item$url;
# This is hopefully only temporary until pybroccoli supports sets.
if ( item?$tag1 )
add item$tags[item$tag1];
if ( item?$tag2 )
add item$tags[item$tag2];
if ( item?$tag3 )
add item$tags[item$tag3];
if ( item?$ip )
{
if ( item$ip !in data_store$ip_data )
data_store$ip_data[item$ip] = table();
data_store$ip_data[item$ip][|data_store$ip_data[item$ip]|] = meta;
return T;
}
else if ( item?$str )
{
if ( [item$str, item$subtype] !in data_store$string_data )
data_store$string_data[item$str, item$subtype] = table();
data_store$string_data[item$str, item$subtype][|data_store$string_data[item$str, item$subtype]|] = meta;
return T;
}
else if ( item?$num )
{
if ( [item$num, item$subtype] !in data_store$int_data )
data_store$int_data[item$num, item$subtype] = table();
data_store$int_data[item$num, item$subtype][|data_store$int_data[item$num, item$subtype]|] = meta;
return T;
}
else
err_msg = "Failed to insert intelligence item for some unknown reason.";
}
if ( err_msg != "" )
Log::write(INTEL, [$ts=network_time(), $level="warn", $message=fmt(err_msg)]);
return F;
}
event insert_event(item: Item)
{
insert(item);
}
function match_item_with_metadata(item: QueryItem, meta: MetaData): bool
{
if ( item?$and_tags )
{
local matched = T;
# Every tag given has to match in a single MetaData entry.
for ( tag in item$and_tags )
{
if ( tag !in meta$tags )
matched = F;
}
if ( matched )
return T;
}
else if ( item?$or_tags )
{
# For OR tags, only a single tag has to match.
for ( tag in item$or_tags )
{
if ( tag in meta$tags )
return T;
}
}
else if ( item?$pred )
return item$pred(meta);
# This indicates some sort of failure in the query
return F;
}
function matcher(item: QueryItem): bool
{
local err_msg = "";
if ( ! (item?$ip || item?$str || item?$num) )
err_msg = "You must supply one of the $ip, $str, or $num fields to search on";
else if ( (item?$or_tags || item?$and_tags) && item?$pred )
err_msg = "You can't match with both tags and a predicate.";
else if ( item?$or_tags && item?$and_tags )
err_msg = "You can't match with both OR'd together tags and AND'd together tags";
else if ( (item?$str || item?$num) && ! item?$subtype )
err_msg = "You must provide a subtype to matcher or this item doesn't make sense.";
else if ( item?$str && item?$num )
err_msg = "You must only provide $str or $num, not both.";
local meta: MetaData;
if ( err_msg == "" )
{
if ( item?$ip )
{
if ( item$ip in data_store$ip_data )
{
if ( ! item?$and_tags && ! item?$or_tags && ! item?$pred )
return T;
for ( i in data_store$ip_data[item$ip] )
{
meta = data_store$ip_data[item$ip][i];
if ( match_item_with_metadata(item, meta) )
return T;
}
}
}
else if ( item?$str )
{
if ( [item$str, item$subtype] in data_store$string_data )
{
if ( ! item?$and_tags && ! item?$or_tags && ! item?$pred )
return T;
for ( i in data_store$string_data[item$str, item$subtype] )
{
meta = data_store$string_data[item$str, item$subtype][i];
if ( match_item_with_metadata(item, meta) )
return T;
}
}
}
else if ( item?$num )
{
if ( [item$num, item$subtype] in data_store$int_data )
{
if ( ! item?$and_tags && ! item?$or_tags && ! item?$pred )
return T;
for ( i in data_store$int_data[item$num, item$subtype] )
{
meta = data_store$int_data[item$num, item$subtype][i];
if ( match_item_with_metadata(item, meta) )
return T;
}
}
}
else
err_msg = "Failed to query intelligence data for some unknown reason.";
}
if ( err_msg != "" )
Log::write(INTEL, [$ts=network_time(), $level="error", $message=fmt(err_msg)]);
return F;
}
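
A quick sketch of how insert() and matcher() are meant to be used together (the
address and tags below are made up):

# Hypothetical usage of the intelligence framework API defined above.
event bro_init()
    {
    # Store an address along with a couple of arbitrary tags.
    Intel::insert([$ip=192.168.1.100, $tags=set("malicious", "infrastructure")]);

    # Later, ask whether the address is known with at least one of the tags.
    if ( Intel::matcher([$ip=192.168.1.100, $or_tags=set("malicious")]) )
        print "address found in intel data";
    }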

@@ -0,0 +1,3 @@
@load ./main
@load ./writers/ascii

@@ -0,0 +1,206 @@
##! The Bro logging interface.
##!
##! See XXX for an introduction to Bro's logging framework.
module Log;
# Log::ID and Log::Writer are defined in bro.init due to circular dependencies.
export {
## If true, local logging is by default enabled for all filters.
const enable_local_logging = T &redef;
## If true, remote logging is by default enabled for all filters.
const enable_remote_logging = T &redef;
## Default writer to use if a filter does not specify
## anything else.
const default_writer = WRITER_ASCII &redef;
## Type defining the content of a logging stream.
type Stream: record {
## A record type defining the log's columns.
columns: any;
## Event that will be raised once for each log entry.
## The event receives a single parameter, an instance of type ``columns``.
ev: any &optional;
};
## Filter customizing logging.
type Filter: record {
## Descriptive name to reference this filter.
name: string;
## The writer to use.
writer: Writer &default=default_writer;
## Predicate indicating whether a log entry should be recorded.
## If not given, all entries are recorded.
##
## rec: An instance of the stream's ``columns`` type with its
## fields set to the values to be logged.
##
## Returns: True if the entry is to be recorded.
pred: function(rec: any): bool &optional;
## Output path for recording entries matching this
## filter.
##
## The specific interpretation of the string is up to
## the used writer, and may for example be the destination
## file name. Generally, filenames are expected to be given
## without any extensions; writers will add appropriate
## extensions automatically.
path: string &optional;
## A function returning the output path for recording entries
## matching this filter. This is similar to ``path`` yet allows
## to compute the string dynamically. It is ok to return
## different strings for separate calls, but be careful: it's
## easy to flood the disk by returning a new string for each
## connection ...
path_func: function(id: ID, path: string): string &optional;
## Subset of column names to record. If not given, all
## columns are recorded.
include: set[string] &optional;
## Subset of column names to exclude from recording. If not given,
## all columns are recorded.
exclude: set[string] &optional;
## If true, entries are recorded locally.
log_local: bool &default=enable_local_logging;
## If true, entries are passed on to remote peers.
log_remote: bool &default=enable_remote_logging;
};
# Log rotation support.
## Information passed into rotation callback functions.
type RotationInfo: record {
writer: Writer; ##< Writer.
path: string; ##< Original path value.
open: time; ##< Time when opened.
close: time; ##< Time when closed.
};
## Default rotation interval. Zero disables rotation.
const default_rotation_interval = 0secs &redef;
## Default naming suffix format. Uses a strftime() style.
const default_rotation_date_format = "%y-%m-%d_%H.%M.%S" &redef;
## Default postprocessor for writers outputting into files.
const default_rotation_postprocessor = "" &redef;
## Default function to construct the name of a rotated output file.
## The default implementation appends info$date_fmt to the original
## file name.
##
## info: Meta-data about the file to be rotated.
global default_rotation_path_func: function(info: RotationInfo) : string &redef;
## Type for controlling file rotation.
type RotationControl: record {
## Rotation interval.
interv: interval &default=default_rotation_interval;
## Format for timestamps embedded into rotated file names.
date_fmt: string &default=default_rotation_date_format;
## Postprocessor process to run on the rotated file.
postprocessor: string &default=default_rotation_postprocessor;
};
## Specifies rotation parameters per ``(id, path)`` tuple.
## If a pair is not found in this table, default values defined in
## ``RotationControl`` are used.
const rotation_control: table[Writer, string] of RotationControl &default=[] &redef;
## Sentinel value for indicating that a filter was not found when looked up.
const no_filter: Filter = [$name="<not found>"]; # Sentinel.
# TODO: Document.
global create_stream: function(id: ID, stream: Stream) : bool;
global enable_stream: function(id: ID) : bool;
global disable_stream: function(id: ID) : bool;
global add_filter: function(id: ID, filter: Filter) : bool;
global remove_filter: function(id: ID, name: string) : bool;
global get_filter: function(id: ID, name: string) : Filter; # Returns no_filter if not found.
global write: function(id: ID, columns: any) : bool;
global set_buf: function(id: ID, buffered: bool): bool;
global flush: function(id: ID): bool;
global add_default_filter: function(id: ID) : bool;
global remove_default_filter: function(id: ID) : bool;
}
# We keep a script-level copy of all filters so that we can manipulate them.
global filters: table[ID, string] of Filter;
@load logging.bif.bro # Needs Filter and Stream defined.
function default_rotation_path_func(info: RotationInfo) : string
{
local date_fmt = rotation_control[info$writer, info$path]$date_fmt;
return fmt("%s-%s", info$path, strftime(date_fmt, info$open));
}
function create_stream(id: ID, stream: Stream) : bool
{
if ( ! __create_stream(id, stream) )
return F;
return add_default_filter(id);
}
function disable_stream(id: ID) : bool
{
if ( ! __disable_stream(id) )
return F;
return T;
}
function add_filter(id: ID, filter: Filter) : bool
{
filters[id, filter$name] = filter;
return __add_filter(id, filter);
}
function remove_filter(id: ID, name: string) : bool
{
delete filters[id, name];
return __remove_filter(id, name);
}
function get_filter(id: ID, name: string) : Filter
{
if ( [id, name] in filters )
return filters[id, name];
return no_filter;
}
function write(id: ID, columns: any) : bool
{
return __write(id, columns);
}
function set_buf(id: ID, buffered: bool): bool
{
return __set_buf(id, buffered);
}
function flush(id: ID): bool
{
return __flush(id);
}
function add_default_filter(id: ID) : bool
{
return add_filter(id, [$name="default"]);
}
function remove_default_filter(id: ID) : bool
{
return remove_filter(id, "default");
}
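
A short sketch of the intended use of this interface from another script; the
module name, record fields, and filter below are invented for illustration:

module Example;

export {
    redef enum Log::ID += { EXAMPLE };

    type Info: record {
        ts:  time   &log;
        msg: string &log;
    };
}

event bro_init()
    {
    Log::create_stream(EXAMPLE, [$columns=Info]);

    # Additionally write only "interesting" entries to their own file.
    Log::add_filter(EXAMPLE, [$name="interesting",
                              $path="example-interesting",
                              $pred=function(rec: Info): bool
                                  { return /interesting/ in rec$msg; }]);
    }

event bro_done()
    {
    Log::write(EXAMPLE, [$ts=network_time(), $msg="done, nothing interesting seen"]);
    }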

@@ -0,0 +1,29 @@
##! Interface for the ascii log writer.
module LogAscii;
export {
## If true, output everything to stdout rather than
## into files. This is primarily for debugging purposes.
const output_to_stdout = F &redef;
## If true, include a header line with column names.
const include_header = T &redef;
## Prefix for the header line if included.
const header_prefix = "# " &redef;
## Separator between fields.
const separator = "\t" &redef;
## Separator between set elements.
const set_separator = "," &redef;
## String to use for empty fields.
const empty_field = "-" &redef;
## String to use for an unset &optional field.
const unset_field = "-" &redef;
}
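
These constants are meant to be tuned via redef from a site or local script, for
example (a sketch):

# Hypothetical site tuning for the ascii writer.
redef LogAscii::output_to_stdout = T;    # dump logs to stdout while debugging
redef LogAscii::set_separator = "|";     # use '|' between set elements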

@@ -0,0 +1 @@
@load ./main

@@ -0,0 +1,19 @@
@load frameworks/metrics
redef enum Metrics::ID += {
CONNS_ORIGINATED,
CONNS_RESPONDED
};
event bro_init()
{
Metrics::configure(CONNS_ORIGINATED, [$aggregation_mask=24, $break_interval=5mins]);
Metrics::configure(CONNS_RESPONDED, [$aggregation_mask=24, $break_interval=5mins]);
}
event connection_established(c: connection)
{
Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h], 1);
Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h], 1);
}

@@ -0,0 +1,20 @@
@load frameworks/metrics
redef enum Metrics::ID += {
HTTP_REQUESTS_BY_STATUS_CODE,
HTTP_REQUESTS_BY_HOST,
};
event bro_init()
{
Metrics::configure(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_mask=24, $break_interval=10secs]);
Metrics::configure(HTTP_REQUESTS_BY_HOST, [$break_interval=10secs]);
}
event HTTP::log_http(rec: HTTP::Info)
{
if ( rec?$host )
Metrics::add_data(HTTP_REQUESTS_BY_HOST, [$index=rec$host], 1);
if ( rec?$status_code )
Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $index=fmt("%d", rec$status_code)], 1);
}

@@ -0,0 +1,142 @@
##! This is the implementation of the metrics framework
module Metrics;
export {
redef enum Log::ID += { METRICS };
type ID: enum {
ALL,
};
const default_aggregation_mask = 24 &redef;
const default_break_interval = 5mins &redef;
# TODO: configure a metrics filter logging stream to log the current
# metrics configuration in case someone is looking through
# old logs and the configuration has changed since then.
type Filter: record {
name: ID &optional;
## Global mask by which you'd like to aggregate traffic.
aggregation_mask: count &optional;
## This is essentially applying names to various subnets.
aggregation_table: table[subnet] of string &optional;
break_interval: interval &default=default_break_interval;
};
type Index: record {
## Host is the value to which this metric applies.
host: addr &optional;
## A non-address related metric or a sub-key for an address based metric.
## An example might be successful SSH connections by client IP address
## where the client string would be the index value.
## Another example might be number of HTTP requests to a particular
## value in a Host header. This is an example of a non-host based
## metric since multiple IP addresses could respond for the same Host
## header value.
index: string &default="";
};
type Info: record {
ts: time &log;
name: ID &log;
index: string &log &optional;
agg_subnet: string &log &optional;
value: count &log;
};
global add_filter: function(name: ID, filter: Filter);
global add_data: function(name: ID, index: Index, increment: count);
global log_metrics: event(rec: Info);
}
global metric_filters: table[ID] of Filter = table();
type MetricIndex: table[string] of count &default=0;
type MetricTable: table[string] of MetricIndex;
global store: table[ID] of MetricTable = table();
event bro_init()
{
Log::create_stream(METRICS, [$columns=Info, $ev=log_metrics]);
}
function reset(name: ID)
{
store[name] = table();
}
event log_it(filter: Filter)
{
# If this node is the manager in a cluster, this needs to request values
# for this metric from all of the workers.
local name = filter$name;
for ( agg_subnet in store[name] )
{
local metric_values = store[name][agg_subnet];
for ( index in metric_values )
{
local val = metric_values[index];
local m: Info = [$ts=network_time(),
$name=name,
$agg_subnet=fmt("%s", agg_subnet),
$index=index,
$value=val];
if ( index == "" )
delete m$index;
if ( agg_subnet == "" )
delete m$agg_subnet;
Log::write(METRICS, m);
}
}
reset(name);
schedule filter$break_interval { log_it(filter) };
}
function add_filter(name: ID, filter: Filter)
{
if ( filter?$aggregation_table && filter?$aggregation_mask )
{
print "INVALID Metric filter: Defined $aggregation_table and $aggregation_mask.";
return;
}
filter$name = name;
metric_filters[name] = filter;
store[name] = table();
# Only do this on the manager if in a cluster.
schedule filter$break_interval { log_it(filter) };
}
function add_data(name: ID, index: Index, increment: count)
{
local conf = metric_filters[name];
local agg_subnet = "";
if ( index?$host )
{
if ( conf?$aggregation_mask )
{
local agg_mask = conf$aggregation_mask;
agg_subnet = fmt("%s", mask_addr(index$host, agg_mask));
}
else if ( conf?$aggregation_table )
agg_subnet = fmt("%s", conf$aggregation_table[index$host]);
else
agg_subnet = fmt("%s", index$host);
}
if ( agg_subnet !in store[name] )
store[name][agg_subnet] = table();
if ( index$index !in store[name][agg_subnet] )
store[name][agg_subnet][index$index] = 0;
store[name][agg_subnet][index$index] = store[name][agg_subnet][index$index] + increment;
}

@@ -0,0 +1,12 @@
@load ./main
@load ./weird
# There should be no overhead imposed by loading notice actions so we
# load them all.
@load ./actions/drop
@load ./actions/email_admin
@load ./actions/page
# Load the script to add hostnames to emails by default.
# NOTE: this exposes a memleak in async DNS lookups.
#@load ./extend-email/hostnames

@@ -0,0 +1,34 @@
##! This script extends the built in notice code to implement the IP address
##! dropping functionality.
module Notice;
export {
redef enum Action += {
## Drops the address via Drop::drop_address, and generates an alarm.
ACTION_DROP
};
redef record Info += {
## Indicate if the $src IP address was dropped and denied network access.
dropped: bool &log &default=F;
};
}
# This is a little awkward because we want to inject drop along with the
# synchronous functions.
event bro_init()
{
local drop_func = function(n: Notice::Info)
{
if ( ACTION_DROP in n$actions )
{
#local drop = React::drop_address(n$src, "");
#local addl = drop?$sub ? fmt(" %s", drop$sub) : "";
#n$dropped = drop$note != Drop::AddressDropIgnored;
#n$msg += fmt(" [%s%s]", drop$note, addl);
}
};
add Notice::sync_functions[drop_func];
}

@@ -0,0 +1,28 @@
module Notice;
export {
redef enum Action += {
## Indicate that the generated email should be addressed to the
## appropriate email addresses as found in the
## :bro:id:`Site::addr_to_emails` variable based on the relevant
## address or addresses indicated in the notice.
ACTION_EMAIL_ADMIN
};
}
event notice(n: Notice::Info) &priority=-5
{
if ( |Site::local_admins| > 0 &&
ACTION_EMAIL_ADMIN in n$actions )
{
local email = "";
if ( n?$src && |Site::get_emails(n$src)| > 0 )
email = fmt("%s, %s", email, Site::get_emails(n$src));
if ( n?$dst && |Site::get_emails(n$dst)| > 0 )
email = fmt("%s, %s", email, Site::get_emails(n$dst));
if ( email != "" )
email_notice_to(n, email, T);
}
}

@@ -0,0 +1,19 @@
module Notice;
export {
redef enum Action += {
## Indicates that the notice should be sent to the pager email address
## configured in the :bro:id:`mail_page_dest` variable.
ACTION_PAGE
};
## Email address to send notices with the :bro:enum:`Notice::ACTION_PAGE` action.
const mail_page_dest = "" &redef;
}
event notice(n: Notice::Info) &priority=-5
{
if ( ACTION_PAGE in n$actions )
email_notice_to(n, mail_page_dest, F);
}

@@ -0,0 +1,36 @@
module Notice;
# This probably doesn't actually work due to the async lookup_addr.
event Notice::notice(n: Notice::Info) &priority=10
{
if ( ! n?$src && ! n?$dst )
return;
local output = "";
if ( n?$src )
{
when ( local src_name = lookup_addr(n$src) )
{
output = cat(output, "orig_h/src: ", src_name, "\n");
}
timeout 5secs
{
output = cat(output, "orig_h/src: <timeout>\n");
}
}
if ( n?$dst )
{
when ( local dst_name = lookup_addr(n$dst) )
{
output = cat(output, "resp_h/dst: ", dst_name, "\n");
}
timeout 5secs
{
output = cat(output, "resp_h/dst: <timeout>\n");
}
}
if ( output != "" )
n$email_body_sections[|n$email_body_sections|] = output;
}

@@ -0,0 +1,389 @@
##! This is the notice framework which enables Bro to "notice" things which
##! are odd or potentially bad. Decisions about the meaning of various notices
##! need to be made per site because Bro does not ship with assumptions about
##! what is bad activity for sites. More extensive documentation about using
##! the notice framework can be found in the documentation section of the
##! http://www.bro-ids.org/ website.
module Notice;
export {
redef enum Log::ID += {
## This is the primary logging stream for notices. It must always be
## referenced with the module name included because the name is
## also used by the global function :bro:id:`NOTICE`.
NOTICE,
## This is the notice policy auditing log. It records what the current
## notice policy is at Bro init time.
NOTICE_POLICY,
## This is the alarm stream.
ALARM,
};
## Scripts creating new notices need to redef this enum to add their own
## specific notice types which would then get used when they call the
## :bro:id:`NOTICE` function. The convention is to give a general category
## along with the specific notice separating words with underscores and using
## leading capitals on each word except for abbreviations which are kept in
## all capitals. For example, SSH::Login is for heuristically guessed
## successful SSH logins.
type Type: enum {
## Notice reporting a count of how often a notice occurred.
Tally,
};
## These are values representing actions that can be taken with notices.
type Action: enum {
## Indicates that there is no action to be taken.
ACTION_NONE,
## Indicates that the notice should be sent to the notice logging stream.
ACTION_LOG,
## Indicates that the notice should be sent to the email address(es)
## configured in the :bro:id:`Notice::mail_dest` variable.
ACTION_EMAIL,
## Indicates that the notice should be alarmed.
ACTION_ALARM,
};
type Info: record {
ts: time &log &optional;
uid: string &log &optional;
id: conn_id &log &optional;
## These are shorthand ways of giving the uid and id to a notice. The
## reference to the actual connection will be deleted after applying
## the notice policy.
conn: connection &optional;
iconn: icmp_conn &optional;
## The :bro:enum:`Notice::Type` of the notice.
note: Type &log;
## The human readable message for the notice.
msg: string &log &optional;
## The human readable sub-message.
sub: string &log &optional;
## Source address, if we don't have a :bro:type:`conn_id`.
src: addr &log &optional;
## Destination address.
dst: addr &log &optional;
## Associated port, if we don't have a :bro:type:`conn_id`.
p: port &log &optional;
## Associated count, or perhaps a status code.
n: count &log &optional;
## Peer that raised this notice.
src_peer: event_peer &optional;
## Textual description for the peer that raised this notice.
peer_descr: string &log &optional;
## The actions which have been applied to this notice.
actions: set[Notice::Action] &log &optional;
## These are policy items that returned T and applied their action
## to the notice.
## TODO: this can't take set() as a default. (bug)
policy_items: set[count] &log &optional;
## By adding chunks of text into this element, other scripts can
## expand on notices that are being emailed. The normal way to add text
## is to extend the vector by handling the :bro:id:`Notice::notice`
## event and modifying the notice in place.
email_body_sections: vector of string &default=vector();
};
## Ignored notice types.
const ignored_types: set[Notice::Type] = {} &redef;
## Emailed notice types.
const emailed_types: set[Notice::Type] = {} &redef;
## Alarmed notice types.
const alarmed_types: set[Notice::Type] = {} &redef;
## This is the record that defines the items that make up the notice policy.
type PolicyItem: record {
## This is the exact positional order in which the :bro:type:`PolicyItem`
## records are checked. This is set internally by the notice framework.
position: count &log &optional;
## Define the priority for this check. Items are checked in order
## from highest value (10) to lowest value (0).
priority: count &log &default=5;
## An action given to the notice if the predicate returns true.
result: Notice::Action &log &default=ACTION_NONE;
## The pred (predicate) field is a function that returns a boolean T
## or F value. If the predicate function returns true, the action in
## this record is applied to the notice that is given as an argument
## to the predicate function.
pred: function(n: Notice::Info): bool;
## Indicates this item should terminate policy processing if the
## predicate returns T.
halt: bool &log &default=F;
};
## This is where the :bro:id:`Notice::policy` is defined. All notice
## processing is done through this variable.
const policy: set[PolicyItem] = {
[$pred(n: Notice::Info) = { return (n$note in Notice::ignored_types); },
$halt=T, $priority = 9],
[$pred(n: Notice::Info) = { return (n$note in Notice::alarmed_types); },
$result = ACTION_ALARM,
$priority = 8],
[$pred(n: Notice::Info) = { return (n$note in Notice::emailed_types); },
$result = ACTION_EMAIL,
$priority = 8],
[$pred(n: Notice::Info) = { return T; },
$result = ACTION_LOG,
$priority = 0],
} &redef;
## Local system sendmail program.
const sendmail = "/usr/sbin/sendmail" &redef;
## Email address to send notices with the :bro:enum:`ACTION_EMAIL` action.
const mail_dest = "" &redef;
## Address that emails will be from.
const mail_from = "Big Brother <bro@localhost>" &redef;
## Reply-to address used in outbound email.
const reply_to = "" &redef;
## Text string prefixed to the subject of all emails sent out.
const mail_subject_prefix = "[Bro]" &redef;
## This is the event that is called as the entry point to the
## notice framework by the global :bro:id:`NOTICE` function. By the time
## this event is generated, default values have already been filled out in
## the :bro:type:`Notice::Info` record and synchronous functions in the
## :bro:id:`Notice::sync_functions` have already been called. The notice
## policy has also been applied.
global notice: event(n: Info);
## This is a set of functions that provide a synchronous way for scripts
## extending the notice framework to run before the normal event based
## notice pathway that most of the notice framework takes. This is helpful
## in cases where an action against a notice needs to happen immediately
## and can't wait the short time for the event to bubble up to the top of
## the event queue. An example is the IP address dropping script that
## can block IP addresses that have notices generated because it
## needs to operate closer to real time than the event queue allows it to.
## Normally the event based extension model using the
## :bro:id:`Notice::notice` event will work fine if there aren't harder
## real time constraints.
const sync_functions: set[function(n: Notice::Info)] = set() &redef;
## Call this function to send a notice in an email. It is already used
## by default with the built in :bro:enum:`ACTION_EMAIL` and
## :bro:enum:`ACTION_PAGE` actions.
global email_notice_to: function(n: Info, dest: string, extend: bool);
## This is an internally used function, please ignore it. It's only used
## for filling out missing details of :bro:type:`Notice::Info` records
## before the synchronous and asynchronous event pathways have begun.
global apply_policy: function(n: Notice::Info);
## This event can be handled to access the :bro:type:`Info`
## record as it is sent on to the logging framework.
global log_notice: event(rec: Info);
}
# This is an internal variable used to store the notice policy ordered by
# priority.
global ordered_policy: vector of PolicyItem = vector();
event bro_init()
{
Log::create_stream(NOTICE_POLICY, [$columns=PolicyItem]);
Log::create_stream(Notice::NOTICE, [$columns=Info, $ev=log_notice]);
Log::create_stream(ALARM, [$columns=Notice::Info]);
# Make sure that this log is output as text so that it can be packaged
# up and emailed later.
Log::add_filter(ALARM, [$name="default", $writer=Log::WRITER_ASCII]);
}
# TODO: need a way to call a Bro script level callback during file rotation.
# We need more than just a $postprocessor.
#redef Log::rotation_control += {
# [Log::WRITER_ASCII, "alarm"] = [$postprocessor="mail-alarms"];
#};
# TODO: fix this.
#function notice_tags(n: Notice::Info) : table[string] of string
# {
# local tgs: table[string] of string = table();
# if ( is_remote_event() )
# {
# if ( n$src_peer$descr != "" )
# tgs["es"] = n$src_peer$descr;
# else
# tgs["es"] = fmt("%s/%s", n$src_peer$host, n$src_peer$p);
# }
# else
# {
# tgs["es"] = peer_description;
# }
# return tgs;
# }
function email_notice_to(n: Notice::Info, dest: string, extend: bool)
{
if ( reading_traces() || dest == "" )
return;
local email_text = string_cat(
"From: ", mail_from, "\n",
"Subject: ", mail_subject_prefix, " ", fmt("%s", n$note), "\n",
"To: ", dest, "\n",
# TODO: BiF to get version (the resource_usage Bif seems like overkill).
"User-Agent: Bro-IDS/?.?.?\n");
if ( reply_to != "" )
email_text = string_cat(email_text, "Reply-To: ", reply_to, "\n");
# The notice emails always start off with the human readable message.
email_text = string_cat(email_text, "\n", n$msg, "\n");
# Add the extended information if it's requested.
if ( extend )
{
for ( i in n$email_body_sections )
{
email_text = string_cat(email_text, "******************\n");
email_text = string_cat(email_text, n$email_body_sections[i], "\n");
}
}
email_text = string_cat(email_text, "\n\n--\n[Automatically generated]\n\n");
piped_exec(fmt("%s -t -oi", sendmail), email_text);
}
event notice(n: Notice::Info) &priority=-5
{
if ( ACTION_EMAIL in n$actions )
email_notice_to(n, mail_dest, T);
if ( ACTION_LOG in n$actions )
Log::write(Notice::NOTICE, n);
if ( ACTION_ALARM in n$actions )
Log::write(ALARM, n);
}
# Executes a script with all of the notice fields put into the
# new process' environment as "BRO_ARG_<field>" variables.
function execute_with_notice(cmd: string, n: Notice::Info)
{
# TODO: fix system calls
#local tgs = tags(n);
#system_env(cmd, tags);
}
# This is run synchronously as a function before all of the other
# notice-related functions and events. It also modifies the
# :bro:type:`Notice::Info` record in place.
function apply_policy(n: Notice::Info)
{
# Fill in some defaults.
if ( ! n?$ts )
n$ts = network_time();
if ( n?$conn )
{
if ( ! n?$id )
n$id = n$conn$id;
if ( ! n?$uid )
n$uid = n$conn$uid;
}
if ( n?$id )
{
if ( ! n?$src )
n$src = n$id$orig_h;
if ( ! n?$dst )
n$dst = n$id$resp_h;
if ( ! n?$p )
n$p = n$id$resp_p;
}
if ( n?$iconn )
{
if ( ! n?$src )
n$src = n$iconn$orig_h;
if ( ! n?$dst )
n$dst = n$iconn$resp_h;
}
if ( ! n?$src_peer )
n$src_peer = get_event_peer();
n$peer_descr = n$src_peer?$descr ? n$src_peer$descr : fmt("%s", n$src_peer$host);
if ( ! n?$actions )
n$actions = set();
if ( ! n?$policy_items )
n$policy_items = set();
for ( i in ordered_policy )
{
if ( ordered_policy[i]$pred(n) )
{
add n$actions[ordered_policy[i]$result];
add n$policy_items[int_to_count(i)];
# If the policy item wants to halt policy processing, do it now!
if ( ordered_policy[i]$halt )
break;
}
}
# Delete the connection record if it's there so we aren't sending that
# to remote machines. It can cause problems due to the size of the
# connection record.
if ( n?$conn )
delete n$conn;
if ( n?$iconn )
delete n$iconn;
}
# Automatically create the ordered notice policy that is used at runtime
# for prioritized matching of notice policy items.
event bro_init()
{
local tmp: table[count] of set[PolicyItem] = table();
for ( pi in policy )
{
if ( pi$priority < 0 || pi$priority > 10 )
{
print "All Notice::PolicyItem priorities must be within 0 and 10";
exit();
}
if ( pi$priority !in tmp )
tmp[pi$priority] = set();
add tmp[pi$priority][pi];
}
local rev_count = vector(10,9,8,7,6,5,4,3,2,1,0);
for ( i in rev_count )
{
local j = rev_count[i];
if ( j in tmp )
{
for ( pi in tmp[j] )
{
pi$position = |ordered_policy|;
ordered_policy[|ordered_policy|] = pi;
Log::write(NOTICE_POLICY, pi);
}
}
}
}
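# Example (hypothetical sketch): how a site script might add an item to
# Notice::policy. The predicate, action choice and priority are made up;
# the field names mirror the PolicyItem record used above, and
# Site::is_local_addr comes from base/utils/site.
#function notice_from_local_src(n: Notice::Info): bool
#	{
#	return n?$src && Site::is_local_addr(n$src);
#	}
#redef Notice::policy += {
#	[$pred=notice_from_local_src, $result=Notice::ACTION_EMAIL, $priority=5]
#};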
module GLOBAL;
## This is the entry point in the global namespace for the notice framework.
function NOTICE(n: Notice::Info)
{
# Fill out fields that might be empty and do the policy processing.
Notice::apply_policy(n);
# Run the synchronous functions with the notice.
for ( func in Notice::sync_functions )
func(n);
# Generate the notice event with the notice.
event Notice::notice(n);
}
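# Example (hypothetical sketch): how another script raises a notice. The
# note type shown is one defined elsewhere in this tree; the message and
# sub-message are made up.
#NOTICE([$note=Weird::Weird_Activity,
#        $msg="something odd happened",
#        $sub="extra detail"]);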


@ -0,0 +1,428 @@
module Weird;
export {
redef enum Log::ID += { WEIRD };
redef enum Notice::Type += {
## Generic unusual but alarm-worthy activity.
Weird_Activity,
## Possible evasion; usually just chud.
Retransmission_Inconsistency,
## Could mean packet drop; could also be chud.
Ack_Above_Hole,
## Data has sequence hole; perhaps due to filtering.
Content_Gap,
};
type Info: record {
ts: time &log;
uid: string &log &optional;
id: conn_id &log &optional;
msg: string &log;
addl: string &log &optional;
notice: bool &log &default=F;
};
type WeirdAction: enum {
WEIRD_UNSPECIFIED, WEIRD_IGNORE, WEIRD_FILE,
WEIRD_NOTICE_ALWAYS, WEIRD_NOTICE_PER_CONN,
WEIRD_NOTICE_PER_ORIG, WEIRD_NOTICE_ONCE,
};
# Which of the above actions lead to a notice being generated. For internal use.
const notice_actions = {
WEIRD_NOTICE_ALWAYS, WEIRD_NOTICE_PER_CONN,
WEIRD_NOTICE_PER_ORIG, WEIRD_NOTICE_ONCE,
};
const weird_action: table[string] of WeirdAction = {
# tcp_weird
["above_hole_data_without_any_acks"] = WEIRD_FILE,
["active_connection_reuse"] = WEIRD_FILE,
["bad_HTTP_reply"] = WEIRD_FILE,
["bad_HTTP_version"] = WEIRD_FILE,
["bad_ICMP_checksum"] = WEIRD_FILE,
["bad_ident_port"] = WEIRD_FILE,
["bad_ident_reply"] = WEIRD_FILE,
["bad_ident_request"] = WEIRD_FILE,
["bad_rlogin_prolog"] = WEIRD_FILE,
["bad_rsh_prolog"] = WEIRD_FILE,
["rsh_text_after_rejected"] = WEIRD_FILE,
["bad_RPC"] = WEIRD_NOTICE_PER_ORIG,
["bad_RPC_program"] = WEIRD_FILE,
["bad_SYN_ack"] = WEIRD_FILE,
["bad_TCP_checksum"] = WEIRD_FILE,
["bad_UDP_checksum"] = WEIRD_FILE,
["baroque_SYN"] = WEIRD_FILE,
["base64_illegal_encoding"] = WEIRD_FILE,
["connection_originator_SYN_ack"] = WEIRD_FILE,
["corrupt_tcp_options"] = WEIRD_NOTICE_PER_ORIG,
["crud_trailing_HTTP_request"] = WEIRD_FILE,
["data_after_reset"] = WEIRD_FILE,
["data_before_established"] = WEIRD_FILE,
["data_without_SYN_ACK"] = WEIRD_FILE,
["DHCP_no_type_option"] = WEIRD_FILE,
["DHCP_wrong_msg_type"] = WEIRD_FILE,
["DHCP_wrong_op_type"] = WEIRD_FILE,
["DNS_AAAA_neg_length"] = WEIRD_FILE,
["DNS_Conn_count_too_large"] = WEIRD_FILE,
["DNS_NAME_too_long"] = WEIRD_FILE,
["DNS_RR_bad_length"] = WEIRD_FILE,
["DNS_RR_length_mismatch"] = WEIRD_FILE,
["DNS_RR_unknown_type"] = WEIRD_FILE,
["DNS_label_forward_compress_offset"] = WEIRD_NOTICE_PER_ORIG,
["DNS_label_len_gt_name_len"] = WEIRD_NOTICE_PER_ORIG,
["DNS_label_len_gt_pkt"] = WEIRD_NOTICE_PER_ORIG,
["DNS_label_too_long"] = WEIRD_NOTICE_PER_ORIG,
["DNS_truncated_RR_rdlength_lt_len"] = WEIRD_FILE,
["DNS_truncated_ans_too_short"] = WEIRD_FILE,
["DNS_truncated_len_lt_hdr_len"] = WEIRD_FILE,
["DNS_truncated_quest_too_short"] = WEIRD_FILE,
["excessive_data_without_further_acks"] = WEIRD_FILE,
["excess_RPC"] = WEIRD_NOTICE_PER_ORIG,
["excessive_RPC_len"] = WEIRD_NOTICE_PER_ORIG,
["FIN_advanced_last_seq"] = WEIRD_FILE,
["FIN_after_reset"] = WEIRD_IGNORE,
["FIN_storm"] = WEIRD_NOTICE_ALWAYS,
["HTTP_bad_chunk_size"] = WEIRD_FILE,
["HTTP_chunked_transfer_for_multipart_message"] = WEIRD_FILE,
["HTTP_overlapping_messages"] = WEIRD_FILE,
["HTTP_unknown_method"] = WEIRD_FILE,
["HTTP_version_mismatch"] = WEIRD_FILE,
["ident_request_addendum"] = WEIRD_FILE,
["inappropriate_FIN"] = WEIRD_FILE,
["inflate_data_failed"] = WEIRD_FILE,
["inflate_failed"] = WEIRD_FILE,
["invalid_irc_global_users_reply"] = WEIRD_FILE,
["irc_invalid_command"] = WEIRD_FILE,
["irc_invalid_dcc_message_format"] = WEIRD_FILE,
["irc_invalid_invite_message_format"] = WEIRD_FILE,
["irc_invalid_join_line"] = WEIRD_FILE,
["irc_invalid_kick_message_format"] = WEIRD_FILE,
["irc_invalid_line"] = WEIRD_FILE,
["irc_invalid_mode_message_format"] = WEIRD_FILE,
["irc_invalid_names_line"] = WEIRD_FILE,
["irc_invalid_njoin_line"] = WEIRD_FILE,
["irc_invalid_notice_message_format"] = WEIRD_FILE,
["irc_invalid_oper_message_format"] = WEIRD_FILE,
["irc_invalid_privmsg_message_format"] = WEIRD_FILE,
["irc_invalid_reply_number"] = WEIRD_FILE,
["irc_invalid_squery_message_format"] = WEIRD_FILE,
["irc_invalid_topic_reply"] = WEIRD_FILE,
["irc_invalid_who_line"] = WEIRD_FILE,
["irc_invalid_who_message_format"] = WEIRD_FILE,
["irc_invalid_whois_channel_line"] = WEIRD_FILE,
["irc_invalid_whois_message_format"] = WEIRD_FILE,
["irc_invalid_whois_operator_line"] = WEIRD_FILE,
["irc_invalid_whois_user_line"] = WEIRD_FILE,
["irc_line_size_exceeded"] = WEIRD_FILE,
["irc_line_too_short"] = WEIRD_FILE,
["irc_too_many_invalid"] = WEIRD_FILE,
["line_terminated_with_single_CR"] = WEIRD_FILE,
["line_terminated_with_single_LF"] = WEIRD_FILE,
["malformed_ssh_identification"] = WEIRD_FILE,
["malformed_ssh_version"] = WEIRD_FILE,
["matching_undelivered_data"] = WEIRD_FILE,
["multiple_HTTP_request_elements"] = WEIRD_FILE,
["multiple_RPCs"] = WEIRD_NOTICE_PER_ORIG,
["non_IPv4_packet"] = WEIRD_NOTICE_ONCE,
["NUL_in_line"] = WEIRD_FILE,
["originator_RPC_reply"] = WEIRD_NOTICE_PER_ORIG,
["partial_finger_request"] = WEIRD_FILE,
["partial_ftp_request"] = WEIRD_FILE,
["partial_ident_request"] = WEIRD_FILE,
["partial_RPC"] = WEIRD_NOTICE_PER_ORIG,
["partial_RPC_request"] = WEIRD_FILE,
["pending_data_when_closed"] = WEIRD_FILE,
["pop3_bad_base64_encoding"] = WEIRD_FILE,
["pop3_client_command_unknown"] = WEIRD_FILE,
["pop3_client_sending_server_commands"] = WEIRD_FILE,
["pop3_malformed_auth_plain"] = WEIRD_FILE,
["pop3_server_command_unknown"] = WEIRD_FILE,
["pop3_server_sending_client_commands"] = WEIRD_FILE,
["possible_split_routing"] = WEIRD_FILE,
["premature_connection_reuse"] = WEIRD_FILE,
["repeated_SYN_reply_wo_ack"] = WEIRD_FILE,
["repeated_SYN_with_ack"] = WEIRD_FILE,
["responder_RPC_call"] = WEIRD_NOTICE_PER_ORIG,
["rlogin_text_after_rejected"] = WEIRD_FILE,
["RPC_rexmit_inconsistency"] = WEIRD_FILE,
["RPC_underflow"] = WEIRD_FILE,
["RST_storm"] = WEIRD_NOTICE_ALWAYS,
["RST_with_data"] = WEIRD_FILE, # PC's do this
["simultaneous_open"] = WEIRD_NOTICE_PER_CONN,
["spontaneous_FIN"] = WEIRD_IGNORE,
["spontaneous_RST"] = WEIRD_IGNORE,
["SMB_parsing_error"] = WEIRD_FILE,
["no_smb_session_using_parsesambamsg"] = WEIRD_FILE,
["smb_andx_command_failed_to_parse"] = WEIRD_FILE,
["transaction_subcmd_missing"] = WEIRD_FILE,
["SSLv3_data_without_full_handshake"] = WEIRD_FILE,
["unexpected_SSLv3_record"] = WEIRD_FILE,
["successful_RPC_reply_to_invalid_request"] = WEIRD_NOTICE_PER_ORIG,
["SYN_after_close"] = WEIRD_FILE,
["SYN_after_partial"] = WEIRD_NOTICE_PER_ORIG,
["SYN_after_reset"] = WEIRD_FILE,
["SYN_inside_connection"] = WEIRD_FILE,
["SYN_seq_jump"] = WEIRD_FILE,
["SYN_with_data"] = WEIRD_FILE,
["TCP_christmas"] = WEIRD_FILE,
["truncated_ARP"] = WEIRD_FILE,
["truncated_NTP"] = WEIRD_FILE,
["UDP_datagram_length_mismatch"] = WEIRD_NOTICE_PER_ORIG,
["unexpected_client_HTTP_data"] = WEIRD_FILE,
["unexpected_multiple_HTTP_requests"] = WEIRD_FILE,
["unexpected_server_HTTP_data"] = WEIRD_FILE,
["unmatched_HTTP_reply"] = WEIRD_FILE,
["unpaired_RPC_response"] = WEIRD_FILE,
["unsolicited_SYN_response"] = WEIRD_IGNORE,
["window_recision"] = WEIRD_FILE,
["double_%_in_URI"] = WEIRD_FILE,
["illegal_%_at_end_of_URI"] = WEIRD_FILE,
["unescaped_%_in_URI"] = WEIRD_FILE,
["unescaped_special_URI_char"] = WEIRD_FILE,
["UDP_zone_transfer"] = WEIRD_NOTICE_ONCE,
["deficit_netbios_hdr_len"] = WEIRD_FILE,
["excess_netbios_hdr_len"] = WEIRD_FILE,
["netbios_client_session_reply"] = WEIRD_FILE,
["netbios_raw_session_msg"] = WEIRD_FILE,
["netbios_server_session_request"] = WEIRD_FILE,
["unknown_netbios_type"] = WEIRD_FILE,
# flow_weird
["excessively_large_fragment"] = WEIRD_NOTICE_ALWAYS,
# Code Red generates slews ...
["excessively_small_fragment"] = WEIRD_NOTICE_PER_ORIG,
["fragment_inconsistency"] = WEIRD_NOTICE_PER_ORIG,
["fragment_overlap"] = WEIRD_NOTICE_PER_ORIG,
["fragment_protocol_inconsistency"] = WEIRD_NOTICE_ALWAYS,
["fragment_size_inconsistency"] = WEIRD_NOTICE_PER_ORIG,
["fragment_with_DF"] = WEIRD_FILE, # these do indeed happen!
["incompletely_captured_fragment"] = WEIRD_NOTICE_ALWAYS,
# net_weird
["bad_IP_checksum"] = WEIRD_FILE,
["bad_TCP_header_len"] = WEIRD_FILE,
["internally_truncated_header"] = WEIRD_NOTICE_ALWAYS,
["truncated_IP"] = WEIRD_FILE,
["truncated_header"] = WEIRD_FILE,
# generated by policy script
["Land_attack"] = WEIRD_NOTICE_PER_ORIG,
["bad_pm_port"] = WEIRD_NOTICE_PER_ORIG,
["ICMP-unreachable for wrong state"] = WEIRD_NOTICE_PER_ORIG,
} &redef;
# Table that maps weird names to a function that should be called
# to determine the action to take.
const weird_action_filters:
table[string] of function(c: connection): WeirdAction &redef;
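# Example (hypothetical sketch): choose the action for a given weird name
# based on the connection. The chosen weird name and the local/remote split
# are illustrative; Site::is_local_addr comes from base/utils/site.
#function data_before_established_action(c: connection): Weird::WeirdAction
#	{
#	return Site::is_local_addr(c$id$orig_h) ?
#		Weird::WEIRD_FILE : Weird::WEIRD_NOTICE_PER_ORIG;
#	}
#redef Weird::weird_action_filters += {
#	["data_before_established"] = data_before_established_action,
#};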
const weird_ignore_host: set[addr, string] &redef;
# But don't ignore these (for the weird file); it's handy to keep
# track of clustered checksum errors.
const weird_do_not_ignore_repeats = {
"bad_IP_checksum", "bad_TCP_checksum", "bad_UDP_checksum",
"bad_ICMP_checksum",
} &redef;
global log_weird: event(rec: Info);
}
# id/msg pairs that should be ignored (because the problem has already
# been reported).
global weird_ignore: table[string] of set[string] &write_expire = 10 min;
# For WEIRD_NOTICE_PER_CONN.
global did_notice_conn: set[addr, port, addr, port, string]
&read_expire = 1 day;
# For WEIRD_NOTICE_PER_ORIG.
global did_notice_orig: set[addr, string] &read_expire = 1 day;
# For WEIRD_NOTICE_ONCE.
global did_weird_log: set[string] &read_expire = 1 day;
global did_inconsistency_msg: set[conn_id];
# Used to pass the optional connection into report_weird().
global current_conn: connection;
event bro_init()
{
Log::create_stream(WEIRD, [$columns=Info, $ev=log_weird]);
}
function report_weird(t: time, name: string, id: string, have_conn: bool,
addl: string, action: WeirdAction, no_log: bool)
{
local info: Info;
info$ts = t;
info$msg = name;
if ( addl != "" )
info$addl = addl;
if ( have_conn )
{
info$uid = current_conn$uid;
info$id = current_conn$id;
}
if ( action == WEIRD_IGNORE ||
(id in weird_ignore && name in weird_ignore[id]) )
return;
if ( action == WEIRD_UNSPECIFIED )
{
if ( name in weird_action && weird_action[name] == WEIRD_IGNORE )
return;
else
{
action = WEIRD_NOTICE_ALWAYS;
info$notice = T;
}
}
if ( action in notice_actions && ! no_log )
{
local n: Notice::Info;
n$note = Weird_Activity;
n$msg = info$msg;
if ( have_conn )
n$conn = current_conn;
if ( info?$addl )
n$sub = info$addl;
NOTICE(n);
}
else if ( id != "" && name !in weird_do_not_ignore_repeats )
{
if ( id !in weird_ignore )
weird_ignore[id] = set() &mergeable;
add weird_ignore[id][name];
}
Log::write(WEIRD, info);
}
function report_weird_conn(t: time, name: string, id: string, addl: string,
c: connection)
{
if ( [c$id$orig_h, name] in weird_ignore_host ||
[c$id$resp_h, name] in weird_ignore_host )
return;
local no_log = F;
local action = WEIRD_UNSPECIFIED;
if ( name in weird_action )
{
if ( name in weird_action_filters )
action = weird_action_filters[name](c);
if ( action == WEIRD_UNSPECIFIED )
action = weird_action[name];
local cid = c$id;
if ( action == WEIRD_NOTICE_PER_CONN )
{
if ( [cid$orig_h, cid$orig_p, cid$resp_h, cid$resp_p, name] in did_notice_conn )
no_log = T;
else
add did_notice_conn[cid$orig_h, cid$orig_p, cid$resp_h, cid$resp_p, name];
}
else if ( action == WEIRD_NOTICE_PER_ORIG )
{
if ( [c$id$orig_h, name] in did_notice_orig )
no_log = T;
else
add did_notice_orig[c$id$orig_h, name];
}
else if ( action == WEIRD_NOTICE_ONCE )
{
if ( name in did_weird_log )
no_log = T;
else
add did_weird_log[name];
}
}
current_conn = c;
report_weird(t, name, id, T, addl, action, no_log);
}
function report_weird_orig(t: time, name: string, id: string, orig: addr)
{
local no_log = F;
local action = WEIRD_UNSPECIFIED;
if ( name in weird_action )
{
action = weird_action[name];
if ( action == WEIRD_NOTICE_PER_ORIG )
{
if ( [orig, name] in did_notice_orig )
no_log = T;
else
add did_notice_orig[orig, name];
}
}
report_weird(t, name, id, F, "", action, no_log);
}
event conn_weird(name: string, c: connection, addl: string)
{
report_weird_conn(network_time(), name, id_string(c$id), addl, c);
}
event flow_weird(name: string, src: addr, dst: addr)
{
report_weird_orig(network_time(), name, fmt("%s -> %s", src, dst), src);
}
event net_weird(name: string)
{
report_weird(network_time(), name, "", F, "", WEIRD_UNSPECIFIED, F);
}
event rexmit_inconsistency(c: connection, t1: string, t2: string)
{
if ( c$id !in did_inconsistency_msg )
{
NOTICE([$note=Retransmission_Inconsistency,
$conn=c,
$msg=fmt("%s rexmit inconsistency (%s) (%s)",
id_string(c$id), t1, t2)]);
add did_inconsistency_msg[c$id];
}
}
event ack_above_hole(c: connection)
{
NOTICE([$note=Ack_Above_Hole, $conn=c,
$msg=fmt("%s ack above a hole", id_string(c$id))]);
}
event content_gap(c: connection, is_orig: bool, seq: count, length: count)
{
NOTICE([$note=Content_Gap, $conn=c,
$msg=fmt("%s content gap (%s %d/%d)%s",
id_string(c$id), is_orig ? ">" : "<", seq, length,
is_external_connection(c) ? " [external]" : "")]);
}
event connection_state_remove(c: connection)
{
delete weird_ignore[id_string(c$id)];
delete did_inconsistency_msg[c$id];
}


@ -0,0 +1,2 @@
@load ./main
@load ./netstats


@ -0,0 +1,152 @@
##! This script controls how Bro sets its BPF capture filter. By default
##! Bro sets an unrestricted filter that allows all traffic. If a filter
##! is set on the command line, that filter takes precedence over the default
##! open filter and all filters defined in Bro scripts with the
##! :bro:id:`capture_filters` and :bro:id:`restrict_filters` variables.
module PacketFilter;
export {
redef enum Log::ID += { PACKET_FILTER };
redef enum Notice::Type += {
## This notice is generated if a packet filter cannot be compiled.
Compile_Failure,
## This notice is generated if a packet filter cannot be installed.
Install_Failure,
};
type Info: record {
ts: time &log;
## This is a string representation of the node that applied this
## packet filter. It's mostly useful in the context of dynamically
## changing filters on clusters.
node: string &log &optional;
## The packet filter that is being set.
filter: string &log;
## Indicates whether this is the filter set during initialization.
init: bool &log &default=F;
## Indicates whether the filter was applied successfully.
success: bool &log &default=T;
};
## By default, Bro will examine all packets. If this is set to false,
## it will dynamically build a BPF filter that includes only the protocols
## for which the user has loaded a corresponding analysis script.
## The latter used to be the default for Bro versions < 1.6. That has now
## changed in order to enable port-independent protocol analysis.
const all_packets = T &redef;
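# Example (hypothetical sketch): with all_packets redefined to F, the filter
# is assembled from the standard capture_filters table, which site scripts
# extend like this (the filter strings are illustrative).
#redef PacketFilter::all_packets = F;
#redef capture_filters += {
#	["dns"] = "port 53",
#	["http"] = "tcp port 80",
#};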
## Filter string which is unconditionally or'ed to the beginning of every
## dynamically built filter.
const unrestricted_filter = "" &redef;
## Call this function to build and install a new dynamically built
## packet filter.
global install: function();
## This is where the default packet filter is stored and it should not
## normally be modified by users.
global default_filter = "<not set yet>";
}
redef enum PcapFilterID += {
DefaultPcapFilter,
};
function combine_filters(lfilter: string, rfilter: string, op: string): string
{
if ( lfilter == "" && rfilter == "" )
return "";
else if ( lfilter == "" )
return rfilter;
else if ( rfilter == "" )
return lfilter;
else
return fmt("(%s) %s (%s)", lfilter, op, rfilter);
}
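# For example, combine_filters("port 53", "tcp port 80", "or") yields
# "(port 53) or (tcp port 80)", while an empty operand simply returns the
# other side unchanged.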
function build_default_filter(): string
{
if ( cmd_line_bpf_filter != "" )
# Return what the user specified on the command line.
return cmd_line_bpf_filter;
if ( all_packets )
{
# Return an "always true" filter.
if ( bro_has_ipv6() )
return "ip or not ip";
else
return "not ip6";
}
# Build filter dynamically.
# First the capture_filter.
local cfilter = "";
for ( id in capture_filters )
cfilter = combine_filters(cfilter, capture_filters[id], "or");
# Then the restrict_filter.
local rfilter = "";
for ( id in restrict_filters )
rfilter = combine_filters(rfilter, restrict_filters[id], "and");
# Finally, join them into one filter.
local filter = combine_filters(rfilter, cfilter, "and");
if ( unrestricted_filter != "" )
filter = combine_filters(unrestricted_filter, filter, "or");
# Exclude IPv6 if we don't support it.
if ( ! bro_has_ipv6() )
filter = combine_filters(filter, "not ip6", "and");
return filter;
}
function install()
{
default_filter = build_default_filter();
if ( ! precompile_pcap_filter(DefaultPcapFilter, default_filter) )
{
NOTICE([$note=Compile_Failure,
$msg=fmt("Compiling packet filter failed"),
$sub=default_filter]);
exit();
}
# Do an audit log for the packet filter.
local info: Info;
info$ts = network_time();
# If network_time() is 0.0 we're at init time so use the wall clock.
if ( info$ts == 0.0 )
{
info$ts = current_time();
info$init = T;
}
info$filter = default_filter;
if ( ! install_pcap_filter(DefaultPcapFilter) )
{
# Installing the filter failed for some reason.
info$success = F;
NOTICE([$note=Install_Failure,
$msg=fmt("Installing packet filter failed"),
$sub=default_filter]);
}
Log::write(PACKET_FILTER, info);
}
event bro_init() &priority=10
{
Log::create_stream(PACKET_FILTER, [$columns=Info]);
PacketFilter::install();
}


@ -0,0 +1,38 @@
##! This script reports on packet loss from the various packet sources.
module PacketFilter;
export {
redef enum Notice::Type += {
## Bro reported packets dropped by the packet filter.
Dropped_Packets,
};
## This is the interval between individual statistics collections.
const stats_collection_interval = 10secs;
}
event net_stats_update(last_stat: NetStats)
{
local ns = net_stats();
local new_dropped = ns$pkts_dropped - last_stat$pkts_dropped;
if ( new_dropped > 0 )
{
local new_recvd = ns$pkts_recvd - last_stat$pkts_recvd;
local new_link = ns$pkts_link - last_stat$pkts_link;
NOTICE([$note=Dropped_Packets,
$msg=fmt("%d packets dropped after filtering, %d received%s",
new_dropped, new_recvd + new_dropped,
new_link != 0 ? fmt(", %d on link", new_link) : "")]);
}
schedule stats_collection_interval { net_stats_update(ns) };
}
event bro_init()
{
# Since this currently only calculates packet drops, let's skip the stats
# collection if reading traces.
if ( ! reading_traces() )
schedule stats_collection_interval { net_stats_update(net_stats()) };
}


@ -0,0 +1 @@
@load ./main


@ -0,0 +1,40 @@
##! This framework is intended to create an output and filtering path for
##! internal messages/warnings/errors. It should typically be loaded to
##! avoid Bro spewing internal messages to standard error.
module Reporter;
export {
redef enum Log::ID += { REPORTER };
type Level: enum { INFO, WARNING, ERROR };
type Info: record {
ts: time &log;
level: Level &log;
message: string &log;
## This is the location in a Bro script where the message originated.
## Not all reporter messages will have locations in them though.
location: string &log &optional;
};
}
event bro_init()
{
Log::create_stream(REPORTER, [$columns=Info]);
}
event reporter_info(t: time, msg: string, location: string)
{
Log::write(REPORTER, [$ts=t, $level=INFO, $message=msg, $location=location]);
}
event reporter_warning(t: time, msg: string, location: string)
{
Log::write(REPORTER, [$ts=t, $level=WARNING, $message=msg, $location=location]);
}
event reporter_error(t: time, msg: string, location: string)
{
Log::write(REPORTER, [$ts=t, $level=ERROR, $message=msg, $location=location]);
}


@ -0,0 +1 @@
@load ./main


@ -0,0 +1,282 @@
##! Script level signature support.
module Signatures;
export {
redef enum Notice::Type += {
## Generic notice type for alarm-worthy signature matches.
Sensitive_Signature,
## Host has triggered many signatures on the same host. The number of
## signatures is defined by the :bro:id:`vert_scan_thresholds` variable.
Multiple_Signatures,
## Host has triggered the same signature on multiple hosts as defined by the
## :bro:id:`horiz_scan_thresholds` variable.
Multiple_Sig_Responders,
## The same signature has triggered multiple times for a host. The number
## of times the signature has been triggered is defined by the
## :bro:id:`count_thresholds` variable. To generate this notice, the
## :bro:enum:`SIG_COUNT_PER_RESP` action must be set for the signature.
Count_Signature,
## Summarize the number of times a host triggered a signature. The
## interval between summaries is defined by the :bro:id:`summary_interval`
## variable.
Signature_Summary,
};
redef enum Log::ID += { SIGNATURES };
## These are the default actions you can apply to signature matches.
## All of them write the signature record to the logging stream unless
## declared otherwise.
type Action: enum {
## Ignore this signature completely (even for scan detection). Don't
## write to the signatures logging stream.
SIG_IGNORE,
## Process through the various aggregate techniques, but don't report
## individually and don't write to the signatures logging stream.
SIG_QUIET,
## Generate a notice.
SIG_LOG,
## Write the signature record to the logging stream, but ignore the match
## for aggregate/scan processing.
SIG_FILE_BUT_NO_SCAN,
## Generate a notice and set it to be alarmed upon.
SIG_ALARM,
## Alarm once per originator.
SIG_ALARM_PER_ORIG,
## Alarm once and then never again.
SIG_ALARM_ONCE,
## Count signatures per responder host and alarm with the
## :bro:enum:`Count_Signature` notice if a threshold defined by
## :bro:id:`count_thresholds` is reached.
SIG_COUNT_PER_RESP,
## Don't alarm, but generate per-orig summary.
SIG_SUMMARY,
};
type Info: record {
ts: time &log;
src_addr: addr &log &optional;
src_port: port &log &optional;
dst_addr: addr &log &optional;
dst_port: port &log &optional;
## Notice associated with signature event
note: Notice::Type &log;
sig_id: string &log &optional;
event_msg: string &log &optional;
## Extracted payload data or extra message.
sub_msg: string &log &optional;
## Number of sigs, usually from summary count.
sig_count: count &log &optional;
## Number of hosts, from a summary count.
host_count: count &log &optional;
};
## Actions for a signature.
const actions: table[string] of Action = {
["unspecified"] = SIG_IGNORE, # place-holder
} &redef &default = SIG_ALARM;
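# Example (hypothetical sketch): tune the action for individual signature IDs
# from a site script; the signature IDs here are made up.
#redef Signatures::actions += {
#	["noisy-test-sig"] = Signatures::SIG_QUIET,
#	["ignorable-test-sig"] = Signatures::SIG_IGNORE,
#};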
## Signature IDs that should always be ignored.
const ignored_ids = /NO_DEFAULT_MATCHES/ &redef;
## Alarm if, for a pair [orig, signature], the number of different
## responders has reached one of the thresholds.
const horiz_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef;
## Alarm if, for a pair [orig, resp], the number of different signature
## matches has reached one of the thresholds.
const vert_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef;
## Alarm if a :bro:enum:`SIG_COUNT_PER_RESP` signature is triggered as
## often as given by one of these thresholds.
const count_thresholds = { 5, 10, 50, 100, 500, 1000, 10000, 1000000, } &redef;
## The interval between when :bro:id:`Signature_Summary` notices are
## generated.
const summary_interval = 1 day &redef;
global log_signature: event(rec: Info);
}
global horiz_table: table[addr, string] of addr_set &read_expire = 1 hr;
global vert_table: table[addr, addr] of string_set &read_expire = 1 hr;
global last_hthresh: table[addr] of count &default = 0 &read_expire = 1 hr;
global last_vthresh: table[addr] of count &default = 0 &read_expire = 1 hr;
global count_per_resp: table[addr, string] of count
&default = 0 &read_expire = 1 hr;
global count_per_orig: table[addr, string] of count
&default = 0 &read_expire = 1 hr;
global did_sig_log: set[string] &read_expire = 1 hr;
event bro_init()
{
Log::create_stream(SIGNATURES, [$columns=Info, $ev=log_signature]);
}
# Returns true if the given signature has already been triggered for the given
# [orig, resp] pair.
function has_signature_matched(id: string, orig: addr, resp: addr): bool
{
return [orig, resp] in vert_table ? id in vert_table[orig, resp] : F;
}
event sig_summary(orig: addr, id: string, msg: string)
{
NOTICE([$note=Signature_Summary, $src=orig,
$filename=id, $msg=fmt("%s: %s", orig, msg),
$n=count_per_orig[orig,id] ]);
}
event signature_match(state: signature_state, msg: string, data: string)
{
local sig_id = state$sig_id;
local action = actions[sig_id];
if ( action == SIG_IGNORE || ignored_ids in sig_id )
return;
# Trim the matched data down to something reasonable
if ( byte_len(data) > 140 )
data = fmt("%s...", sub_bytes(data, 0, 140));
local src_addr: addr;
local src_port: port;
local dst_addr: addr;
local dst_port: port;
if ( state$is_orig )
{
src_addr = state$conn$id$orig_h;
src_port = state$conn$id$orig_p;
dst_addr = state$conn$id$resp_h;
dst_port = state$conn$id$resp_p;
}
else
{
src_addr = state$conn$id$resp_h;
src_port = state$conn$id$resp_p;
dst_addr = state$conn$id$orig_h;
dst_port = state$conn$id$orig_p;
}
if ( action != SIG_QUIET && action != SIG_COUNT_PER_RESP )
{
local info: Info = [$ts=network_time(),
$note=Sensitive_Signature,
$src_addr=src_addr,
$src_port=src_port,
$dst_addr=dst_addr,
$dst_port=dst_port,
$event_msg=fmt("%s: %s", src_addr, msg),
$sig_id=sig_id,
$sub_msg=data];
Log::write(SIGNATURES, info);
}
local notice = F;
if ( action == SIG_ALARM )
notice = T;
if ( action == SIG_COUNT_PER_RESP )
{
local dst = state$conn$id$resp_h;
if ( ++count_per_resp[dst,sig_id] in count_thresholds )
{
NOTICE([$note=Count_Signature, $conn=state$conn,
$msg=msg,
$filename=sig_id,
$n=count_per_resp[dst,sig_id],
$sub=fmt("%d matches of signature %s on host %s",
count_per_resp[dst,sig_id],
sig_id, dst)]);
}
}
if ( (action == SIG_ALARM_PER_ORIG || action == SIG_SUMMARY) &&
++count_per_orig[state$conn$id$orig_h, sig_id] == 1 )
{
if ( action == SIG_ALARM_PER_ORIG )
notice = T;
else
schedule summary_interval {
sig_summary(state$conn$id$orig_h, sig_id, msg)
};
}
if ( action == SIG_ALARM_ONCE )
{
if ( [sig_id] !in did_sig_log )
{
notice = T;
add did_sig_log[sig_id];
}
}
if ( notice )
NOTICE([$note=Sensitive_Signature,
$conn=state$conn, $src=src_addr,
$dst=dst_addr, $filename=sig_id, $msg=fmt("%s: %s", src_addr, msg),
$sub=data]);
if ( action == SIG_FILE_BUT_NO_SCAN || action == SIG_SUMMARY )
return;
# Keep track of scans.
local orig = state$conn$id$orig_h;
local resp = state$conn$id$resp_h;
if ( [orig, sig_id] !in horiz_table )
horiz_table[orig, sig_id] = set();
add horiz_table[orig, sig_id][resp];
if ( [orig, resp] !in vert_table )
vert_table[orig, resp] = set();
add vert_table[orig, resp][sig_id];
local hcount = length(horiz_table[orig, sig_id]);
local vcount = length(vert_table[orig, resp]);
if ( hcount in horiz_scan_thresholds && hcount != last_hthresh[orig] )
{
local horz_scan_msg =
fmt("%s has triggered signature %s on %d hosts",
orig, sig_id, hcount);
Log::write(SIGNATURES,
[$note=Multiple_Sig_Responders,
$src_addr=orig, $sig_id=sig_id, $event_msg=msg,
$host_count=hcount, $sub_msg=horz_scan_msg]);
NOTICE([$note=Multiple_Sig_Responders, $src=orig, $filename=sig_id,
$msg=msg, $n=hcount, $sub=horz_scan_msg]);
last_hthresh[orig] = hcount;
}
if ( vcount in vert_scan_thresholds && vcount != last_vthresh[orig] )
{
local vert_scan_msg =
fmt("%s has triggered %d different signatures on host %s",
orig, vcount, resp);
Log::write(SIGNATURES,
[$ts=network_time(),
$note=Multiple_Signatures,
$src_addr=orig,
$dst_addr=resp, $sig_id=sig_id, $sig_count=vcount,
$event_msg=fmt("%s different signatures triggered", vcount),
$sub_msg=vert_scan_msg]);
NOTICE([$note=Multiple_Signatures, $src=orig, $dst=resp,
$filename=sig_id,
$msg=fmt("%s different signatures triggered", vcount),
$n=vcount, $sub=vert_scan_msg]);
last_vthresh[orig] = vcount;
}
}


@ -0,0 +1 @@
@load ./main


@ -0,0 +1,392 @@
##! This script provides the framework for software version detection and
##! parsing, but doesn't actually do any detection on its own. It relies on
##! other protocol-specific scripts to parse out software from the protocols
##! that they analyze. The entry point for providing new software detections
##! to this framework is through the :bro:id:`Software::found` function.
module Software;
export {
redef enum Log::ID += { SOFTWARE };
type Type: enum {
UNKNOWN,
OPERATING_SYSTEM,
DATABASE_SERVER,
# There are a number of ways to detect printers on the
# network; we just need to codify them in a script and move
# this out of here. It isn't currently used for anything.
PRINTER,
};
type Version: record {
major: count &optional; ##< Major version number
minor: count &optional; ##< Minor version number
minor2: count &optional; ##< Minor subversion number
addl: string &optional; ##< Additional version string (e.g. "beta42")
} &log;
type Info: record {
## The time at which the software was first detected.
ts: time &log;
## The IP address detected running the software.
host: addr &log;
## The type of software detected (e.g. WEB_SERVER)
software_type: Type &log &default=UNKNOWN;
## Name of the software (e.g. Apache)
name: string &log;
## Version of the software
version: Version &log;
## The full unparsed version string found because the version parsing
## doesn't work 100% reliably; this acts as a fallback in the logs.
unparsed_version: string &log &optional;
## This indicates that the software detected should definitely be
## sent onward to the logging framework. By default, only software
## that is "interesting", either because its version changed or
## because it is currently unknown, is sent to the logging
## framework. This can be set to T to force the record to be sent
## to the logging framework even when that tracking would otherwise
## suppress it.
force_log: bool &default=F;
};
## The hosts whose software should be detected and tracked.
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS
const asset_tracking = LOCAL_HOSTS &redef;
## Other scripts should call this function when they detect software.
## id: The connection ID on which the software was detected.
## info: The :bro:type:`Software::Info` record describing the software.
## Returns: T if the software was accepted for tracking, F otherwise.
global found: function(id: conn_id, info: Software::Info): bool;
## This function can take many software version strings and parse them
## into a sensible :bro:type:`Software::Version` record. There are
## still many cases where scripts may have to have their own specific
## version parsing though.
global parse: function(unparsed_version: string,
host: addr,
software_type: Type): Info;
## Compare two versions.
## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2.
## If the numerical version numbers match, the addl string
## is compared lexicographically.
global cmp_versions: function(v1: Version, v2: Version): int;
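# Example (hypothetical sketch): how a protocol script might hand a raw
# version string to this framework. The event, the choice of host, and the
# UNKNOWN software type are illustrative only.
#event ssh_server_version(c: connection, version: string)
#	{
#	local si = Software::parse(version, c$id$resp_h, Software::UNKNOWN);
#	Software::found(c$id, si);
#	}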
## This type represents a set of software. It's used by the
## :bro:id:`tracked` variable to store all known pieces of software
## for a particular host. It's indexed with the name of a piece of
## software such as "Firefox" and it yields a
## :bro:type:`Software::Info` record with more information about the
## software.
type SoftwareSet: table[string] of Info;
## The set of software associated with an address. Data expires from
## this table after one day by default so that a detected piece of
## software will be logged once each day.
global tracked: table[addr] of SoftwareSet
&create_expire=1day
&synchronized
&redef;
## This event can be handled to access the :bro:type:`Software::Info`
## record as it is sent on to the logging framework.
global log_software: event(rec: Info);
}
event bro_init()
{
Log::create_stream(SOFTWARE, [$columns=Info, $ev=log_software]);
}
function parse_mozilla(unparsed_version: string,
host: addr,
software_type: Type): Info
{
local software_name = "<unknown browser>";
local v: Version;
local parts: table[count] of string;
if ( /Opera [0-9\.]*$/ in unparsed_version )
{
software_name = "Opera";
parts = split_all(unparsed_version, /Opera [0-9\.]*$/);
if ( 2 in parts )
v = parse(parts[2], host, software_type)$version;
}
else if ( /MSIE 7.*Trident\/4\.0/ in unparsed_version )
{
software_name = "MSIE";
v = [$major=8,$minor=0];
}
else if ( / MSIE [0-9\.]*b?[0-9]*;/ in unparsed_version )
{
software_name = "MSIE";
parts = split_all(unparsed_version, /MSIE [0-9\.]*b?[0-9]*/);
if ( 2 in parts )
v = parse(parts[2], host, software_type)$version;
}
else if ( /Version\/.*Safari\// in unparsed_version )
{
software_name = "Safari";
parts = split_all(unparsed_version, /Version\/[0-9\.]*/);
if ( 2 in parts )
{
v = parse(parts[2], host, software_type)$version;
if ( / Mobile\/?.* Safari/ in unparsed_version )
v$addl = "Mobile";
}
}
else if ( /(Firefox|Netscape|Thunderbird)\/[0-9\.]*/ in unparsed_version )
{
parts = split_all(unparsed_version, /(Firefox|Netscape|Thunderbird)\/[0-9\.]*/);
if ( 2 in parts )
{
local tmp_s = parse(parts[2], host, software_type);
software_name = tmp_s$name;
v = tmp_s$version;
}
}
else if ( /Chrome\/.*Safari\// in unparsed_version )
{
software_name = "Chrome";
parts = split_all(unparsed_version, /Chrome\/[0-9\.]*/);
if ( 2 in parts )
v = parse(parts[2], host, software_type)$version;
}
else if ( /^Opera\// in unparsed_version )
{
if ( /Opera M(ini|obi)\// in unparsed_version )
{
parts = split_all(unparsed_version, /Opera M(ini|obi)/);
if ( 2 in parts )
software_name = parts[2];
parts = split_all(unparsed_version, /Version\/[0-9\.]*/);
if ( 2 in parts )
v = parse(parts[2], host, software_type)$version;
else
{
parts = split_all(unparsed_version, /Opera Mini\/[0-9\.]*/);
if ( 2 in parts )
v = parse(parts[2], host, software_type)$version;
}
}
else
{
software_name = "Opera";
parts = split_all(unparsed_version, /Version\/[0-9\.]*/);
if ( 2 in parts )
v = parse(parts[2], host, software_type)$version;
}
}
else if ( /AppleWebKit\/[0-9\.]*/ in unparsed_version )
{
software_name = "Unspecified WebKit";
parts = split_all(unparsed_version, /AppleWebKit\/[0-9\.]*/);
if ( 2 in parts )
v = parse(parts[2], host, software_type)$version;
}
return [$ts=network_time(), $host=host, $name=software_name, $version=v,
$software_type=software_type, $unparsed_version=unparsed_version];
}
# Don't even try to understand this now; just make sure the tests are
# working.
function parse(unparsed_version: string,
host: addr,
software_type: Type): Info
{
local software_name = "<parse error>";
local v: Version;
# Parse browser-alike versions separately
if ( /^(Mozilla|Opera)\/[0-9]\./ in unparsed_version )
{
return parse_mozilla(unparsed_version, host, software_type);
}
else
{
# The regular expression should match the complete version number
# and software name.
local version_parts = split_n(unparsed_version, /\/?( [\(])?v?[0-9\-\._, ]{2,}/, T, 1);
if ( 1 in version_parts )
{
if ( /^\(/ in version_parts[1] )
software_name = strip(sub(version_parts[1], /[\(]/, ""));
else
software_name = strip(version_parts[1]);
}
if ( |version_parts| >= 2 )
{
# Remove the name/version separator if it's left at the beginning
# of the version number from the previous split_n.
local sv = strip(version_parts[2]);
if ( /^[\/\-\._v\(]/ in sv )
sv = strip(sub(version_parts[2], /^\(?[\/\-\._v\(]/, ""));
local version_numbers = split_n(sv, /[\-\._,\[\(\{ ]/, F, 3);
if ( 4 in version_numbers && version_numbers[4] != "" )
v$addl = strip(version_numbers[4]);
else if ( 3 in version_parts && version_parts[3] != "" &&
version_parts[3] != ")" )
{
if ( /^[[:blank:]]*\([a-zA-Z0-9\-\._[:blank:]]*\)/ in version_parts[3] )
{
v$addl = split_n(version_parts[3], /[\(\)]/, F, 2)[2];
}
else
{
local vp = split_n(version_parts[3], /[\-\._,;\[\]\(\)\{\} ]/, F, 3);
if ( |vp| >= 1 && vp[1] != "" )
{
v$addl = strip(vp[1]);
}
else if ( |vp| >= 2 && vp[2] != "" )
{
v$addl = strip(vp[2]);
}
else if ( |vp| >= 3 && vp[3] != "" )
{
v$addl = strip(vp[3]);
}
else
{
v$addl = strip(version_parts[3]);
}
}
}
if ( 3 in version_numbers && version_numbers[3] != "" )
v$minor2 = extract_count(version_numbers[3]);
if ( 2 in version_numbers && version_numbers[2] != "" )
v$minor = extract_count(version_numbers[2]);
if ( 1 in version_numbers && version_numbers[1] != "" )
v$major = extract_count(version_numbers[1]);
}
}
return [$ts=network_time(), $host=host, $name=software_name,
$version=v, $unparsed_version=unparsed_version,
$software_type=software_type];
}
function cmp_versions(v1: Version, v2: Version): int
{
if ( v1?$major && v2?$major )
{
if ( v1$major < v2$major )
return -1;
if ( v1$major > v2$major )
return 1;
}
else
{
if ( !v1?$major && !v2?$major )
{ }
else
return v1?$major ? 1 : -1;
}
if ( v1?$minor && v2?$minor )
{
if ( v1$minor < v2$minor )
return -1;
if ( v1$minor > v2$minor )
return 1;
}
else
{
if ( !v1?$minor && !v2?$minor )
{ }
else
return v1?$minor ? 1 : -1;
}
if ( v1?$minor2 && v2?$minor2 )
{
if ( v1$minor2 < v2$minor2 )
return -1;
if ( v1$minor2 > v2$minor2 )
return 1;
}
else
{
if ( !v1?$minor2 && !v2?$minor2 )
{ }
else
return v1?$minor2 ? 1 : -1;
}
if ( v1?$addl && v2?$addl )
return strcmp(v1$addl, v2$addl);
else
{
if ( !v1?$addl && !v2?$addl )
return 0;
else
return v1?$addl ? 1 : -1;
}
}
function software_endpoint_name(id: conn_id, host: addr): string
{
return fmt("%s %s", host, (host == id$orig_h ? "client" : "server"));
}
# Convert a version into a string "a.b.c-x".
function software_fmt_version(v: Version): string
{
return fmt("%d.%d.%d%s",
v?$major ? v$major : 0,
v?$minor ? v$minor : 0,
v?$minor2 ? v$minor2 : 0,
v?$addl ? fmt("-%s", v$addl) : "");
}
# Convert a software into a string "name a.b.c-x".
function software_fmt(i: Info): string
{
return fmt("%s %s", i$name, software_fmt_version(i$version));
}
# Insert a mapping into the table.
# Overrides old entries for the same software and generates events if needed.
event software_register(id: conn_id, info: Info)
{
# Host already known?
if ( info$host !in tracked )
tracked[info$host] = table();
local ts = tracked[info$host];
# Software already registered for this host? We don't want to endlessly
# log the same thing.
if ( info$name in ts )
{
local old = ts[info$name];
# If the version hasn't changed, we're just re-detecting the same
# thing and we don't care; this results in no extra logging. But if
# the $force_log value is set, then we'll continue.
if ( ! info$force_log && cmp_versions(old$version, info$version) == 0 )
return;
}
Log::write(SOFTWARE, info);
ts[info$name] = info;
}
function found(id: conn_id, info: Info): bool
{
if ( info$force_log || addr_matches_host(info$host, asset_tracking) )
{
event software_register(id, info);
return T;
}
else
return F;
}


@ -0,0 +1,3 @@
# If we asked the Time Machine to capture, the filename prefix.
# TODO: implement this as a timemachine/notice.bro script?
#captured: string &optional;

834
scripts/base/misc/p0f.fp Normal file

@ -0,0 +1,834 @@
#
# p0f - SYN fingerprints
# ----------------------
#
# .-------------------------------------------------------------------------.
# | The purpose of this file is to cover signatures for incoming TCP/IP |
# | connections (SYN packets). This is the default mode of operation for |
# | p0f. This is also the biggest and most up-to-date set of signatures |
# | shipped with this project. The file also contains a detailed discussion |
# | of all metrics examined by p0f, and some practical notes on how to |
# | add new signatures. |
# `-------------------------------------------------------------------------'
#
# (C) Copyright 2000-2006 by Michal Zalewski <lcamtuf@coredump.cx>
#
# Each line in this file specifies a single fingerprint. Please read the
# information below carefully before attempting to append any signatures
# reported by p0f as UNKNOWN to this file to avoid mistakes. Note that
# this file is compatible only with the default operation mode, and not
# with -R or -A options (SYN+ACK and RST+ modes).
#
# We use the following set of metrics for fingerprinting:
#
# - Window size (WSS) - a highly OS dependent setting used for TCP/IP
# performance control (max. amount of data to be sent without ACK).
# Some systems use a fixed value for initial packets. On other
# systems, it is a multiple of MSS or MTU (MSS+40). In some rare
# cases, the value is just arbitrary.
#
# NEW SIGNATURE: if p0f reported a special value of 'Snn', the number
# appears to be a multiple of MSS (MSS*nn); a special value of 'Tnn'
# means it is a multiple of MTU ((MSS+40)*nn). Unless you notice the
# value of nn is not fixed (unlikely), just copy the Snn or Tnn token
# literally. If you know this device has a simple stack and a fixed
# MTU, you can however multiply S value by MSS, or T value by MSS+40,
# and put it instead of Snn or Tnn. One system may exhibit several T
# or S values. In some situations, this might be a source of some
# additional information about the setup if you have some time to dig
# thru the kernel sources; in some other cases, like Windows, there seem
# to be a multitude of variants and WSS selection algorithms, but it's
# rather difficult to find a pattern without having the source.
#
# If WSS looks like a regular fixed value (for example is a power of two),
# or if you can confirm the value is fixed by looking at several
# fingerprints, please quote it literally. If there's no apparent pattern
# in WSS chosen, you should consider wildcarding this value - but this
# should be the last option.
#
# NOTE: Some NAT devices, such as Linux iptables with --set-mss, will
# modify MSS, but not WSS. As a result, MSS is changed to reflect
# the MTU of the NAT device, but WSS remains a multiple of the original
# MSS. Fortunately for us, the source device would almost always be
# hooked up to Ethernet. P0f handles it automatically for the original
# MSS of 1460, by adding "NAT!" tag to the result.
#
# In certain configurations, Linux erratically (?) uses MTU from another
# interface on the default gw interface. This only happens on systems with
# two network interfaces. Thus, some Linux systems that do not go thru NAT,
# but have multiple interfaces instead, will be also tagged this way.
#
# P0f recognizes and automatically wildcards WSS of 12345, as generated
# by sendack and sendsyn utilities shipped with the program, when
# reporting a new signature. See test/sendack.c and test/sendsyn.c for more
# information about this.
#
# - Overall packet size - a function of all IP and TCP options and bugs.
# While this is partly redundant in the real world, we record this value
# to capture rare cases when there are IP options (which we do not currently
# examine) or packet data past the headers. Both situations are rare.
#
# Packet size MAY be wildcarded, but the meaning of the wildcard is
# very special, and means the packet must be larger than PACKET_BIG
# (defined in config.h as 100). This is usually not necessary, except
# for some really broken implementations in RST+ mode. For more information,
# see p0fr.fp. P0f automatically wildcards big packets when reporting
# new signatures.
#
# NEW SIGNATURE: Copy this value literally.
#
# - Initial TTL - We check the actual TTL of a received packet. It can't
# be higher than the initial TTL, and also shouldn't be dramatically
# lower (maximum distance is defined in config.h as 40 hops).
#
# NEW SIGNATURE: *Never* copy TTL from a p0f-reported signature literally.
# You need to determine the initial TTL. The best way to do it is to
# check the documentation for a remote system, or check its settings.
# A fairly good method is to simply round the observed TTL up to
# 32, 64, 128, or 255, but it should be noted that some obscure devices
# might not use round TTLs (in particular, some shoddy appliances and
# IRIX and Tru64 are known to use "original" initial TTL settings). If not
# sure, use traceroute or mtr to see how far you are from the host.
#
# Note that -F option overrides this check if no signature can be found.
#
# - Don't fragment flag (DF) - some modern OSes set this to implement PMTU
# discovery. Others do not bother.
#
# NEW SIGNATURE: Copy this value literally. Note: this setting is
# sometimes cleared by firewalls and/or certain connectivity clients.
# Try to find out what the actual state is for a given OS if you see both,
# and add the right one. P0f will automatically detect a case when a
# firewall removed the DF flag and will append "(firewall!)" suffix to
# the signature, so if the DF version is the right one, don't add a no-DF
# variant, unless it has a different meaning.
#
# - Maximum segment size (MSS) - this setting is usually link-dependent. P0f
# uses it to determine link type of the remote host.
#
# NEW SIGNATURE: Always wildcard this value, except for rare cases when
# you have an appliance with a fixed value, know the system supports only
# a very limited number of network interface types, or know the system
# is using a value it pulled out of nowhere. I use specific unique MSS
# to tell Google crawlbots from the rest of Linux population, for example.
#
# If a specific MSS/MTU is unique to a certain link type, be sure to
# add it to mtu.h instead of creating several variants of each signature.
#
# - Window scaling (WSCALE) - this feature is used to scale WSS.
# It extends the size of a TCP/IP window to 32 bits, of sorts. Some modern
# systems implement this feature.
#
# NEW SIGNATURE: Observe several signatures. Initial WSCALE is often set
# to zero or other low value. There's usually no need to wildcard this
# parameter.
#
# - Timestamp - some systems that implement timestamps set them to
# zero in the initial SYN. This case is detected and handled appropriately.
#
# NEW SIGNATURE: Copy T or T0 option literally.
#
# - Selective ACK permitted - a flag set by systems that implement
# selective ACK functionality,
#
# NEW SIGNATURE: copy S option literally.
#
# - NOP option - its presence, count and sequence is a useful OS-dependent
# characteristic,
#
# NEW SIGNATURE: copy N options literally.
#
# - Other and unrecognized options (TTCP-related and such) - implemented by
# some eccentric or very buggy TCP/IP stacks ;-),
#
# NEW SIGNATURE: copy ? options literally.
#
# - EOL option. Contrary to the popular belief, the presence of EOL
# option is actually quite rare, most systems just NOP-pad to the
# packet boundary.
#
# NEW SIGNATURE: copy E option literally.
#
# - The sequence of TCP all options mentioned above - this is very
# specific to the implementation,
#
# NEW SIGNATURE: Copy the sequence literally.
#
# - Quirks. Some buggy stacks set certain values that should be zeroed in a
# TCP packet to non-zero values. This has no effect as of today, but is
# a valuable source of information. Some systems actually seem to leak
# memory there. Other systems just exhibit harmful but very specific
# behavior. This section captures all unusual yes-no properties not
# related to the main and expected header layout. We detect the following:
#
# - Data past the headers. Neither SYN nor SYN+ACK packets are supposed
# to carry any payload. If they do, we should take notice. The actual
# payload is not examined, but will be displayed if you use the -X option.
# Note that payload is not unusual in RST+ mode (see p0fr.fp), very
# rare otherwise.
#
# - Options past EOL. Some systems have some trailing data past EOL
# in the options section of TCP/IP headers. P0f does not examine this
# data as of today, simply detects its presence. If there is a
# confirmed sizable population of systems that have data past EOL, it
# might be a good idea to look at it. Until then, you have to recompile
# p0f with DEBUG_EXTRAS set or use -x to display this data,
#
# - Zero IP ID. This again is a (mostly) harmless setting to use a fixed
# IP ID for packets with DF set. Some systems reportedly use zero ID,
# most OSes do not. There is a very slight probability of a false
# positive when IP ID is "naturally" chosen to be zero on a system
# that otherwise does set proper values, but the probability is
# neglible (if it becomes a problem, recompile p0f with IGNORE_ZEROID
# set in the sources).
#
# - IP options specified. Usually, packets do not have any IP options
# set, but there can be some. Until there is a confirmed sizable
# population of systems that do have IP options in a packet, p0f
# does not examine those in detail, but it might change (use
# DEBUG_EXTRAS or -x to display IP options if any found),
#
# - URG pointer value. SYN packets do not have URG flag set, so the
# value in URG pointer in TCP header is ignored. Most systems set it
# to zero, but some OSes (some versions of Windows, for example) do
# not zero this field or even simply leak memory; the actual value is
# not examined, because most cases seem to be just random garbage
# (you can use DEBUG_EXTRAS or -x to report this information though);
# see doc/win-memleak.txt for more information,
#
# - "Unused" field value. This should be always zero, but some systems
# forget to clear it. This might result in some funny issues in the
# future. P0f checks for non-zero value (and will display it if
# DEBUG_EXTRAS is set, or you can use -x),
#
# - ACK number non-zero. ACK value in SYN packets with no ACK flag
# is disregarded and is usually set to zero (just like with URG
# pointer), but some systems forget to do it. The exact value is
# not examined (but will be displayed with DEBUG_EXTRAS, or you can
# use -x). Note that this is not an anomaly in SYN+ACK and RST+ modes,
#
# - Non-zero second timestamp. The initial SYN packet should have the
# second timestamp always zeroed. SYN+ACK and RST+ may "legally" have
# this quirk though,
#
# - Unusual flags. If, in addition to SYN (or SYN+ACK), there are some
# auxiliary flags that do not modify the very meaning of a packet,
# p0f records this (this can be URG, PUSH, or something else).
#
# Note: ECN flags (ECE and CWR) are ignored and denoted in a separate
# way. ECN is never used by default, because some systems can't handle it,
# and it probably does not make much sense to include it in signatures
# right now.
#
# - TCP option segment parsing problems. If p0f fails to decode options
# because of a badly broken packet, it records this fact.
#
# There are several other quirks valid only in RST+ mode, see p0fr.fp for
# more information. Those quirks are unheard of in SYN and SYN+ACK
# modes.
#
# NEW SIGNATURE: Copy "quirks" section literally.
#
# We DO NOT use ToS for fingerprinting. While the original TCP/IP
# fingerprinting research believed this value would be useful for this
# purpose, it is not. The setting is way too often tweaked by network
# devices.
#
# To wildcard MSS, WSS or WSCALE, replace it with '*'. You can also use a
# modulo operator to match any values that divide by nnn - '%nnn' (and,
# as stated above, WSS also supports special values Snn and Tnn).
#
# Fingerprint entry format:
#
# wwww:ttt:D:ss:OOO...:QQ:OS:Details
#
# wwww - window size (can be * or %nnn or Sxx or Txx)
# "Snn" (multiple of MSS) and "Tnn" (multiple of MTU) are allowed.
# ttt - initial TTL
# D - don't fragment bit (0 - not set, 1 - set)
# ss - overall SYN packet size (* has a special meaning)
# OOO - option value and order specification (see below)
# QQ - quirks list (see below)
# OS - OS genre (Linux, Solaris, Windows)
# details - OS description (2.0.27 on x86, etc)
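# A worked example (using an entry that appears later in this file):
#
#   S4:64:1:60:M*,S,T,N,W0:.:Linux:2.4-2.6
#
# reads as: WSS is 4*MSS, initial TTL 64, DF bit set, 60-byte SYN packet,
# options in order: MSS (any), SACK permitted, timestamp, NOP, window
# scaling 0; no quirks; OS genre "Linux", description "2.4-2.6".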
#
# If OS genre starts with '*', p0f will not show distance, link type
# and timestamp data. It is useful for userland TCP/IP stacks of
# network scanners and so on, where many settings are randomized or
# bogus.
#
# If OS genre starts with @, it denotes an approximate hit for a group
# of operating systems (signature reporting still enabled in this case).
# Use this feature at the end of this file to catch cases for which
# you don't have a precise match, but can tell it's Windows or FreeBSD
# or whatnot by looking at, say, flag layout alone.
#
# If OS genre starts with - (which can prefix @ or *), the entry is
# not considered to be a real operating system (but userland stack
# instead). It is important to mark all scanners and so on with -,
# so that they are not used for masquerade detection (also add this
# prefix for signatures of application-induced behavior, such as
# increased window size with Opera browser).
#
# Option block description is a list of comma or space separated
# options in the order they appear in the packet:
#
# N - NOP option
# E - EOL option
# Wnnn - window scaling option, value nnn (or * or %nnn)
# Mnnn - maximum segment size option, value nnn (or * or %nnn)
# S - selective ACK OK
# T - timestamp
# T0 - timestamp with zero value
# ?n - unrecognized option number n.
#
# P0f can sometimes report ?nn among the options. This means it couldn't
# recognize this option (option number nn). It's either a bug in p0f, or
# a faulty TCP/IP stack, or, if the number is listed here:
#
# http://www.iana.org/assignments/tcp-parameters
#
# ...the stack might be simply quite exotic.
#
# To denote no TCP options, use a single '.'.
#
# Quirks section is usually an empty list ('.') of oddities or bugs of this
# particular stack. List items are not separated in any way. Possible values:
#
# P - options past EOL,
# Z - zero IP ID,
# I - IP options specified,
# U - urg pointer non-zero,
# X - unused (x2) field non-zero,
# A - ACK number non-zero,
# T - non-zero second timestamp,
# F - unusual flags (PUSH, URG, etc),
# D - data payload,
# ! - broken options segment.
#
# WARNING WARNING WARNING
# -----------------------
#
# Do not add a system X as OS Y just because NMAP says so. It is often
# the case that X is a NAT firewall. While nmap is talking to the
# device itself, p0f is fingerprinting the guy behind the firewall
# instead.
#
# When in doubt, use common sense, don't add something that looks like
# a completely different system as Linux or FreeBSD or LinkSys router.
# Check DNS name, establish a connection to the remote host and look
# at SYN+ACK (p0f -A -S should do) - does it look similar?
#
# Some users tweak their TCP/IP settings - enable or disable RFC1323,
# RFC1644 or RFC2018 support, disable PMTU discovery, change MTU, initial
# TTL and so on. Always compare a new rule to other fingerprints for
# this system, and verify the system isn't "customized". It is OK to
# add signature variants caused by commonly used software (PFs, security
# packages, etc), but it makes no sense to try to add every single
# possible /proc/sys/net/ipv4/* tweak on Linux or so.
#
# KEEP IN MIND: Some packet firewalls configured to normalize outgoing
# traffic (OpenBSD pf with "scrub" enabled, for example) will, well,
# normalize packets. Signatures will not correspond to the originating
# system (and probably not quite to the firewall either).
#
# NOTE: Try to keep this file in some reasonable order, from most to
# least likely systems. This will speed up operation. Also keep most
# generic and broad rules near the end.
#
# Still decided to add signature? Let us know - mail a copy of your discovery
# to lcamtuf@coredump.cx. You can help make p0f better, and I can help you
# make your signature more accurate.
#
##########################
# Standard OS signatures #
##########################
# ----------------- AIX ---------------------
# AIX is first because its signatures are close to NetBSD, MacOS X and
# Linux 2.0, but it uses fairly rare MSS values, at least sometimes...
# This is a shoddy hack, though.
45046:64:0:44:M*:.:AIX:4.3
16384:64:0:44:M512:.:AIX:4.3.2 and earlier
16384:64:0:60:M512,N,W%2,N,N,T:.:AIX:4.3.3-5.2 (1)
32768:64:0:60:M512,N,W%2,N,N,T:.:AIX:4.3.3-5.2 (2)
65535:64:0:60:M512,N,W%2,N,N,T:.:AIX:4.3.3-5.2 (3)
65535:64:0:64:M*,N,W1,N,N,T,N,N,S:.:AIX:5.3 ML1
# ----------------- Linux -------------------
S1:64:0:44:M*:A:Linux:1.2.x
512:64:0:44:M*:.:Linux:2.0.3x (1)
16384:64:0:44:M*:.:Linux:2.0.3x (2)
# Endian snafu! Nelson says "ha-ha":
2:64:0:44:M*:.:Linux:2.0.3x (MkLinux) on Mac (1)
64:64:0:44:M*:.:Linux:2.0.3x (MkLinux) on Mac (2)
S4:64:1:60:M1360,S,T,N,W0:.:Linux:2.4 (Google crawlbot)
S4:64:1:60:M1430,S,T,N,W0:.:Linux:2.4-2.6 (Google crawlbot)
S2:64:1:60:M*,S,T,N,W0:.:Linux:2.4 (large MTU?)
S3:64:1:60:M*,S,T,N,W0:.:Linux:2.4 (newer)
S4:64:1:60:M*,S,T,N,W0:.:Linux:2.4-2.6
S3:64:1:60:M*,S,T,N,W1:.:Linux:2.6, seldom 2.4 (older, 1)
S4:64:1:60:M*,S,T,N,W1:.:Linux:2.6, seldom 2.4 (older, 2)
S3:64:1:60:M*,S,T,N,W2:.:Linux:2.6, seldom 2.4 (older, 3)
S4:64:1:60:M*,S,T,N,W2:.:Linux:2.6, seldom 2.4 (older, 4)
T4:64:1:60:M*,S,T,N,W2:.:Linux:2.6 (older, 5)
S4:64:1:60:M*,S,T,N,W5:.:Linux:2.6 (newer, 1)
S4:64:1:60:M*,S,T,N,W6:.:Linux:2.6 (newer, 2)
S4:64:1:60:M*,S,T,N,W7:.:Linux:2.6 (newer, 3)
T4:64:1:60:M*,S,T,N,W7:.:Linux:2.6 (newer, 4)
S20:64:1:60:M*,S,T,N,W0:.:Linux:2.2 (1)
S22:64:1:60:M*,S,T,N,W0:.:Linux:2.2 (2)
S11:64:1:60:M*,S,T,N,W0:.:Linux:2.2 (3)
# Popular cluster config scripts disable timestamps and
# selective ACK:
S4:64:1:48:M1460,N,W0:.:Linux:2.4 in cluster
# This happens only over loopback, but let's make folks happy:
32767:64:1:60:M16396,S,T,N,W0:.:Linux:2.4 (loopback)
32767:64:1:60:M16396,S,T,N,W2:.:Linux:2.6 (newer, loopback)
S8:64:1:60:M3884,S,T,N,W0:.:Linux:2.2 (loopback)
# Opera visitors:
16384:64:1:60:M*,S,T,N,W0:.:-Linux:2.2 (Opera?)
32767:64:1:60:M*,S,T,N,W0:.:-Linux:2.4 (Opera?)
# Some fairly common mods & oddities:
S22:64:1:52:M*,N,N,S,N,W0:.:Linux:2.2 (tstamp-)
S4:64:1:52:M*,N,N,S,N,W0:.:Linux:2.4 (tstamp-)
S4:64:1:52:M*,N,N,S,N,W2:.:Linux:2.6 (tstamp-)
S4:64:1:44:M*:.:Linux:2.6? (barebone, rare!)
T4:64:1:60:M1412,S,T,N,W0:.:Linux:2.4 (rare!)
# ----------------- FreeBSD -----------------
16384:64:1:44:M*:.:FreeBSD:2.0-4.2
16384:64:1:60:M*,N,W0,N,N,T:.:FreeBSD:4.4 (1)
1024:64:1:60:M*,N,W0,N,N,T:.:FreeBSD:4.4 (2)
57344:64:1:44:M*:.:FreeBSD:4.6-4.8 (RFC1323-)
57344:64:1:60:M*,N,W0,N,N,T:.:FreeBSD:4.6-4.9
32768:64:1:60:M*,N,W0,N,N,T:.:FreeBSD:4.8-5.1 (or MacOS X 10.2-10.3)
65535:64:1:60:M*,N,W0,N,N,T:.:FreeBSD:4.7-5.2 (or MacOS X 10.2-10.4) (1)
65535:64:1:60:M*,N,W1,N,N,T:.:FreeBSD:4.7-5.2 (or MacOS X 10.2-10.4) (2)
65535:64:1:60:M*,N,W0,N,N,T:Z:FreeBSD:5.1 (1)
65535:64:1:60:M*,N,W1,N,N,T:Z:FreeBSD:5.1 (2)
65535:64:1:60:M*,N,W2,N,N,T:Z:FreeBSD:5.1 (3)
65535:64:1:64:M*,N,N,S,N,W1,N,N,T:.:FreeBSD:5.3-5.4
65535:64:1:64:M*,N,W1,N,N,T,S,E:P:FreeBSD:6.x (1)
65535:64:1:64:M*,N,W0,N,N,T,S,E:P:FreeBSD:6.x (2)
65535:64:1:44:M*:Z:FreeBSD:5.2 (RFC1323-)
# 16384:64:1:60:M*,N,N,N,N,N,N,T:.:FreeBSD:4.4 (tstamp-)
# ----------------- NetBSD ------------------
16384:64:0:60:M*,N,W0,N,N,T:.:NetBSD:1.3
65535:64:0:60:M*,N,W0,N,N,T0:.:-NetBSD:1.6 (Opera)
16384:64:1:60:M*,N,W0,N,N,T0:.:NetBSD:1.6
65535:64:1:60:M*,N,W1,N,N,T0:.:NetBSD:1.6W-current (DF)
65535:64:1:60:M*,N,W0,N,N,T0:.:NetBSD:1.6X (DF)
32768:64:1:60:M*,N,W0,N,N,T0:.:NetBSD:1.6Z or 2.0 (DF)
32768:64:1:64:M1416,N,W0,S,N,N,N,N,T0:.:NetBSD:2.0G (DF)
32768:64:1:64:M*,N,W0,S,N,N,N,N,T0:.:NetBSD:3.0 (DF)
# ----------------- OpenBSD -----------------
16384:64:1:64:M*,N,N,S,N,W0,N,N,T:.:OpenBSD:3.0-3.9
57344:64:1:64:M*,N,N,S,N,W0,N,N,T:.:OpenBSD:3.3-3.4
16384:64:0:64:M*,N,N,S,N,W0,N,N,T:.:OpenBSD:3.0-3.4 (scrub)
65535:64:1:64:M*,N,N,S,N,W0,N,N,T:.:-OpenBSD:3.0-3.4 (Opera?)
32768:64:1:64:M*,N,N,S,N,W0,N,N,T:.:OpenBSD:3.7
# ----------------- Solaris -----------------
S17:64:1:64:N,W3,N,N,T0,N,N,S,M*:.:Solaris:8 (RFC1323 on)
S17:64:1:48:N,N,S,M*:.:Solaris:8 (1)
S17:255:1:44:M*:.:Solaris:2.5-7 (1)
# Sometimes, just sometimes, Solaris feels like coming up with
# rather arbitrary MSS values ;-)
S6:255:1:44:M*:.:Solaris:2.5-7 (2)
S23:64:1:48:N,N,S,M*:.:Solaris:8 (2)
S34:64:1:48:M*,N,N,S:.:Solaris:9
S34:64:1:48:M*,N,N,N,N:.:Solaris:9 (no sack)
S44:255:1:44:M*:.:Solaris:7
4096:64:0:44:M1460:.:SunOS:4.1.x
S34:64:1:52:M*,N,W0,N,N,S:.:Solaris:10 (beta)
32850:64:1:64:M*,N,N,T,N,W1,N,N,S:.:Solaris:10 (1203?)
32850:64:1:64:M*,N,W1,N,N,T,N,N,S:.:Solaris:9.1
# ----------------- IRIX --------------------
49152:60:0:44:M*:.:IRIX:6.2-6.4
61440:60:0:44:M*:.:IRIX:6.2-6.5
49152:60:0:52:M*,N,W2,N,N,S:.:IRIX:6.5 (RFC1323+) (1)
49152:60:0:52:M*,N,W3,N,N,S:.:IRIX:6.5 (RFC1323+) (2)
61440:60:0:48:M*,N,N,S:.:IRIX:6.5.12-6.5.21 (1)
49152:60:0:48:M*,N,N,S:.:IRIX:6.5.12-6.5.21 (2)
49152:60:0:64:M*,N,W2,N,N,T,N,N,S:.:IRIX:6.5 IP27
# ----------------- Tru64 -------------------
# Tru64 and OpenVMS share the same stack on occasion.
# Relax.
32768:60:1:48:M*,N,W0:.:Tru64:4.0 (or OS/2 Warp 4)
32768:60:0:48:M*,N,W0:.:Tru64:5.0 (or OpenVMS 7.x on Compaq 5.0 stack)
8192:60:0:44:M1460:.:Tru64:5.1 (no RFC1323) (or QNX 6)
61440:60:0:48:M*,N,W0:.:Tru64:v5.1a JP4 (or OpenVMS 7.x on Compaq 5.x stack)
# ----------------- OpenVMS -----------------
6144:64:1:60:M*,N,W0,N,N,T:.:OpenVMS:7.2 (Multinet 4.3-4.4 stack)
# ----------------- MacOS -------------------
S2:255:1:48:M*,W0,E:.:MacOS:8.6 classic
16616:255:1:48:M*,W0,E:.:MacOS:7.3-8.6 (OTTCP)
16616:255:1:48:M*,N,N,N,E:.:MacOS:8.1-8.6 (OTTCP)
32768:255:1:48:M*,W0,N:.:MacOS:9.0-9.2
32768:255:1:48:M1380,N,N,N,N:.:MacOS:9.1 (OT 2.7.4) (1)
65535:255:1:48:M*,N,N,N,N:.:MacOS:9.1 (OT 2.7.4) (2)
# ----------------- Windows -----------------
# Windows TCP/IP stack is a mess. For most recent XP, 2000 and
# even 98, the patchlevel, not the actual OS version, is more
# relevant to the signature. They share the same code, so it would
# seem. Luckily for us, almost all Windows 9x boxes have an
# awkward MSS of 536, which I use to tell one from another
# in most difficult cases.
8192:32:1:44:M*:.:Windows:3.11 (Tucows)
S44:64:1:64:M*,N,W0,N,N,T0,N,N,S:.:Windows:95
8192:128:1:64:M*,N,W0,N,N,T0,N,N,S:.:Windows:95b
# There were so many tweaking tools and so many stack versions for
# Windows 98 it is no longer possible to tell them from each other
# without some very serious research. Until then, there's an insane
# number of signatures, for your amusement:
S44:32:1:48:M*,N,N,S:.:Windows:98 (low TTL) (1)
8192:32:1:48:M*,N,N,S:.:Windows:98 (low TTL) (2)
%8192:64:1:48:M536,N,N,S:.:Windows:98 (13)
%8192:128:1:48:M536,N,N,S:.:Windows:98 (15)
S4:64:1:48:M*,N,N,S:.:Windows:98 (1)
S6:64:1:48:M*,N,N,S:.:Windows:98 (2)
S12:64:1:48:M*,N,N,S:.:Windows:98 (3)
T30:64:1:64:M1460,N,W0,N,N,T0,N,N,S:.:Windows:98 (16)
32767:64:1:48:M*,N,N,S:.:Windows:98 (4)
37300:64:1:48:M*,N,N,S:.:Windows:98 (5)
46080:64:1:52:M*,N,W3,N,N,S:.:Windows:98 (RFC1323+)
65535:64:1:44:M*:.:Windows:98 (no sack)
S16:128:1:48:M*,N,N,S:.:Windows:98 (6)
S16:128:1:64:M*,N,W0,N,N,T0,N,N,S:.:Windows:98 (7)
S26:128:1:48:M*,N,N,S:.:Windows:98 (8)
T30:128:1:48:M*,N,N,S:.:Windows:98 (9)
32767:128:1:52:M*,N,W0,N,N,S:.:Windows:98 (10)
60352:128:1:48:M*,N,N,S:.:Windows:98 (11)
60352:128:1:64:M*,N,W2,N,N,T0,N,N,S:.:Windows:98 (12)
# What's with 1414 on NT?
T31:128:1:44:M1414:.:Windows:NT 4.0 SP6a (1)
64512:128:1:44:M1414:.:Windows:NT 4.0 SP6a (2)
8192:128:1:44:M*:.:Windows:NT 4.0 (older)
# Windows XP and 2000. Most of the signatures that were
# either dubious or non-specific (no service pack data)
# were deleted and replaced with generics at the end.
65535:128:1:48:M*,N,N,S:.:Windows:2000 SP4, XP SP1+
%8192:128:1:48:M*,N,N,S:.:Windows:2000 SP2+, XP SP1+ (seldom 98)
S20:128:1:48:M*,N,N,S:.:Windows:SP3
S45:128:1:48:M*,N,N,S:.:Windows:2000 SP4, XP SP1+ (2)
40320:128:1:48:M*,N,N,S:.:Windows:2000 SP4
S6:128:1:48:M*,N,N,S:.:Windows:XP, 2000 SP2+
S12:128:1:48:M*,N,N,S:.:Windows:XP SP1+ (1)
S44:128:1:48:M*,N,N,S:.:Windows:XP SP1+, 2000 SP3
64512:128:1:48:M*,N,N,S:.:Windows:XP SP1+, 2000 SP3 (2)
32767:128:1:48:M*,N,N,S:.:Windows:XP SP1+, 2000 SP4 (3)
# Windows 2003 & Vista
8192:128:1:52:M*,W8,N,N,N,S:.:Windows:Vista (beta)
32768:32:1:52:M1460,N,W0,N,N,S:.:Windows:2003 AS
65535:64:1:52:M1460,N,W2,N,N,S:.:Windows:2003 (1)
65535:64:1:48:M1460,N,N,S:.:Windows:2003 (2)
# Odds, ends, mods:
S52:128:1:48:M1260,N,N,S:.:Windows:XP/2000 via Cisco
65520:128:1:48:M*,N,N,S:.:Windows:XP bare-bone
16384:128:1:52:M536,N,W0,N,N,S:.:Windows:2000 w/ZoneAlarm?
2048:255:0:40:.:.:Windows:.NET Enterprise Server
44620:64:0:48:M*,N,N,S:.:Windows:ME no SP (?)
S6:255:1:48:M536,N,N,S:.:Windows:95 winsock 2
32000:128:0:48:M*,N,N,S:.:Windows:XP w/Winroute?
16384:64:1:48:M1452,N,N,S:.:Windows:XP w/Sygate? (1)
17256:64:1:48:M1460,N,N,S:.:Windows:XP w/Sygate? (2)
# No need to be more specific, it passes:
*:128:1:48:M*,N,N,S:U:-Windows:XP/2000 while downloading (leak!)
# ----------------- HP/UX -------------------
32768:64:1:44:M*:.:HP-UX:B.10.20
32768:64:1:48:M*,W0,N:.:HP-UX:11.00-11.11
# Whoa. Hardcore WSS.
0:64:0:48:M*,W0,N:.:HP-UX:B.11.00 A (RFC1323+)
# ----------------- RiscOS ------------------
16384:64:1:68:M1460,N,W0,N,N,T,N,N,?12:.:RISC OS:3.70-4.36 (inet 5.04)
12288:32:0:44:M536:.:RISC OS:3.70 inet 4.10
4096:64:1:56:M1460,N,N,T:T:RISC OS:3.70 freenet 2.00
# ----------------- BSD/OS ------------------
8192:64:1:60:M1460,N,W0,N,N,T:.:BSD/OS:3.1-4.3 (or MacOS X 10.2)
# ---------------- NewtonOS -----------------
4096:64:0:44:M1420:.:NewtonOS:2.1
# ---------------- NeXTSTEP -----------------
S8:64:0:44:M512:.:NeXTSTEP:3.3 (1)
S4:64:0:44:M1024:.:NeXTSTEP:3.3 (2)
# ------------------ BeOS -------------------
1024:255:0:48:M*,N,W0:.:BeOS:5.0-5.1
12288:255:0:44:M*:.:BeOS:5.0.x
# ------------------ OS/400 -----------------
8192:64:1:60:M1440,N,W0,N,N,T:.:OS/400:V4R4/R5
8192:64:0:44:M536:.:OS/400:V4R3/M0
4096:64:1:60:M1440,N,W0,N,N,T:.:OS/400:V4R5 + CF67032
28672:64:0:44:M1460:A:OS/390:?
# ------------------ ULTRIX -----------------
16384:64:0:40:.:.:ULTRIX:4.5
# ------------------- QNX -------------------
S16:64:0:44:M512:.:QNX:demodisk
16384:64:0:60:M1460,N,W0,N,N,T0:.:QNX:6.x
# ------------------ Novell -----------------
16384:128:1:44:M1460:.:Novell:NetWare 5.0
6144:128:1:44:M1460:.:Novell:IntranetWare 4.11
6144:128:1:44:M1368:.:Novell:BorderManager ?
# According to rfp:
6144:128:1:52:M*,W0,N,S,N,N:.:Novell:Netware 6 SP3
# -------------- SCO UnixWare ---------------
S3:64:1:60:M1460,N,W0,N,N,T:.:SCO:UnixWare 7.1
S17:64:1:60:M*,N,W0,N,N,T:.:SCO:UnixWare 7.1.x
S23:64:1:44:M1380:.:SCO:OpenServer 5.0
# ------------------- DOS -------------------
2048:255:0:44:M536:.:DOS:Arachne via WATTCP/1.05
T2:255:0:44:M984:.:DOS:Arachne via WATTCP/1.05 (eepro)
16383:64:0:44:M536:.:DOS:Unknown via WATTCP (epppd)
# ------------------ OS/2 -------------------
S56:64:0:44:M512:.:OS/2:4
28672:64:0:44:M1460:.:OS/2:Warp 4.0
# ----------------- TOPS-20 -----------------
# Another hardcore MSS, one of the ACK leakers hunted down.
0:64:0:44:M1460:A:TOPS-20:version 7
# ------------------ AMIGA ------------------
S32:64:1:56:M*,N,N,S,N,N,?12:.:AMIGA:3.9 BB2 with Miami stack
# ------------------ Minix ------------------
# Not quite sure.
# 8192:210:0:44:M1460:X:@Minix:?
# ------------------ Plan9 ------------------
65535:255:0:48:M1460,W0,N:.:Plan9:edition 4
# ----------------- AMIGAOS -----------------
16384:64:1:48:M1560,N,N,S:.:AMIGAOS:3.9 BB2 MiamiDX
# ----------------- FreeMiNT ----------------
S44:255:0:44:M536:.:FreeMiNT:1 patch 16A (Atari)
###########################################
# Appliance / embedded / other signatures #
###########################################
# ---------- Firewalls / routers ------------
S12:64:1:44:M1460:.:@Checkpoint:(unknown 1)
S12:64:1:48:N,N,S,M1460:.:@Checkpoint:(unknown 2)
4096:32:0:44:M1460:.:ExtremeWare:4.x
S32:64:0:68:M512,N,W0,N,N,T,N,N,?12:.:Nokia:IPSO w/Checkpoint NG FP3
S16:64:0:68:M1024,N,W0,N,N,T,N,N,?12:.:Nokia:IPSO 3.7 build 026
S4:64:1:60:W0,N,S,T,M1460:.:FortiNet:FortiGate 50
8192:64:1:44:M1460:.:@Eagle:Secure Gateway
# ------- Switches and other stuff ----------
4128:255:0:44:M*:Z:Cisco:7200, Catalyst 3500, etc
S8:255:0:44:M*:.:Cisco:12008
S4:255:0:44:M536:Z:Cisco:IOS 11.0
60352:128:1:64:M1460,N,W2,N,N,T,N,N,S:.:Alteon:ACEswitch
64512:128:1:44:M1370:.:Nortel:Contivity Client
# ---------- Caches and whatnots ------------
8190:255:0:44:M1428:.:Google:Wireless Transcoder (1)
8190:255:0:44:M1460:.:Google:Wireless Transcoder (2)
8192:64:1:64:M1460,N,N,S,N,W0,N,N,T:.:NetCache:5.2
16384:64:1:64:M1460,N,N,S,N,W0,N:.:NetCache:5.3
65535:64:1:64:M1460,N,N,S,N,W*,N,N,T:.:NetCache:5.3-5.5 (or FreeBSD 5.4)
20480:64:1:64:M1460,N,N,S,N,W0,N,N,T:.:NetCache:4.1
S44:64:1:64:M1460,N,N,S,N,W0,N,N,T:.:NetCache:5.5
32850:64:1:64:N,W1,N,N,T,N,N,S,M*:.:NetCache:Data OnTap 5.x
65535:64:0:60:M1460,N,W0,N,N,T:.:CacheFlow:CacheOS 4.1
8192:64:0:60:M1380,N,N,N,N,N,N,T:.:CacheFlow:CacheOS 1.1
S4:64:0:48:M1460,N,N,S:.:Cisco:Content Engine
27085:128:0:40:.:.:Dell:PowerApp cache (Linux-based)
65535:255:1:48:N,W1,M1460:.:Inktomi:crawler
S1:255:1:60:M1460,S,T,N,W0:.:LookSmart:ZyBorg
16384:255:0:40:.:.:Proxyblocker:(what's this?)
65535:255:0:48:M*,N,N,S:.:Redline: T|X 2200
# ----------- Embedded systems --------------
S9:255:0:44:M536:.:PalmOS:Tungsten T3/C
S5:255:0:44:M536:.:PalmOS:3/4
S4:255:0:44:M536:.:PalmOS:3.5
2948:255:0:44:M536:.:PalmOS:3.5.3 (Handera)
S29:255:0:44:M536:.:PalmOS:5.0
16384:255:0:44:M1398:.:PalmOS:5.2 (Clie)
S14:255:0:44:M1350:.:PalmOS:5.2.1 (Treo)
16384:255:0:44:M1400:.:PalmOS:5.2 (Sony)
S23:64:1:64:N,W1,N,N,T,N,N,S,M1460:.:SymbianOS:7
8192:255:0:44:M1460:.:SymbianOS:6048 (Nokia 7650?)
8192:255:0:44:M536:.:SymbianOS:(Nokia 9210?)
S22:64:1:56:M1460,T,S:.:SymbianOS:? (SE P800?)
S36:64:1:56:M1360,T,S:.:SymbianOS:60xx (Nokia 6600?)
S36:64:1:60:M1360,T,S,W0,E:.:SymbianOS:60xx
32768:32:1:44:M1460:.:Windows:CE 3
# Perhaps S4?
5840:64:1:60:M1452,S,T,N,W1:.:Zaurus:3.10
32768:128:1:64:M1460,N,W0,N,N,T0,N,N,S:.:PocketPC:2002
S1:255:0:44:M346:.:Contiki:1.1-rc0
4096:128:0:44:M1460:.:Sega:Dreamcast Dreamkey 3.0
T5:64:0:44:M536:.:Sega:Dreamcast HKT-3020 (browser disc 51027)
S22:64:1:44:M1460:.:Sony:Playstation 2 (SOCOM?)
S12:64:0:44:M1452:.:AXIS:Printer Server 5600 v5.64
3100:32:1:44:M1460:.:Windows:CE 2.0
####################
# Fancy signatures #
####################
1024:64:0:40:.:.:-*NMAP:syn scan (1)
2048:64:0:40:.:.:-*NMAP:syn scan (2)
3072:64:0:40:.:.:-*NMAP:syn scan (3)
4096:64:0:40:.:.:-*NMAP:syn scan (4)
1024:64:0:40:.:A:-*NMAP:TCP sweep probe (1)
2048:64:0:40:.:A:-*NMAP:TCP sweep probe (2)
3072:64:0:40:.:A:-*NMAP:TCP sweep probe (3)
4096:64:0:40:.:A:-*NMAP:TCP sweep probe (4)
1024:64:0:60:W10,N,M265,T,E:P:-*NMAP:OS detection probe (1)
2048:64:0:60:W10,N,M265,T,E:P:-*NMAP:OS detection probe (2)
3072:64:0:60:W10,N,M265,T,E:P:-*NMAP:OS detection probe (3)
4096:64:0:60:W10,N,M265,T,E:P:-*NMAP:OS detection probe (4)
1024:64:0:60:W10,N,M265,T,E:PF:-*NMAP:OS detection probe w/flags (1)
2048:64:0:60:W10,N,M265,T,E:PF:-*NMAP:OS detection probe w/flags (2)
3072:64:0:60:W10,N,M265,T,E:PF:-*NMAP:OS detection probe w/flags (3)
4096:64:0:60:W10,N,M265,T,E:PF:-*NMAP:OS detection probe w/flags (4)
32767:64:0:40:.:.:-*NAST:syn scan
12345:255:0:40:.:A:-p0f:sendsyn utility
# UFO - see tmp/*:
56922:128:0:40:.:A:-@Mysterious:port scanner (?)
5792:64:1:60:M1460,S,T,N,W0:T:-@Mysterious:NAT device (2nd tstamp)
S12:128:1:48:M1460,E:P:@Mysterious:Chello proxy (?)
S23:64:1:64:N,W1,N,N,T,N,N,S,M1380:.:@Mysterious:GPRS gateway (?)
#####################################
# Generic signatures - just in case #
#####################################
*:128:1:52:M*,N,W0,N,N,S:.:@Windows:XP/2000 (RFC1323+, w, tstamp-)
*:128:1:52:M*,N,W*,N,N,S:.:@Windows:XP/2000 (RFC1323+, w+, tstamp-)
*:128:1:52:M*,N,N,T0,N,N,S:.:@Windows:XP/2000 (RFC1323+, w-, tstamp+)
*:128:1:64:M*,N,W0,N,N,T0,N,N,S:.:@Windows:XP/2000 (RFC1323+, w, tstamp+)
*:128:1:64:M*,N,W*,N,N,T0,N,N,S:.:@Windows:XP/2000 (RFC1323+, w+, tstamp+)
*:128:1:48:M536,N,N,S:.:@Windows:98
*:128:1:48:M*,N,N,S:.:@Windows:XP/2000

View file

@ -0,0 +1,3 @@
@load ./main
@load ./contents
@load ./inactivity

View file

@ -0,0 +1,42 @@
##! This script can be used to extract either the originator's data, the
##! responder's data, or both. By default nothing is extracted; in order
##! to actually extract data, the ``c$extract_orig`` and/or the
##! ``c$extract_resp`` variable must be set to T. One way to achieve this
##! would be to handle the connection_established event elsewhere and set the
##! extract_orig and extract_resp options there. However, there may be trouble
##! with the timing due to the event queue delay.
##! This script does not work well in a cluster context unless it has a
##! remotely mounted disk to write the content files to.
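##!
##! For example (a sketch of the approach described above), a site policy
##! script could enable extraction for all connections by redefining the
##! default:
##!
##!     redef Conn::default_extract = T;
##!
##! or selectively, from a connection_established handler:
##!
##!     event connection_established(c: connection)
##!         {
##!         c$extract_orig = T;
##!         c$extract_resp = T;
##!         }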
module Conn;
export {
## The prefix given to files as they are opened on disk.
const extraction_prefix = "contents" &redef;
## If this variable is set to T, then all contents of all files will be
## extracted.
const default_extract = F &redef;
}
redef record connection += {
extract_orig: bool &default=default_extract;
extract_resp: bool &default=default_extract;
};
event connection_established(c: connection) &priority=-5
{
if ( c$extract_orig )
{
local orig_file = generate_extraction_filename(extraction_prefix, c, "orig.dat");
local orig_f = open(orig_file);
set_contents_file(c$id, CONTENTS_ORIG, orig_f);
}
if ( c$extract_resp )
{
local resp_file = generate_extraction_filename(extraction_prefix, c, "resp.dat");
local resp_f = open(resp_file);
set_contents_file(c$id, CONTENTS_RESP, resp_f);
}
}

View file

@ -0,0 +1,41 @@
##! Adjust the inactivity timeouts for interactive services which could
##! very possibly have long delays between packets.
module Conn;
export {
## Define inactivity timeouts based on the service detected as being
## used over the connection.
const analyzer_inactivity_timeouts: table[AnalyzerTag] of interval = {
# For interactive services, allow longer periods of inactivity.
[[ANALYZER_SSH, ANALYZER_FTP]] = 1 hrs,
} &redef;
## Define inactivity timeouts based on common protocol ports.
const port_inactivity_timeouts: table[port] of interval = {
[[21/tcp, 22/tcp, 23/tcp, 513/tcp]] = 1 hrs,
} &redef;
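# For example (a sketch; 2222/tcp is only an illustrative alternate SSH
# port, not something defined elsewhere in this script), a site could
# extend the port-based table from its own policy:
#
#     redef Conn::port_inactivity_timeouts += {
#         [2222/tcp] = 1 hrs,
#     };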
}
event protocol_confirmation(c: connection, atype: count, aid: count)
{
if ( atype in analyzer_inactivity_timeouts )
set_inactivity_timeout(c$id, analyzer_inactivity_timeouts[atype]);
}
event connection_established(c: connection)
{
local service_port = c$id$resp_p;
if ( c$orig$state == TCP_INACTIVE )
{
# We're seeing a half-established connection. Use the
# service of the originator if it's well-known and the
# responder isn't.
if ( service_port !in likely_server_ports && c$id$orig_p in likely_server_ports )
service_port = c$id$orig_p;
}
if ( service_port in port_inactivity_timeouts )
set_inactivity_timeout(c$id, port_inactivity_timeouts[service_port]);
}

View file

@ -0,0 +1,198 @@
module Conn;
export {
redef enum Log::ID += { CONN };
type Info: record {
## This is the time of the first packet.
ts: time &log;
## A unique identifier of the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## The transport layer protocol of the connection.
proto: transport_proto &log;
## An identification of an application protocol being sent over the connection.
service: string &log &optional;
## How long the connection lasted.
duration: interval &log &optional;
## The number of payload bytes the originator sent.
orig_bytes: count &log &optional;
## The number of payload bytes the responder sent.
resp_bytes: count &log &optional;
## ========== ===============================================
## conn_state Meaning
## ========== ===============================================
## S0 Connection attempt seen, no reply.
## S1 Connection established, not terminated.
## SF Normal establishment and termination. Note that this is the same symbol as for state S1. You can tell the two apart because for S1 there will not be any byte counts in the summary, while for SF there will be.
## REJ Connection attempt rejected.
## S2 Connection established and close attempt by originator seen (but no reply from responder).
## S3 Connection established and close attempt by responder seen (but no reply from originator).
## RSTO Connection established, originator aborted (sent a RST).
## RSTR Established, responder aborted.
## RSTOS0 Originator sent a SYN followed by a RST, we never saw a SYN-ACK from the responder.
## RSTRH Responder sent a SYN ACK followed by a RST, we never saw a SYN from the (purported) originator.
## SH Originator sent a SYN followed by a FIN, we never saw a SYN ACK from the responder (hence the connection was "half" open).
## SHR Responder sent a SYN ACK followed by a FIN, we never saw a SYN from the originator.
## OTH No SYN seen, just midstream traffic (a "partial connection" that was not later closed).
## ========== ===============================================
conn_state: string &log &optional;
## If the connection is originated locally, this value will be T. If
## it was originated remotely it will be F. In the case that the
## :bro:id:`Site::local_nets` variable is undefined, this field will
## be left empty at all times.
local_orig: bool &log &optional;
## Indicates the number of bytes missed in content gaps, which is
## representative of packet loss. A value other than zero will
## normally cause protocol analysis to fail, but some analysis may
## have been completed prior to the packet loss.
missed_bytes: count &log &default=0;
## Records the state history of (TCP) connections as
## a string of letters.
##
## ====== ====================================================
## Letter Meaning
## ====== ====================================================
## s a SYN w/o the ACK bit set
## h a SYN+ACK ("handshake")
## a a pure ACK
## d packet with payload ("data")
## f packet with FIN bit set
## r packet with RST bit set
## c packet with a bad checksum
## i inconsistent packet (e.g. SYN+RST bits both set)
## ====== ====================================================
##
## If the letter is in upper case it means the event comes from the
## originator, and if it is in lower case it comes from the responder.
## Also, there is compression: we only record one "d" in each direction,
## for instance, simply noting that data went in that direction. The
## history is not meant to encode how much data was transferred.
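##
## As an illustrative (not trace-derived) example, a history of "ShADadfF"
## would read: the originator sent a SYN ("S"), the responder answered
## with a SYN+ACK ("h"), the originator sent a pure ACK ("A") and then
## data ("D"), the responder ACKed ("a") and sent data ("d"), the
## responder sent a FIN ("f"), and finally the originator sent a FIN ("F").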
history: string &log &optional;
};
global log_conn: event(rec: Info);
}
redef record connection += {
conn: Info &optional;
};
event bro_init() &priority=5
{
Log::create_stream(CONN, [$columns=Info, $ev=log_conn]);
}
function conn_state(c: connection, trans: transport_proto): string
{
local os = c$orig$state;
local rs = c$resp$state;
local o_inactive = os == TCP_INACTIVE || os == TCP_PARTIAL;
local r_inactive = rs == TCP_INACTIVE || rs == TCP_PARTIAL;
if ( trans == tcp )
{
if ( rs == TCP_RESET )
{
if ( os == TCP_SYN_SENT || os == TCP_SYN_ACK_SENT ||
(os == TCP_RESET &&
c$orig$size == 0 && c$resp$size == 0) )
return "REJ";
else if ( o_inactive )
return "RSTRH";
else
return "RSTR";
}
else if ( os == TCP_RESET )
return r_inactive ? "RSTOS0" : "RSTO";
else if ( rs == TCP_CLOSED && os == TCP_CLOSED )
return "SF";
else if ( os == TCP_CLOSED )
return r_inactive ? "SH" : "S2";
else if ( rs == TCP_CLOSED )
return o_inactive ? "SHR" : "S3";
else if ( os == TCP_SYN_SENT && rs == TCP_INACTIVE )
return "S0";
else if ( os == TCP_ESTABLISHED && rs == TCP_ESTABLISHED )
return "S1";
else
return "OTH";
}
else if ( trans == udp )
{
if ( os == UDP_ACTIVE )
return rs == UDP_ACTIVE ? "SF" : "S0";
else
return rs == UDP_ACTIVE ? "SHR" : "OTH";
}
else
return "OTH";
}
function determine_service(c: connection): string
{
local service = "";
for ( s in c$service )
{
if ( sub_bytes(s, 0, 1) != "-" )
service = service == "" ? s : cat(service, ",", s);
}
return to_lower(service);
}
function set_conn(c: connection, eoc: bool)
{
if ( ! c?$conn )
{
local id = c$id;
local tmp: Info;
tmp$ts=c$start_time;
tmp$uid=c$uid;
tmp$id=id;
tmp$proto=get_port_transport_proto(id$resp_p);
if( |Site::local_nets| > 0 )
tmp$local_orig=Site::is_local_addr(id$orig_h);
c$conn = tmp;
}
if ( eoc )
{
if ( c$duration > 0secs )
{
c$conn$duration=c$duration;
# TODO: these should optionally use Gregor's new
# actual byte counting code if it's enabled.
c$conn$orig_bytes=c$orig$size;
c$conn$resp_bytes=c$resp$size;
}
local service = determine_service(c);
if ( service != "" )
c$conn$service=service;
c$conn$conn_state=conn_state(c, get_port_transport_proto(c$id$resp_p));
if ( c$history != "" )
c$conn$history=c$history;
}
}
event connection_established(c: connection) &priority=5
{
set_conn(c, F);
}
event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5
{
set_conn(c, F);
c$conn$missed_bytes = c$conn$missed_bytes + length;
}
event connection_state_remove(c: connection) &priority=-5
{
set_conn(c, T);
Log::write(CONN, c$conn);
}

View file

@ -0,0 +1,2 @@
@load ./consts
@load ./main

View file

@ -0,0 +1,84 @@
##! Types, errors, and fields for analyzing DNS data. A helper file
##! for DNS analysis scripts.
module DNS;
export {
const PTR = 12;
const EDNS = 41;
const ANY = 255;
## Mapping of DNS query type codes to human readable string representation.
const query_types = {
[1] = "A", [2] = "NS", [3] = "MD", [4] = "MF",
[5] = "CNAME", [6] = "SOA", [7] = "MB", [8] = "MG",
[9] = "MR", [10] = "NULL", [11] = "WKS", [PTR] = "PTR",
[13] = "HINFO", [14] = "MINFO", [15] = "MX", [16] = "TXT",
[17] = "RP", [18] = "AFSDB", [19] = "X25", [20] = "ISDN",
[21] = "RT", [22] = "NSAP", [23] = "NSAP-PTR", [24] = "SIG",
[25] = "KEY", [26] = "PX" , [27] = "GPOS", [28] = "AAAA",
[29] = "LOC", [30] = "EID", [31] = "NIMLOC", [32] = "NB",
[33] = "SRV", [34] = "ATMA", [35] = "NAPTR", [36] = "KX",
[37] = "CERT", [38] = "A6", [39] = "DNAME", [40] = "SINK",
[EDNS] = "EDNS", [42] = "APL", [43] = "DS", [44] = "SINK",
[45] = "SSHFP", [46] = "RRSIG", [47] = "NSEC", [48] = "DNSKEY",
[49] = "DHCID", [99] = "SPF", [100] = "DINFO", [101] = "UID",
[102] = "GID", [103] = "UNSPEC", [249] = "TKEY", [250] = "TSIG",
[251] = "IXFR", [252] = "AXFR", [253] = "MAILB", [254] = "MAILA",
[32768] = "TA", [32769] = "DLV",
[ANY] = "*",
} &default = function(n: count): string { return fmt("query-%d", n); };
const code_types = {
[0] = "X0",
[1] = "Xfmt",
[2] = "Xsrv",
[3] = "Xnam",
[4] = "Ximp",
[5] = "X[",
} &default="?";
## Errors used for non-TSIG/EDNS types.
const base_errors = {
[0] = "NOERROR", ##< No Error
[1] = "FORMERR", ##< Format Error
[2] = "SERVFAIL", ##< Server Failure
[3] = "NXDOMAIN", ##< Non-Existent Domain
[4] = "NOTIMP", ##< Not Implemented
[5] = "REFUSED", ##< Query Refused
[6] = "YXDOMAIN", ##< Name Exists when it should not
[7] = "YXRRSET", ##< RR Set Exists when it should not
[8] = "NXRRSet", ##< RR Set that should exist does not
[9] = "NOTAUTH", ##< Server Not Authoritative for zone
[10] = "NOTZONE", ##< Name not contained in zone
[11] = "unassigned-11", ##< available for assignment
[12] = "unassigned-12", ##< available for assignment
[13] = "unassigned-13", ##< available for assignment
[14] = "unassigned-14", ##< available for assignment
[15] = "unassigned-15", ##< available for assignment
[16] = "BADVERS", ##< for EDNS, collision w/ TSIG
[17] = "BADKEY", ##< Key not recognized
[18] = "BADTIME", ##< Signature out of time window
[19] = "BADMODE", ##< Bad TKEY Mode
[20] = "BADNAME", ##< Duplicate key name
[21] = "BADALG", ##< Algorithm not supported
[22] = "BADTRUNC", ##< draft-ietf-dnsext-tsig-sha-05.txt
[3842] = "BADSIG", ##< 16 <= number collision with EDNS(16);
##< this is a translation from TSIG(16)
} &default = function(n: count): string { return fmt("rcode-%d", n); };
# This deciphers EDNS Z field values.
const edns_zfield = {
[0] = "NOVALUE", # regular entry
[32768] = "DNS_SEC_OK", # accepts DNS Sec RRs
} &default="?";
const classes = {
[1] = "C_INTERNET",
[2] = "C_CSNET",
[3] = "C_CHAOS",
[4] = "C_HESOD",
[254] = "C_NONE",
[255] = "C_ANY",
} &default = function(n: count): string { return fmt("qclass-%d", n); };
}

View file

@ -0,0 +1,279 @@
module DNS;
export {
redef enum Log::ID += { DNS };
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
proto: transport_proto &log;
trans_id: count &log &optional;
query: string &log &optional;
qclass: count &log &optional;
qclass_name: string &log &optional;
qtype: count &log &optional;
qtype_name: string &log &optional;
rcode: count &log &optional;
rcode_name: string &log &optional;
QR: bool &log &default=F;
AA: bool &log &default=F;
TC: bool &log &default=F;
RD: bool &log &default=F;
RA: bool &log &default=F;
Z: count &log &default=0;
TTL: interval &log &optional;
answers: set[string] &log &optional;
## This value indicates if this request/response pair is ready to be logged.
ready: bool &default=F;
total_answers: count &optional;
total_replies: count &optional;
};
type State: record {
## Indexed by query id, returns Info record corresponding to
## query/response which haven't completed yet.
pending: table[count] of Info &optional;
## This is the list of DNS responses that have completed based on the
## number of responses declared and the number received. The contents
## of the set are transaction IDs.
finished_answers: set[count] &optional;
};
global log_dns: event(rec: Info);
## This is called by the specific dns_*_reply events with a "reply" which
## may not represent the full data available from the resource record, but
## it's generally considered a summarization of the response(s).
# TODO: Weirdly enough, if I define this, the locally defined script layer
# event won't trigger any of its handlers.
#global do_reply: event(c: connection, msg: dns_msg, ans: dns_answer, reply: string);
}
redef record connection += {
dns: Info &optional;
dns_state: State &optional;
};
# DPD configuration.
redef capture_filters += {
["dns"] = "port 53",
["mdns"] = "udp and port 5353",
["llmns"] = "udp and port 5355",
["netbios-ns"] = "udp port 137",
};
global dns_ports = { 53/udp, 53/tcp, 137/udp, 5353/udp, 5355/udp } &redef;
redef dpd_config += { [ANALYZER_DNS] = [$ports = dns_ports] };
global dns_udp_ports = { 53/udp, 137/udp, 5353/udp, 5355/udp } &redef;
global dns_tcp_ports = { 53/tcp } &redef;
redef dpd_config += { [ANALYZER_DNS_UDP_BINPAC] = [$ports = dns_udp_ports] };
redef dpd_config += { [ANALYZER_DNS_TCP_BINPAC] = [$ports = dns_tcp_ports] };
event bro_init() &priority=5
{
Log::create_stream(DNS, [$columns=Info, $ev=log_dns]);
}
function new_session(c: connection, trans_id: count): Info
{
if ( ! c?$dns_state )
{
local state: State;
state$pending=table();
state$finished_answers=set();
c$dns_state = state;
}
local info: Info;
info$ts = network_time();
info$id = c$id;
info$uid = c$uid;
info$proto = get_conn_transport_proto(c$id);
info$trans_id = trans_id;
return info;
}
function set_session(c: connection, msg: dns_msg, is_query: bool)
{
if ( ! c?$dns_state || msg$id !in c$dns_state$pending )
c$dns_state$pending[msg$id] = new_session(c, msg$id);
c$dns = c$dns_state$pending[msg$id];
c$dns$rcode = msg$rcode;
c$dns$rcode_name = base_errors[msg$rcode];
if ( ! is_query )
{
if ( ! c$dns?$total_answers )
c$dns$total_answers = msg$num_answers;
if ( c$dns?$total_replies &&
c$dns$total_replies != msg$num_answers + msg$num_addl + msg$num_auth )
{
event conn_weird("dns_changed_number_of_responses", c,
fmt("The declared number of responses changed from %d to %d",
c$dns$total_replies,
msg$num_answers + msg$num_addl + msg$num_auth));
}
else
{
# Store the total number of responses expected from the first reply.
c$dns$total_replies = msg$num_answers + msg$num_addl + msg$num_auth;
}
}
}
event do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5
{
set_session(c, msg, F);
c$dns$AA = msg$AA;
c$dns$RA = msg$RA;
c$dns$TTL = ans$TTL;
if ( ans$answer_type == DNS_ANS )
{
if ( msg$id in c$dns_state$finished_answers )
event conn_weird("dns_reply_seen_after_done", c, "");
if ( reply != "" )
{
if ( ! c$dns?$answers )
c$dns$answers = set();
add c$dns$answers[reply];
}
if ( c$dns?$answers && |c$dns$answers| == c$dns$total_answers )
{
add c$dns_state$finished_answers[c$dns$trans_id];
# Indicate this request/reply pair is ready to be logged.
c$dns$ready = T;
}
}
}
event do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=-5
{
if ( c$dns$ready )
{
Log::write(DNS, c$dns);
add c$dns_state$finished_answers[c$dns$trans_id];
# This record is logged and no longer pending.
delete c$dns_state$pending[c$dns$trans_id];
}
}
event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5
{
set_session(c, msg, T);
c$dns$RD = msg$RD;
c$dns$TC = msg$TC;
c$dns$qclass = qclass;
c$dns$qclass_name = classes[qclass];
c$dns$qtype = qtype;
c$dns$qtype_name = query_types[qtype];
# Decode netbios name queries
# Note: I'm ignoring the name type for now. Not sure if this should be
# worked into the query/response in some fashion.
if ( c$id$resp_p == 137/udp )
query = decode_netbios_name(query);
c$dns$query = query;
c$dns$Z = msg$Z;
}
event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5
{
event do_reply(c, msg, ans, fmt("%s", a));
}
event dns_TXT_reply(c: connection, msg: dns_msg, ans: dns_answer, str: string) &priority=5
{
event do_reply(c, msg, ans, str);
}
event dns_AAAA_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr,
astr: string) &priority=5
{
# TODO: What should we do with astr?
event do_reply(c, msg, ans, fmt("%s", a));
}
event dns_NS_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5
{
event do_reply(c, msg, ans, name);
}
event dns_CNAME_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5
{
event do_reply(c, msg, ans, name);
}
event dns_MX_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string,
preference: count) &priority=5
{
event do_reply(c, msg, ans, name);
}
event dns_PTR_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5
{
event do_reply(c, msg, ans, name);
}
event dns_SOA_reply(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa) &priority=5
{
event do_reply(c, msg, ans, soa$mname);
}
event dns_WKS_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5
{
event do_reply(c, msg, ans, "");
}
event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5
{
event do_reply(c, msg, ans, "");
}
# TODO: figure out how to handle these
#event dns_EDNS(c: connection, msg: dns_msg, ans: dns_answer)
# {
#
# }
#
#event dns_EDNS_addl(c: connection, msg: dns_msg, ans: dns_edns_additional)
# {
#
# }
#
#event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional)
# {
#
# }
event dns_rejected(c: connection, msg: dns_msg,
query: string, qtype: count, qclass: count) &priority=5
{
set_session(c, msg, F);
}
event connection_state_remove(c: connection) &priority=-5
{
if ( ! c?$dns_state )
return;
# If Bro is expiring state, we should go ahead and log all unlogged
# request/response pairs now.
for ( trans_id in c$dns_state$pending )
Log::write(DNS, c$dns_state$pending[trans_id]);
}

View file

@ -0,0 +1,3 @@
@load ./utils-commands
@load ./main
@load ./file-extract

View file

@ -0,0 +1,65 @@
##! File extraction for FTP.
module FTP;
export {
## Pattern of file mime types to extract from FTP entity bodies.
const extract_file_types = /NO_DEFAULT/ &redef;
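# For example (a sketch), extraction could be limited to Windows/DOS
# executables by redefining the pattern from a site's own policy:
#
#     redef FTP::extract_file_types = /application\/x-dosexec/;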
## The on-disk prefix for files to be extracted from FTP-data transfers.
const extraction_prefix = "ftp-item" &redef;
}
redef record Info += {
## The file handle for the file to be extracted
extraction_file: file &log &optional;
extract_file: bool &default=F;
num_extracted_files: count &default=0;
};
event file_transferred(c: connection, prefix: string, descr: string,
mime_type: string) &priority=3
{
local id = c$id;
if ( [id$resp_h, id$resp_p] !in ftp_data_expected )
return;
local expected = ftp_data_expected[id$resp_h, id$resp_p];
local s = expected$state;
if ( extract_file_types in s$mime_type )
{
s$extract_file = T;
add s$tags["extracted_file"];
++s$num_extracted_files;
}
}
event file_transferred(c: connection, prefix: string, descr: string,
mime_type: string) &priority=-4
{
local id = c$id;
if ( [id$resp_h, id$resp_p] !in ftp_data_expected )
return;
local expected = ftp_data_expected[id$resp_h, id$resp_p];
local s = expected$state;
if ( s$extract_file )
{
local suffix = fmt("%d.dat", s$num_extracted_files);
local fname = generate_extraction_filename(extraction_prefix, c, suffix);
s$extraction_file = open(fname);
if ( s$passive )
set_contents_file(id, CONTENTS_RESP, s$extraction_file);
else
set_contents_file(id, CONTENTS_ORIG, s$extraction_file);
}
}
event log_ftp(rec: Info) &priority=-10
{
delete rec$extraction_file;
delete rec$extract_file;
}

View file

@ -0,0 +1,336 @@
##! The logging in this script is primarily focused on logging FTP commands
##! along with metadata. For example, if files are transferred, the logged
##! argument will contain the full path the client is in along with the
##! requested file name.
##!
##! TODO:
##!
##! * Handle encrypted sessions correctly (get an example?)
module FTP;
export {
redef enum Log::ID += { FTP };
## This setting controls whether passwords used in FTP sessions are captured.
const default_capture_password = F &redef;
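# For example (a sketch), the default can be changed from a site's own
# policy with:
#
#     redef FTP::default_capture_password = T;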
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
user: string &log &default="<unknown>";
password: string &log &optional;
command: string &log &optional;
arg: string &log &optional;
mime_type: string &log &optional;
mime_desc: string &log &optional;
file_size: count &log &optional;
reply_code: count &log &optional;
reply_msg: string &log &optional;
tags: set[string] &log &default=set();
## By setting the CWD to '/.', we can indicate that, unless something
## more concrete is discovered, the existing but unknown directory is
## ok to use.
cwd: string &default="/.";
cmdarg: CmdArg &optional;
pending_commands: PendingCmds;
## This indicates if the session is in active or passive mode.
passive: bool &default=F;
## This determines if the password will be captured for this request.
capture_password: bool &default=default_capture_password;
};
type ExpectedConn: record {
host: addr;
state: Info;
};
## This record is to hold a parsed FTP reply code. For example, for the
## 201 status code, the digits would be parsed as: x -> 2, y -> 0, z -> 1.
type ReplyCode: record {
x: count;
y: count;
z: count;
};
# TODO: add this back in some form. raise a notice again?
#const excessive_filename_len = 250 &redef;
#const excessive_filename_trunc_len = 32 &redef;
## These are user IDs that can be considered "anonymous".
const guest_ids = { "anonymous", "ftp", "guest" } &redef;
## The list of commands that should have their command/response pairs logged.
const logged_commands = {
"APPE", "DELE", "RETR", "STOR", "STOU", "ACCT"
} &redef;
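# For example (a sketch), directory listing commands could be logged too:
#
#     redef FTP::logged_commands += { "LIST", "NLST" };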
## This function splits FTP reply codes into their three constituent digits.
global parse_ftp_reply_code: function(code: count): ReplyCode;
global log_ftp: event(rec: Info);
}
# Add the state tracking information variable to the connection record
redef record connection += {
ftp: Info &optional;
};
# Configure DPD
const ports = { 21/tcp } &redef;
redef capture_filters += { ["ftp"] = "port 21" };
redef dpd_config += { [ANALYZER_FTP] = [$ports = ports] };
# Establish the variable for tracking expected connections.
global ftp_data_expected: table[addr, port] of ExpectedConn &create_expire=5mins;
event bro_init() &priority=5
{
Log::create_stream(FTP, [$columns=Info, $ev=log_ftp]);
}
## A set of commands where the argument can be expected to refer
## to a file or directory.
const file_cmds = {
"APPE", "CWD", "DELE", "MKD", "RETR", "RMD", "RNFR", "RNTO",
"STOR", "STOU", "REST", "SIZE", "MDTM",
};
## Commands that either display or change the current working directory along
## with the response codes to indicate a successful command.
const directory_cmds = {
["CWD", 250],
["CDUP", 200], # typo in RFC?
["CDUP", 250], # as found in traces
["PWD", 257],
["XPWD", 257],
};
function parse_ftp_reply_code(code: count): ReplyCode
{
local a: ReplyCode;
a$z = code % 10;
code = code / 10;
a$y = code % 10;
code = code / 10;
a$x = code % 10;
return a;
}
function set_ftp_session(c: connection)
{
if ( ! c?$ftp )
{
local s: Info;
s$ts=network_time();
s$uid=c$uid;
s$id=c$id;
c$ftp=s;
# Add a shim command so the server can respond with some init response.
add_pending_cmd(c$ftp$pending_commands, "<init>", "");
}
}
function ftp_message(s: Info)
{
# If it either has a tag associated with it (something detected)
# or it's a deliberately logged command.
if ( |s$tags| > 0 || (s?$cmdarg && s$cmdarg$cmd in logged_commands) )
{
if ( s?$password && to_lower(s$user) !in guest_ids )
s$password = "<hidden>";
local arg = s$cmdarg$arg;
if ( s$cmdarg$cmd in file_cmds )
arg = fmt("ftp://%s%s", s$id$resp_h, build_path_compressed(s$cwd, arg));
s$ts=s$cmdarg$ts;
s$command=s$cmdarg$cmd;
if ( arg == "" )
delete s$arg;
else
s$arg=arg;
Log::write(FTP, s);
}
# The MIME and file_size fields are specific to file transfer commands
# and may not be used by all commands, so they need to be reset to
# "blank" values after logging.
delete s$mime_type;
delete s$mime_desc;
delete s$file_size;
# Tags are cleared every time too.
delete s$tags;
}
event ftp_request(c: connection, command: string, arg: string) &priority=5
{
# Write out the previous command when a new command is seen.
# The downside here is that commands definitely aren't logged until the
# next command is issued or the control session ends. In practicality
# this isn't an issue, but I suppose it could be a delay tactic for
# attackers.
if ( c?$ftp && c$ftp?$cmdarg && c$ftp?$reply_code )
{
remove_pending_cmd(c$ftp$pending_commands, c$ftp$cmdarg);
ftp_message(c$ftp);
}
local id = c$id;
set_ftp_session(c);
# Queue up the new command and argument
add_pending_cmd(c$ftp$pending_commands, command, arg);
if ( command == "USER" )
c$ftp$user = arg;
else if ( command == "PASS" )
c$ftp$password = arg;
else if ( command == "PORT" || command == "EPRT" )
{
local data = (command == "PORT") ?
parse_ftp_port(arg) : parse_eftp_port(arg);
if ( data$valid )
{
c$ftp$passive=F;
local expected = [$host=id$resp_h, $state=copy(c$ftp)];
ftp_data_expected[data$h, data$p] = expected;
expect_connection(id$resp_h, data$h, data$p, ANALYZER_FILE, 5mins);
}
else
{
# TODO: raise a notice? does anyone care?
}
}
}
event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &priority=5
{
# TODO: figure out what to do with continued FTP response (not used much)
#if ( cont_resp ) return;
local id = c$id;
set_ftp_session(c);
c$ftp$cmdarg = get_pending_cmd(c$ftp$pending_commands, code, msg);
c$ftp$reply_code = code;
c$ftp$reply_msg = msg;
# TODO: do some sort of generic clear text login processing here.
local response_xyz = parse_ftp_reply_code(code);
#if ( response_xyz$x == 2 && # successful
# session$cmdarg$cmd == "PASS" )
# do_ftp_login(c, session);
if ( (code == 150 && c$ftp$cmdarg$cmd == "RETR") ||
(code == 213 && c$ftp$cmdarg$cmd == "SIZE") )
{
# NOTE: This isn't exactly the right thing to do for SIZE, since the
# size of a different file could be checked. However, the file size
# will be overwritten by the server's response to the RETR command
# if one is given as well, which would be more correct.
c$ftp$file_size = extract_count(msg);
}
# PASV and EPSV processing
else if ( (code == 227 || code == 229) &&
(c$ftp$cmdarg$cmd == "PASV" || c$ftp$cmdarg$cmd == "EPSV") )
{
local data = (code == 227) ? parse_ftp_pasv(msg) : parse_ftp_epsv(msg);
if ( data$valid )
{
c$ftp$passive=T;
if ( code == 229 && data$h == 0.0.0.0 )
data$h = id$resp_h;
local expected = [$host=id$orig_h, $state=c$ftp];
ftp_data_expected[data$h, data$p] = expected;
expect_connection(id$orig_h, data$h, data$p, ANALYZER_FILE, 5mins);
}
else
{
# TODO: do something if there was a problem parsing the PASV message?
}
}
if ( [c$ftp$cmdarg$cmd, code] in directory_cmds )
{
if ( c$ftp$cmdarg$cmd == "CWD" )
c$ftp$cwd = build_path(c$ftp$cwd, c$ftp$cmdarg$arg);
else if ( c$ftp$cmdarg$cmd == "CDUP" )
c$ftp$cwd = cat(c$ftp$cwd, "/..");
else if ( c$ftp$cmdarg$cmd == "PWD" || c$ftp$cmdarg$cmd == "XPWD" )
c$ftp$cwd = extract_path(msg);
}
# In case there are multiple commands queued, go ahead and remove the
# command here and log because we can't do the normal processing pipeline
# to wait for a new command before logging the command/response pair.
if ( |c$ftp$pending_commands| > 1 )
{
remove_pending_cmd(c$ftp$pending_commands, c$ftp$cmdarg);
ftp_message(c$ftp);
}
}
event expected_connection_seen(c: connection, a: count) &priority=10
{
local id = c$id;
if ( [id$resp_h, id$resp_p] in ftp_data_expected )
add c$service["ftp-data"];
}
event file_transferred(c: connection, prefix: string, descr: string,
mime_type: string) &priority=5
{
local id = c$id;
if ( [id$resp_h, id$resp_p] in ftp_data_expected )
{
local expected = ftp_data_expected[id$resp_h, id$resp_p];
local s = expected$state;
s$mime_type = mime_type;
s$mime_desc = descr;
}
}
event file_transferred(c: connection, prefix: string, descr: string,
mime_type: string) &priority=-5
{
local id = c$id;
if ( [id$resp_h, id$resp_p] in ftp_data_expected )
delete ftp_data_expected[id$resp_h, id$resp_p];
}
# Use state remove event to cover connections terminated by RST.
event connection_state_remove(c: connection) &priority=-5
{
if ( ! c?$ftp ) return;
for ( ca in c$ftp$pending_commands )
{
c$ftp$cmdarg = c$ftp$pending_commands[ca];
ftp_message(c$ftp);
}
}

View file

@ -0,0 +1,134 @@
module FTP;
export {
type CmdArg: record {
ts: time;
cmd: string &default="<unknown>";
arg: string &default="";
seq: count &default=0;
};
type PendingCmds: table[count] of CmdArg;
const cmd_reply_code: set[string, count] = {
# According to RFC 959
["<init>", [120, 220, 421]],
["USER", [230, 331, 332, 421, 530, 500, 501]],
["PASS", [230, 202, 332, 421, 530, 500, 501, 503]],
["ACCT", [230, 202, 421, 530, 500, 501, 503]],
["CWD", [250, 421, 500, 501, 502, 530, 550]],
["CDUP", [200, 250, 421, 500, 501, 502, 530, 550]],
["SMNT", [202, 250, 421, 500, 501, 502, 530, 550]],
["REIN", [120, 220, 421, 500, 502]],
["QUIT", [221, 500]],
["PORT", [200, 421, 500, 501, 530]],
["PASV", [227, 421, 500, 501, 502, 530]],
["MODE", [200, 421, 500, 501, 502, 504, 530]],
["TYPE", [200, 421, 500, 501, 504, 530]],
["STRU", [200, 421, 500, 501, 504, 530]],
["ALLO", [200, 202, 421, 500, 501, 504, 530]],
["REST", [200, 350, 421, 500, 501, 502, 530]],
["STOR", [110, 125, 150, 226, 250, 421, 425, 426, 451, 551, 552, 532, 450, 452, 553, 500, 501, 530, 550]],
["STOU", [110, 125, 150, 226, 250, 421, 425, 426, 451, 551, 552, 532, 450, 452, 553, 500, 501, 530, 550]],
["RETR", [110, 125, 150, 226, 250, 421, 425, 426, 451, 450, 500, 501, 530, 550]],
["LIST", [125, 150, 226, 250, 421, 425, 426, 451, 450, 500, 501, 502, 530, 550]],
["NLST", [125, 150, 226, 250, 421, 425, 426, 451, 450, 500, 501, 502, 530, 550]],
["APPE", [125, 150, 226, 250, 421, 425, 426, 451, 551, 552, 532, 450, 550, 452, 553, 500, 501, 502, 530]],
["RNFR", [350, 421, 450, 550, 500, 501, 502, 530]],
["RNTO", [250, 421, 532, 553, 500, 501, 502, 503, 530]],
["DELE", [250, 421, 450, 550, 500, 501, 502, 530]],
["RMD", [250, 421, 500, 501, 502, 530, 550]],
["MKD", [257, 421, 500, 501, 502, 530, 550]],
["PWD", [257, 421, 500, 501, 502, 550]],
["ABOR", [225, 226, 421, 500, 501, 502]],
["SYST", [215, 421, 500, 501, 502, 530]],
["STAT", [211, 212, 213, 421, 450, 500, 501, 502, 530]],
["HELP", [200, 211, 214, 421, 500, 501, 502]],
["SITE", [200, 202, 214, 500, 501, 502, 530]],
["NOOP", [200, 421, 500]],
# Extensions
["LPRT", [500, 501, 521]], # RFC1639
["FEAT", [211, 500, 502]], # RFC2389
["OPTS", [200, 451, 501]], # RFC2389
["EPSV", [229, 500, 501]], # RFC2428
["EPRT", [200, 500, 501, 522]], # RFC2428
["SIZE", [213, 500, 501, 550]], # RFC3659
["MDTM", [213, 500, 501, 550]], # RFC3659
["MLST", [150, 226, 250, 500, 501, 550]], # RFC3659
["MLSD", [150, 226, 250, 500, 501, 550]], # RFC3659
["CLNT", [200, 500]], # No RFC (indicate client software)
["MACB", [200, 500, 550]], # No RFC (test for MacBinary support)
["<init>", 0], # unexpected command-reply pair
["<missing>", 0], # unexpected command-reply pair
["QUIT", 0], # unexpected command-reply pair
} &redef;
}
function add_pending_cmd(pc: PendingCmds, cmd: string, arg: string): CmdArg
{
local ca = [$cmd = cmd, $arg = arg, $seq=|pc|+1, $ts=network_time()];
pc[ca$seq] = ca;
return ca;
}
# Determine which is the best command to match with based on the
# response code and message.
function get_pending_cmd(pc: PendingCmds, reply_code: count, reply_msg: string): CmdArg
{
local best_match: CmdArg;
local best_seq = 0;
local best_score: int = -1;
for ( cmd_seq in pc )
{
local cmd = pc[cmd_seq];
local score: int = 0;
# if the command is compatible with the reply code
# code 500 (syntax error) is compatible with all commands
if ( reply_code == 500 || [cmd$cmd, reply_code] in cmd_reply_code )
score = score + 100;
# if the command or the command arg appears in the reply message
if ( strstr(reply_msg, cmd$cmd) > 0 )
score = score + 20;
if ( strstr(reply_msg, cmd$arg) > 0 )
score = score + 10;
if ( score > best_score ||
( score == best_score && best_seq > cmd_seq ) ) # break tie with sequence number
{
best_score = score;
best_seq = cmd_seq;
best_match = cmd;
}
}
#if ( [best_match$cmd, reply_code] !in cmd_reply_code )
# {
# # TODO: maybe do something when best match doesn't have an expected response code?
# }
return best_match;
}
function remove_pending_cmd(pc: PendingCmds, ca: CmdArg): bool
{
if ( ca$seq in pc )
{
delete pc[ca$seq];
return T;
}
else
return F;
}
function pop_pending_cmd(pc: PendingCmds, reply_code: count, reply_msg: string): CmdArg
{
local ca = get_pending_cmd(pc, reply_code, reply_msg);
remove_pending_cmd(pc, ca);
return ca;
}

View file

@ -0,0 +1,5 @@
@load ./main
@load ./utils
@load ./file-ident
@load ./file-hash
@load ./file-extract

View file

@ -0,0 +1,56 @@
##! Extracts items from HTTP traffic, writing each to its own file. At this
##! time only the message body from the server can be extracted with this
##! script.
module HTTP;
export {
## Pattern of file mime types to extract from HTTP entity bodies.
const extract_file_types = /NO_DEFAULT/ &redef;
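# For example (a sketch), to extract Windows/DOS executables from HTTP
# response bodies:
#
#     redef HTTP::extract_file_types = /application\/x-dosexec/;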
## The on-disk prefix for files to be extracted from HTTP entity bodies.
const extraction_prefix = "http-item" &redef;
redef record Info += {
## This field can be set per-connection to determine if the entity body
## will be extracted. It must be set to T on or before the first
## :bro:id:`http_entity_data` event.
extracting_file: bool &default=F;
## This is the holder for the file handle as the file is being written
## to disk.
extraction_file: file &log &optional;
};
redef record State += {
entity_bodies: count &default=0;
};
}
event http_entity_data(c: connection, is_orig: bool, length: count, data: string) &priority=5
{
# Client body extraction is not currently supported in this script.
if ( is_orig || ! c?$http ) return;
if ( c$http$first_chunk )
{
if ( c$http?$mime_type &&
extract_file_types in c$http$mime_type )
{
c$http$extracting_file = T;
local suffix = fmt("%s_%d.dat", is_orig ? "orig" : "resp", ++c$http_state$entity_bodies);
local fname = generate_extraction_filename(extraction_prefix, c, suffix);
c$http$extraction_file = open(fname);
enable_raw_output(c$http$extraction_file);
}
}
if ( c$http$extracting_file )
print c$http$extraction_file, data;
}
event http_end_entity(c: connection, is_orig: bool)
{
if ( c$http$extracting_file )
close(c$http$extraction_file);
}

View file

@ -0,0 +1,92 @@
##! Calculate hashes for HTTP body transfers.
module HTTP;
export {
redef enum Notice::Type += {
## Indicates that an MD5 sum was calculated for an HTTP response body.
MD5,
};
redef record Info += {
## The MD5 sum for a file transferred over HTTP will be stored here.
md5: string &log &optional;
## This value can be set per-transfer to determine per request
## if a file should have an MD5 sum generated. It must be
## set to T at the time of or before the first chunk of body data.
calc_md5: bool &default=F;
## This boolean value indicates if an MD5 sum is currently being
## calculated for the current file transfer.
calculating_md5: bool &default=F;
};
## Generate MD5 sums for these filetypes.
const generate_md5 = /application\/x-dosexec/ # Windows and DOS executables
| /application\/x-executable/ # *NIX executable binary
&redef;
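# For example (a sketch), PDFs could be hashed as well by redefining the
# whole pattern:
#
#     redef HTTP::generate_md5 = /application\/x-dosexec/
#                              | /application\/x-executable/
#                              | /application\/pdf/;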
}
## Initialize and calculate the hash.
event http_entity_data(c: connection, is_orig: bool, length: count, data: string) &priority=5
{
if ( is_orig || ! c?$http ) return;
if ( c$http$first_chunk )
{
if ( c$http$calc_md5 ||
(c$http?$mime_type && generate_md5 in c$http$mime_type) )
{
c$http$calculating_md5 = T;
md5_hash_init(c$id);
}
}
if ( c$http$calculating_md5 )
md5_hash_update(c$id, data);
}
## In the event of a content gap during a file transfer, check the state of
## the MD5 sum calculation and stop calculating the MD5 since it would be
## incorrect anyway.
event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5
{
if ( is_orig || ! c?$http ) return;
set_state(c, F, is_orig);
if ( c$http$calculating_md5 )
{
c$http$calculating_md5 = F;
md5_hash_finish(c$id);
}
}
## When the file finishes downloading, finish the hash and generate a notice.
event http_message_done(c: connection, is_orig: bool, stat: http_message_stat) &priority=-3
{
if ( is_orig || ! c?$http ) return;
if ( c$http$calculating_md5 )
{
local url = build_url_http(c$http);
c$http$calculating_md5 = F;
c$http$md5 = md5_hash_finish(c$id);
NOTICE([$note=MD5, $msg=fmt("%s %s %s", c$id$orig_h, c$http$md5, url),
$sub=c$http$md5, $conn=c, $URL=url]);
}
}
event connection_state_remove(c: connection) &priority=-5
{
if ( c?$http_state &&
c$http_state$current_response in c$http_state$pending &&
c$http_state$pending[c$http_state$current_response]$calculating_md5 )
{
# The MD5 sum isn't going to be saved anywhere since the entire
# body wouldn't have been seen anyway and we'd just be giving an
# incorrect MD5 sum.
md5_hash_finish(c$id);
}
}

View file

@ -0,0 +1,76 @@
##! This script is involved in the identification of file types in HTTP
##! response bodies.
# Add the magic number signatures to the core signature set.
redef signature_files += "base/protocols/http/file-ident.sig";
# Ignore the signatures used to match files
redef Signatures::ignored_ids += /^matchfile-/;
module HTTP;
export {
redef enum Notice::Type += {
# This notice is thrown when the file extension doesn't
# seem to match the file contents.
Incorrect_File_Type,
};
redef record Info += {
## This will record the mime_type identified.
mime_type: string &log &optional;
## This indicates that no data of the current file transfer has been
## seen yet. After the first :bro:id:`http_entity_data` event, it
## will be set to T.
first_chunk: bool &default=T;
};
redef enum Tags += {
IDENTIFIED_FILE
};
# Create regexes that *should* be in the URLs for specific mime types.
# Notices are generated if the pattern doesn't match the URL for the file type.
const mime_types_extensions: table[string] of pattern = {
["application/x-dosexec"] = /\.([eE][xX][eE]|[dD][lL][lL])/,
} &redef;
}
event signature_match(state: signature_state, msg: string, data: string) &priority=5
{
# Only signatures matching file types are dealt with here.
if ( /^matchfile-/ !in state$sig_id ) return;
local c = state$conn;
set_state(c, F, F);
# Not much point in any of this if we don't know about the HTTP session.
if ( ! c?$http ) return;
# Set the mime type that was detected.
c$http$mime_type = msg;
if ( msg in mime_types_extensions &&
c$http?$uri && mime_types_extensions[msg] !in c$http$uri )
{
local url = build_url_http(c$http);
local message = fmt("%s %s %s", msg, c$http$method, url);
NOTICE([$note=Incorrect_File_Type,
$msg=message,
$conn=c,
$method=c$http$method,
$URL=url]);
}
}
event http_entity_data(c: connection, is_orig: bool, length: count, data: string) &priority=5
{
if ( c$http$first_chunk && ! c$http?$mime_type )
c$http$mime_type = split1(identify_data(data, T), /;/)[1];
}
event http_entity_data(c: connection, is_orig: bool, length: count, data: string) &priority=-10
{
if ( c$http$first_chunk )
c$http$first_chunk=F;
}

View file

@ -0,0 +1,144 @@
# These signatures are used as a replacement for libmagic. The signature
# name needs to start with "matchfile" and the "event" directive takes
# the mime type of the file matched by the http-reply-body pattern.
#
# Signatures from: http://www.garykessler.net/library/file_sigs.html
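#
# A hedged, illustrative template (commented out; not part of the original set)
# showing the convention any new file-type signature would follow, here for
# Windows icon files (magic bytes 00 00 01 00):
#
# signature matchfile-ico {
#     http-reply-body /\x00\x00\x01\x00/
#     event "image/x-icon"
# }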
signature matchfile-exe {
http-reply-body /\x4D\x5A/
event "application/x-dosexec"
}
signature matchfile-elf {
http-reply-body /\x7F\x45\x4C\x46/
event "application/x-executable"
}
signature matchfile-script {
# This is meant to match the interpreter declaration at the top of many
# interpreted scripts.
http-reply-body /\#\![[:blank:]]?\//
event "application/x-script"
}
signature matchfile-wmv {
http-reply-body /\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C/
event "video/x-ms-wmv"
}
signature matchfile-flv {
http-reply-body /\x46\x4C\x56\x01/
event "video/x-flv"
}
signature matchfile-swf {
http-reply-body /[\x46\x43]\x57\x53/
event "application/x-shockwave-flash"
}
signature matchfile-jar {
http-reply-body /\x5F\x27\xA8\x89/
event "application/java-archive"
}
signature matchfile-class {
http-reply-body /\xCA\xFE\xBA\xBE/
event "application/java-byte-code"
}
signature matchfile-msoffice-2007 {
# MS Office 2007 XML documents
http-reply-body /\x50\x4B\x03\x04\x14\x00\x06\x00/
event "application/msoffice"
}
signature matchfile-msoffice {
# Older MS Office files
http-reply-body /\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1/
event "application/msoffice"
}
signature matchfile-rtf {
http-reply-body /\x7B\x5C\x72\x74\x66\x31/
event "application/rtf"
}
signature matchfile-lnk {
http-reply-body /\x4C\x00\x00\x00\x01\x14\x02\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46/
event "application/x-ms-shortcut"
}
signature matchfile-torrent {
http-reply-body /\x64\x38\x3A\x61\x6E\x6E\x6F\x75\x6E\x63\x65/
event "application/x-bittorrent"
}
signature matchfile-pdf {
http-reply-body /\x25\x50\x44\x46/
event "application/pdf"
}
signature matchfile-html {
http-reply-body /<[hH][tT][mM][lL]/
event "text/html"
}
signature matchfile-html2 {
http-reply-body /<![dD][oO][cC][tT][yY][pP][eE][[:blank:]][hH][tT][mM][lL]/
event "text/html"
}
signature matchfile-xml {
http-reply-body /<\??[xX][mM][lL]/
event "text/xml"
}
signature matchfile-gif {
http-reply-body /\x47\x49\x46\x38[\x37\x39]\x61/
event "image/gif"
}
signature matchfile-jpg {
http-reply-body /\xFF\xD8\xFF[\xDB\xE0\xE1\xE2\xE3\xE8]..[\x4A\x45\x53][\x46\x78\x50][\x49\x69][\x46\x66]/
event "image/jpeg"
}
signature matchfile-tiff {
http-reply-body /\x4D\x4D\x00[\x2A\x2B]/
event "image/tiff"
}
signature matchfile-png {
http-reply-body /\x89\x50\x4e\x47/
event "image/png"
}
signature matchfile-zip {
http-reply-body /\x50\x4B\x03\x04/
event "application/zip"
}
signature matchfile-bzip {
http-reply-body /\x42\x5A\x68/
event "application/bzip2"
}
signature matchfile-gzip {
http-reply-body /\x1F\x8B\x08/
event "application/x-gzip"
}
signature matchfile-cab {
http-reply-body /\x4D\x53\x43\x46/
event "application/vnd.ms-cab-compressed"
}
signature matchfile-rar {
http-reply-body /\x52\x61\x72\x21\x1A\x07\x00/
event "application/x-rar-compressed"
}
signature matchfile-7z {
http-reply-body /\x37\x7A\xBC\xAF\x27\x1C/
event "application/x-7z-compressed"
}

View file

@ -0,0 +1,247 @@
module HTTP;
export {
redef enum Log::ID += { HTTP };
## Indicate a type of attack or compromise in the record to be logged.
type Tags: enum {
EMPTY
};
## This setting determines whether passwords used in Basic-Auth are captured.
const default_capture_password = F &redef;
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
## The verb used in the HTTP request (GET, POST, HEAD, etc.).
method: string &log &optional;
## The value of the HOST header.
host: string &log &optional;
## The URI used in the request.
uri: string &log &optional;
## The value of the "referer" header. The comment is deliberately
## misspelled like the standard declares, but the name used here is
## "referrer" spelled correctly.
referrer: string &log &optional;
## The value of the User-Agent header from the client.
user_agent: string &log &optional;
## The value of the Content-Length header from the client.
request_content_length: count &log &optional;
## The value of the Content-Length header from the server.
response_content_length: count &log &optional;
## The status code returned by the server.
status_code: count &log &optional;
## The status message returned by the server.
status_msg: string &log &optional;
## The filename given in the Content-Disposition header
## sent by the server.
filename: string &log &optional;
## This is a set of indicators of various attributes discovered and
## related to a particular request/response pair.
tags: set[Tags] &log;
## The username if basic-auth is performed for the request.
username: string &log &optional;
## The password if basic-auth is performed for the request.
password: string &log &optional;
## This determines if the password will be captured for this request.
capture_password: bool &default=default_capture_password;
## All of the headers that may indicate if the request was proxied.
proxied: set[string] &log &optional;
};
type State: record {
pending: table[count] of Info;
current_response: count &default=0;
current_request: count &default=0;
};
## The list of HTTP headers typically used to indicate a proxied request.
const proxy_headers: set[string] = {
"FORWARDED",
"X-FORWARDED-FOR",
"X-FORWARDED-FROM",
"CLIENT-IP",
"VIA",
"XROXY-CONNECTION",
"PROXY-CONNECTION",
} &redef;
global log_http: event(rec: Info);
}
# Add the http state tracking fields to the connection record.
redef record connection += {
http: Info &optional;
http_state: State &optional;
};
# Initialize the HTTP logging stream.
event bro_init() &priority=5
{
Log::create_stream(HTTP, [$columns=Info, $ev=log_http]);
}
# DPD configuration.
const ports = {
80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3138/tcp,
8000/tcp, 8080/tcp, 8888/tcp,
};
redef dpd_config += {
[[ANALYZER_HTTP, ANALYZER_HTTP_BINPAC]] = [$ports = ports],
};
redef capture_filters += {
["http"] = "tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888)"
};
function new_http_session(c: connection): Info
{
local tmp: Info;
tmp$ts=network_time();
tmp$uid=c$uid;
tmp$id=c$id;
return tmp;
}
function set_state(c: connection, request: bool, is_orig: bool)
{
if ( ! c?$http_state )
{
local s: State;
c$http_state = s;
}
# These deal with new requests and responses.
if ( request || c$http_state$current_request !in c$http_state$pending )
c$http_state$pending[c$http_state$current_request] = new_http_session(c);
if ( ! is_orig && c$http_state$current_response !in c$http_state$pending )
c$http_state$pending[c$http_state$current_response] = new_http_session(c);
if ( is_orig )
c$http = c$http_state$pending[c$http_state$current_request];
else
c$http = c$http_state$pending[c$http_state$current_response];
}
event http_request(c: connection, method: string, original_URI: string,
unescaped_URI: string, version: string) &priority=5
{
if ( ! c?$http_state )
{
local s: State;
c$http_state = s;
}
++c$http_state$current_request;
set_state(c, T, T);
c$http$method = method;
c$http$uri = unescaped_URI;
}
event http_reply(c: connection, version: string, code: count, reason: string) &priority=5
{
if ( ! c?$http_state )
{
local s: State;
c$http_state = s;
}
++c$http_state$current_response;
set_state(c, F, F);
c$http$status_code = code;
c$http$status_msg = reason;
}
event http_header(c: connection, is_orig: bool, name: string, value: string) &priority=5
{
set_state(c, F, is_orig);
if ( is_orig ) # client headers
{
if ( name == "REFERER" )
c$http$referrer = value;
else if ( name == "HOST" )
# The split is done to remove the occasional port value that shows up here.
c$http$host = split1(value, /:/)[1];
else if ( name == "CONTENT-LENGTH" )
c$http$request_content_length = extract_count(value);
else if ( name == "USER-AGENT" )
c$http$user_agent = value;
else if ( name in proxy_headers )
{
if ( ! c$http?$proxied )
c$http$proxied = set();
add c$http$proxied[fmt("%s -> %s", name, value)];
}
else if ( name == "AUTHORIZATION" )
{
if ( /^[bB][aA][sS][iI][cC] / in value )
{
local userpass = decode_base64(sub(value, /[bB][aA][sS][iI][cC][[:blank:]]/, ""));
local up = split(userpass, /:/);
if ( |up| >= 2 )
{
c$http$username = up[1];
if ( c$http$capture_password )
c$http$password = up[2];
}
else
{
c$http$username = "<problem-decoding>";
if ( c$http$capture_password )
c$http$password = userpass;
}
}
}
}
else # server headers
{
if ( name == "CONTENT-LENGTH" )
c$http$response_content_length = extract_count(value);
else if ( name == "CONTENT-DISPOSITION" &&
/[fF][iI][lL][eE][nN][aA][mM][eE]/ in value )
c$http$filename = sub(value, /^.*[fF][iI][lL][eE][nN][aA][mM][eE]=/, "");
}
}
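# Illustrative example (not part of the original script): a client header of
#   Authorization: Basic YWxpY2U6c2VjcmV0
# is base64-decoded above to "alice:secret", so username becomes "alice" and,
# when capture_password is T, password becomes "secret".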
event http_message_done(c: connection, is_orig: bool, stat: http_message_stat) &priority = 5
{
set_state(c, F, is_orig);
}
event http_message_done(c: connection, is_orig: bool, stat: http_message_stat) &priority = -5
{
# The reply body is done so we're ready to log.
if ( ! is_orig )
{
Log::write(HTTP, c$http);
delete c$http_state$pending[c$http_state$current_response];
}
}
event connection_state_remove(c: connection)
{
# Flush all pending but incomplete request/response pairs.
if ( c?$http_state )
{
for ( r in c$http_state$pending )
{
Log::write(HTTP, c$http_state$pending[r]);
}
}
}

View file

@ -0,0 +1,94 @@
##! This script makes it possible for the HTTP analysis scripts to analyze
##! the apparent normal case of "206 Partial Content" responses.
##!
##! This script doesn't work yet and isn't loaded by default.
module HTTP;
export {
redef enum Notice::Type += {
Partial_Content_Out_Of_Order,
};
type Range: record {
from: count;
to: count;
} &log;
redef record Info += {
current_range: count &default=0;
request_ranges: vector of Range &optional;
response_range: Range &optional;
};
## Indexed by the client IP address, server IP address, and URL being requested. The
## URL is part of the index in case multiple partial-content (segmented) files are
## being transferred simultaneously between the server and client.
global partial_content_files: table[addr, addr, string] of Info &read_expire=5mins &redef;
}
event http_header(c: connection, is_orig: bool, name: string, value: string) &priority=2
{
local parts: table[count] of string;
if ( is_orig && name == "RANGE" )
{
# Example --> Range: bytes=1-1,2336-4951
parts = split(value, /[=]/);
if ( 2 in parts )
{
local ranges = split(parts[2], /,/);
for ( i in ranges )
{
if ( ! c$http?$request_ranges )
c$http$request_ranges = vector();
parts = split(ranges[i], /-/);
local r: Range = [$from=extract_count(parts[1]), $to=extract_count(parts[2])];
print r;
c$http$request_ranges[|c$http$request_ranges|] = r;
}
}
}
else if ( ! is_orig && name == "CONTENT-RANGE" )
{
# Example --> Content-Range: bytes 2336-4951/489528
parts = split(value, /[0-9]*/);
c$http$response_range = [$from=extract_count(parts[2]), $to=extract_count(parts[4])];
}
}
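# Illustrative walk-through (not part of the original script): for the example
# request header "Range: bytes=1-1,2336-4951" above, request_ranges ends up
# holding two Range records: [$from=1, $to=1] and [$from=2336, $to=4951].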
event http_reply(c: connection, version: string, code: count, reason: string) &priority=5
{
if ( code != 206 || ! c$http?$request_ranges )
return;
local url = build_url(c$http);
if ( [c$id$orig_h, c$id$resp_h, url] !in partial_content_files )
{
partial_content_files[c$id$orig_h, c$id$resp_h, url] = copy(c$http);
}
}
event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
{
if ( is_orig || c$http$status_code != 206 || ! c$http?$request_ranges )
return;
local url = build_url(c$http);
local http = partial_content_files[c$id$orig_h, c$id$resp_h, url];
local range = http$request_ranges[http$current_range];
print http$current_range;
if ( http$current_range == 0 &&
c$http$response_range$from == 0 )
{
print "correct file beginning!";
}
}
event http_end_entity(c: connection, is_orig: bool)
{
print "end entity";
++c$http$current_range;
}

View file

@ -0,0 +1,38 @@
##! Utilities specific for HTTP processing.
module HTTP;
export {
global extract_keys: function(data: string, kv_splitter: pattern): string_vec;
global build_url: function(rec: Info): string;
global build_url_http: function(rec: Info): string;
}
function extract_keys(data: string, kv_splitter: pattern): string_vec
{
local key_vec: vector of string = vector();
local parts = split(data, kv_splitter);
for ( part_index in parts )
{
local key_val = split1(parts[part_index], /=/);
if ( 1 in key_val )
key_vec[|key_vec|] = key_val[1];
}
return key_vec;
}
function build_url(rec: Info): string
{
local uri = rec?$uri ? rec$uri : "/<missed_request>";
local host = rec?$host ? rec$host : fmt("%s", rec$id$resp_h);
if ( rec$id$resp_p != 80/tcp )
host = fmt("%s:%s", host, rec$id$resp_p);
return fmt("%s%s", host, uri);
}
function build_url_http(rec: Info): string
{
return fmt("http://%s", build_url(rec));
}
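# Hedged usage sketch with made-up values (not part of the original script):
#
#   extract_keys("a=1&b=2&c=3", /&/)   # returns vector("a", "b", "c")
#   build_url(rec)                     # e.g. "example.com:8080/index.html"
#   build_url_http(rec)                # e.g. "http://example.com:8080/index.html"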

View file

@ -0,0 +1,2 @@
@load ./main
@load ./dcc-send

View file

@ -0,0 +1,109 @@
##! File extraction and introspection for DCC transfers over IRC.
##!
##! There is a major problem with this script in the cluster context because
##! we might see A send B a message that a DCC connection is to be expected,
##! but that connection will actually be between B and C which could be
##! analyzed on a different worker.
##!
##! Example line from IRC server indicating that the DCC SEND is about to start:
##! PRIVMSG my_nick :^ADCC SEND whateverfile.zip 3640061780 1026 41709^A
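##!
##! In that example line, the fields after the filename are the sender's IPv4
##! address encoded as a 32-bit decimal integer (3640061780 == 216.246.239.84),
##! the TCP port to connect to (1026), and the advertised file size in bytes
##! (41709).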
module IRC;
export {
redef enum Tag += { EXTRACTED_FILE };
## Pattern of file mime types to extract from IRC DCC file transfers.
const extract_file_types = /NO_DEFAULT/ &redef;
## The on-disk prefix for files to be extracted from IRC DCC file transfers.
const extraction_prefix = "irc-dcc-item" &redef;
redef record Info += {
dcc_file_name: string &log &optional;
dcc_file_size: count &log &optional;
dcc_mime_type: string &log &optional;
## The file handle for the file to be extracted
extraction_file: file &log &optional;
## A boolean to indicate if the current file transfer should be extracted.
extract_file: bool &default=F;
## The number of files that have been extracted during the session.
num_extracted_files: count &default=0;
};
}
global dcc_expected_transfers: table[addr, port] of Info = table();
event file_transferred(c: connection, prefix: string, descr: string,
mime_type: string) &priority=3
{
local id = c$id;
if ( [id$resp_h, id$resp_p] !in dcc_expected_transfers )
return;
local irc = dcc_expected_transfers[id$resp_h, id$resp_p];
irc$dcc_mime_type = split1(mime_type, /;/)[1];
if ( extract_file_types == irc$dcc_mime_type )
{
irc$extract_file = T;
add irc$tags[EXTRACTED_FILE];
local suffix = fmt("%d.dat", ++irc$num_extracted_files);
local fname = generate_extraction_filename(extraction_prefix, c, suffix);
irc$extraction_file = open(fname);
}
}
event file_transferred(c: connection, prefix: string, descr: string,
mime_type: string) &priority=-4
{
local id = c$id;
if ( [id$resp_h, id$resp_p] !in dcc_expected_transfers )
return;
local irc = dcc_expected_transfers[id$resp_h, id$resp_p];
local tmp = irc$command;
irc$command = "DCC";
Log::write(IRC, irc);
irc$command = tmp;
if ( irc$extract_file && irc?$extraction_file )
set_contents_file(id, CONTENTS_RESP, irc$extraction_file);
# Delete these values in case another DCC transfer
# happens during the IRC session.
delete irc$extract_file;
delete irc$extraction_file;
delete irc$dcc_file_name;
delete irc$dcc_file_size;
delete irc$dcc_mime_type;
delete dcc_expected_transfers[id$resp_h, id$resp_p];
}
event irc_dcc_message(c: connection, is_orig: bool,
prefix: string, target: string,
dcc_type: string, argument: string,
address: addr, dest_port: count, size: count) &priority=5
{
set_session(c);
if ( dcc_type != "SEND" )
return;
c$irc$dcc_file_name = argument;
c$irc$dcc_file_size = size;
local p = to_port(dest_port, tcp);
expect_connection(to_addr("0.0.0.0"), address, p, ANALYZER_FILE, 5 min);
dcc_expected_transfers[address, p] = c$irc;
}
event expected_connection_seen(c: connection, a: count) &priority=10
{
local id = c$id;
if ( [id$resp_h, id$resp_p] in dcc_expected_transfers )
add c$service["irc-dcc-data"];
}

View file

@ -0,0 +1,124 @@
##! This is the script that implements the core IRC analysis support. It only
##! logs a very limited subset of the IRC protocol by default. The points
##! that it logs at are NICK commands, USER commands, and JOIN commands. It
##! logs various bits of metadata as indicated in the :bro:type:`Info` record
##! along with the command and the command arguments.
module IRC;
export {
redef enum Log::ID += { IRC };
type Tag: enum {
EMPTY
};
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
nick: string &log &optional;
user: string &log &optional;
channels: set[string] &log &optional;
command: string &log &optional;
value: string &log &optional;
addl: string &log &optional;
tags: set[Tag] &log;
};
global irc_log: event(rec: Info);
}
redef record connection += {
irc: Info &optional;
};
# Some common IRC ports.
redef capture_filters += { ["irc-6666"] = "port 6666" };
redef capture_filters += { ["irc-6667"] = "port 6667" };
# DPD configuration.
global irc_ports = { 6666/tcp, 6667/tcp } &redef;
redef dpd_config += { [ANALYZER_IRC] = [$ports = irc_ports] };
event bro_init() &priority=5
{
Log::create_stream(IRC, [$columns=Info, $ev=irc_log]);
}
function new_session(c: connection): Info
{
local info: Info;
info$ts = network_time();
info$uid = c$uid;
info$id = c$id;
return info;
}
function set_session(c: connection)
{
if ( ! c?$irc )
c$irc = new_session(c);
c$irc$ts=network_time();
}
event irc_nick_message(c: connection, is_orig: bool, who: string, newnick: string) &priority=5
{
set_session(c);
if ( is_orig )
{
c$irc$command = "NICK";
c$irc$value = newnick;
}
}
event irc_nick_message(c: connection, is_orig: bool, who: string, newnick: string) &priority=-5
{
if ( is_orig )
{
Log::write(IRC, c$irc);
c$irc$nick = newnick;
}
}
event irc_user_message(c: connection, is_orig: bool, user: string, host: string, server: string, real_name: string) &priority=5
{
set_session(c);
if ( is_orig )
{
c$irc$command = "USER";
c$irc$value = user;
c$irc$addl=fmt("%s %s %s", host, server, real_name);
}
}
event irc_user_message(c: connection, is_orig: bool, user: string, host: string, server: string, real_name: string) &priority=-5
{
if ( is_orig )
{
Log::write(IRC, c$irc);
c$irc$user = user;
}
}
event irc_join_message(c: connection, is_orig: bool, info_list: irc_join_list) &priority=5
{
set_session(c);
if ( is_orig )
c$irc$command = "JOIN";
}
event irc_join_message(c: connection, is_orig: bool, info_list: irc_join_list) &priority=-5
{
if ( is_orig )
{
for ( l in info_list )
{
c$irc$value = l$channel;
c$irc$addl = (l$password != "" ? fmt(" with channel key: '%s'", l$password) : "");
Log::write(IRC, c$irc);
}
}
}

View file

@ -0,0 +1,4 @@
@load protocols/mime/base
@load protocols/mime/file-ident
@load protocols/mime/file-extract
@load protocols/mime/file-hash

View file

@ -0,0 +1,101 @@
##! The MIME script does analysis of MIME-encoded messages seen in certain
##! protocols (only SMTP and POP3 at the moment).
@load utils/strings
module MIME;
export {
redef enum Log::ID += { MIME };
# Let's assume for now that nothing transferring files using
# MIME attachments is multiplexing for simplicity's sake.
# We can make the assumption that one connection == one file (at a time)
type Info: record {
## This is the timestamp of when the MIME content transfer began.
ts: time &log;
uid: string &log;
id: conn_id &log;
## The application layer protocol over which the transfer was seen.
app_protocol: string &log &optional;
## The filename seen in the Content-Disposition header.
filename: string &log &optional;
## Track how many bytes of the MIME-encoded file have been seen.
content_len: count &log &default=0;
};
type State: record {
## Track the number of MIME encoded files transferred during this session.
level: count &default=0;
};
global log_mime: event(rec: Info);
}
redef record connection += {
mime: Info &optional;
mime_state: State &optional;
};
event bro_init()
{
Log::create_stream(MIME, [$columns=Info, $ev=log_mime]);
}
function new_mime_session(c: connection): Info
{
local info: Info;
info$ts=network_time();
info$uid=c$uid;
info$id=c$id;
return info;
}
function set_session(c: connection, new_entity: bool)
{
if ( ! c?$mime_state )
c$mime_state = [];
if ( ! c?$mime || new_entity )
c$mime = new_mime_session(c);
}
event mime_begin_entity(c: connection) &priority=10
{
set_session(c, T);
++c$mime_state$level;
if ( |c$service| > 0 )
c$mime$app_protocol = join_string_set(c$service, ",");
}
# This has priority -10 because other handlers need to know the current
# content_len before it's updated by this handler.
event mime_segment_data(c: connection, length: count, data: string) &priority=-10
{
c$mime$content_len = c$mime$content_len + length;
}
event mime_one_header(c: connection, h: mime_header_rec)
{
if ( h$name == "CONTENT-DISPOSITION" &&
/[fF][iI][lL][eE][nN][aA][mM][eE]/ in h$value )
c$mime$filename = sub(h$value, /^.*[fF][iI][lL][eE][nN][aA][mM][eE]=/, "");
}
event mime_end_entity(c: connection) &priority=-5
{
# This check and the delete below are just to cope with a bug where
# mime_end_entity can be generated multiple times for the same event.
if ( ! c?$mime )
return;
# Don't log anything if there wasn't any content.
if ( c$mime$content_len > 0 )
Log::write(MIME, c$mime);
delete c$mime;
}

View file

@ -0,0 +1,60 @@
@load protocols/mime/file-ident
@load utils/files
module MIME;
export {
## Pattern of file mime types to extract from MIME bodies.
const extract_file_types = /NO_DEFAULT/ &redef;
## The on-disk prefix for files to be extracted from MIME entity bodies.
const extraction_prefix = "mime-item" &redef;
redef record Info += {
## Optionally write the file to disk. Must be set prior to first
## data chunk being seen in an event.
extract_file: bool &default=F;
## Store the file handle here for the file currently being extracted.
extraction_file: file &optional;
## Store a count of the number of files that have been transferred in
## this conversation to create unique file names on disk.
num_extracted_files: count &default=0;
};
}
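# Hedged usage sketch (not part of the original script): a site could turn on
# extraction of, e.g., Windows executables from MIME bodies with a redef such as
#
#   redef MIME::extract_file_types = /application\/x-dosexec/;
#
# in its local site policy.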
event mime_segment_data(c: connection, length: count, data: string) &priority=5
{
if ( extract_file_types in c$mime$mime_type )
c$mime$extract_file = T;
}
event mime_segment_data(c: connection, length: count, data: string) &priority=3
{
if ( c$mime$extract_file && c$mime$content_len == 0 )
{
local suffix = fmt("%d.dat", ++c$mime$num_extracted_files);
local fname = generate_extraction_filename(extraction_prefix, c, suffix);
c$mime$extraction_file = open(fname);
enable_raw_output(c$mime$extraction_file);
}
}
event mime_segment_data(c: connection, length: count, data: string) &priority=-5
{
if ( c$mime$extract_file && c$mime?$extraction_file )
print c$mime$extraction_file, data;
}
event mime_end_entity(c: connection) &priority=-3
{
# TODO: this check is only due to a bug in mime_end_entity that
# causes the event to be generated twice for the same real event.
if ( ! c?$mime )
return;
if ( c$mime?$extraction_file )
close(c$mime$extraction_file);
}

View file

@ -0,0 +1,78 @@
@load protocols/mime/file-ident
module MIME;
export {
redef enum Notice::Type += {
## Indicates that an MD5 sum was calculated for a MIME message.
MD5,
};
redef record Info += {
## The calculated MD5 sum for the MIME entity.
md5: string &log &optional;
## Optionally calculate the file's MD5 sum. Must be set prior to the
## first data chunk being seen in an event.
calc_md5: bool &default=F;
## This boolean value indicates if an MD5 sum is being calculated
## for the current file transfer.
calculating_md5: bool &default=F;
};
## Generate MD5 sums for these filetypes.
const generate_md5 = /application\/x-dosexec/ # Windows and DOS executables
| /application\/x-executable/ # *NIX executable binary
&redef;
}
event mime_segment_data(c: connection, length: count, data: string) &priority=-5
{
if ( ! c?$mime ) return;
if ( c$mime$content_len == 0 )
{
if ( generate_md5 in c$mime$mime_type )
c$mime$calc_md5 = T;
if ( c$mime$calc_md5 )
{
c$mime$calculating_md5 = T;
md5_hash_init(c$id);
}
}
if ( c$mime$calculating_md5 )
md5_hash_update(c$id, data);
}
## In the event of a content gap during the MIME transfer, discard the state for
## the MD5 sum calculation and stop calculating the MD5, since it would be
## incorrect anyway.
event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5
{
if ( is_orig || ! c?$mime ) return;
if ( c$mime$calculating_md5 )
{
c$mime$calculating_md5 = F;
md5_hash_finish(c$id);
}
}
event mime_end_entity(c: connection) &priority=-3
{
# TODO: this check is only due to a bug in mime_end_entity that
# causes the event to be generated twice for the same real event.
if ( ! c?$mime )
return;
if ( c$mime$calculating_md5 )
{
c$mime$md5 = md5_hash_finish(c$id);
NOTICE([$note=MD5, $msg=fmt("Calculated a hash for a MIME entity from %s", c$id$orig_h),
$sub=c$mime$md5, $conn=c]);
}
}

View file

@ -0,0 +1,16 @@
@load protocols/mime/base
module MIME;
export {
redef record Info += {
## Sniffed MIME type for the transfer.
mime_type: string &log &optional;
};
}
event mime_segment_data(c: connection, length: count, data: string) &priority=7
{
if ( c$mime$content_len == 0 )
c$mime$mime_type = split1(identify_data(data, T), /;/)[1];
}

View file

@ -0,0 +1,147 @@
#
# Log RPC request and reply messages. Does not in itself start/activate
# an analyzer; you need to load portmap and/or NFS for that.
#
# TODO: maybe automatically load portmap, add a generic RPC analyzer, and
# use expect_connection so that we can see RPC requests/replies for RPC
# programs for which we don't have an analyzer.
#
module RPC;
export {
global log_file = open_log_file("rpc") &redef;
# Whether to match requests to replies at the policy layer
# (will report on retransmissions and missing requests or replies).
global track_requests_replies = T &redef;
}
type rpc_call_state: enum {
NONE,
HAVE_CALL,
HAVE_REPLY
};
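# Per-XID state machine: NONE is the freshly created placeholder (e.g. for a
# reply that arrives without a prior call), HAVE_CALL means the request has
# been seen and a reply is pending, and HAVE_REPLY means the reply was seen.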
type rpc_call_info: record {
state: rpc_call_state;
calltime: time;
cid: conn_id;
};
function new_call(cid: conn_id): rpc_call_info
{
local ci: rpc_call_info;
ci$state = NONE;
ci$calltime = network_time();
ci$cid = cid;
return ci;
}
function rpc_expire_xid(t: table[count] of rpc_call_info, xid: count): interval
{
local ci = t[xid];
if (ci$state != HAVE_REPLY)
print log_file, fmt("%.6f %s %s note XID %d never recevied a reply",
ci$calltime, id_string(ci$cid),
get_port_transport_proto(ci$cid$orig_p), xid);
return 0 sec;
}
function new_xid_table(): table[count] of rpc_call_info
{
local inner: table[count] of rpc_call_info &write_expire=rpc_timeout &expire_func=rpc_expire_xid;
return inner;
}
# Match requests to replies.
# The analyzer does this independently and might differ in timeouts and
# handling of XID reuse.
# FIXME: add timeouts. Note that we do clean up on connection_state_remove.
global rpc_calls: table[conn_id] of table[count] of rpc_call_info;
# &write_expire = rpc_timeout &expire_func=expire_rpc_call;
event rpc_dialogue(c: connection, prog: count, ver: count, proc: count, status: rpc_status, start_time: time, call_len: count, reply_len: count)
{
# TODO: We currently do nothing here.
# using the rpc_call and rpc_reply events, is all we need.
}
event rpc_call(c: connection, xid: count, prog: count, ver: count, proc: count, call_len: count)
{
if (track_requests_replies)
{
if (c$id !in rpc_calls)
rpc_calls[c$id] = new_xid_table();
if (xid !in rpc_calls[c$id])
rpc_calls[c$id][xid] = new_call(c$id);
local curstate = rpc_calls[c$id][xid]$state;
if (curstate == HAVE_CALL)
print log_file, fmt("%.6f %s %s note XID %d call retransmitted",
network_time(), id_string(c$id), get_port_transport_proto(c$id$orig_p),
xid);
else if (curstate == HAVE_REPLY)
print log_file, fmt("%.6f %s %s note XID %d call received after reply",
network_time(), id_string(c$id), get_port_transport_proto(c$id$orig_p),
xid);
rpc_calls[c$id][xid]$state = HAVE_CALL;
}
print log_file, fmt("%.6f %s %s rpc_call %d %d %d %d %d",
network_time(), id_string(c$id), get_port_transport_proto(c$id$orig_p),
xid, prog, ver, proc, call_len);
}
event rpc_reply(c: connection, xid: count, status: rpc_status, reply_len: count)
{
if (track_requests_replies)
{
if (c$id !in rpc_calls)
rpc_calls[c$id] = new_xid_table();
if (xid !in rpc_calls[c$id])
{
rpc_calls[c$id][xid] = new_call(c$id);
# XXX: what to do about calltime in rpc_call_info??
}
if (rpc_calls[c$id][xid]$state == NONE)
print log_file, fmt("%.6f %s %s note XID %d reply but call is missing",
network_time(), id_string(c$id), get_port_transport_proto(c$id$orig_p),
xid);
else if (rpc_calls[c$id][xid]$state == HAVE_REPLY)
print log_file, fmt("%.6f %s %s note XID %d reply retransmitted",
network_time(), id_string(c$id), get_port_transport_proto(c$id$orig_p),
xid);
rpc_calls[c$id][xid]$state = HAVE_REPLY;
}
print log_file, fmt("%.6f %s %s rpc_reply %d %s %d",
network_time(), reverse_id_string(c$id), get_port_transport_proto(c$id$orig_p),
xid, status, reply_len);
}
function finish_calls(cid: conn_id)
{
for (xid in rpc_calls[cid])
rpc_expire_xid(rpc_calls[cid], xid);
}
event connection_state_remove(c: connection)
{
if (c$id !in rpc_calls)
return;
finish_calls(c$id);
delete rpc_calls[c$id];
}
event bro_done()
{
for (cid in rpc_calls)
finish_calls(cid);
}

View file

@ -0,0 +1 @@
@load ./main

View file

@ -0,0 +1,367 @@
module SMTP;
export {
redef enum Log::ID += { SMTP };
redef enum Notice::Type += {
## Indicates that the server sent a reply mentioning an SMTP block list.
BL_Error_Message,
## Indicates the client's address is seen in the block list error message.
BL_Blocked_Host,
};
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
helo: string &log &optional;
mailfrom: string &log &optional;
rcptto: set[string] &log &optional;
date: string &log &optional;
from: string &log &optional;
to: set[string] &log &optional;
reply_to: string &log &optional;
msg_id: string &log &optional;
in_reply_to: string &log &optional;
subject: string &log &optional;
x_originating_ip: addr &log &optional;
first_received: string &log &optional;
second_received: string &log &optional;
## The last message the server sent to the client.
last_reply: string &log &optional;
files: set[string] &log &optional;
path: vector of addr &log &optional;
user_agent: string &log &optional;
## Indicate if this session is currently transmitting SMTP message
## envelope headers.
in_headers: bool &default=F;
## Indicate if the "Received: from" headers should still be processed.
process_received_from: bool &default=T;
## Maintain the current header for cases where there is header wrapping.
current_header: string &default="";
## Indicate when the message is logged and no longer applicable.
done: bool &default=F;
};
type State: record {
helo: string &optional;
## Count the number of individual messages transmitted during this
## SMTP session. Note, this is not the number of recipients, but the
## number of message bodies transferred.
messages_transferred: count &default=0;
pending_messages: set[Info] &optional;
};
## Direction to capture the full "Received from" path.
## REMOTE_HOSTS - only capture the path until an internal host is found.
## LOCAL_HOSTS - only capture the path until the external host is discovered.
## ALL_HOSTS - always capture the entire path.
## NO_HOSTS - never capture the path.
const mail_path_capture = ALL_HOSTS &redef;
# This matches content in SMTP error messages that indicate some
# block list doesn't like the connection/mail.
const bl_error_messages =
/spamhaus\.org\//
| /sophos\.com\/security\//
| /spamcop\.net\/bl/
| /cbl\.abuseat\.org\//
| /sorbs\.net\//
| /bsn\.borderware\.com\//
| /mail-abuse\.com\//
| /b\.barracudacentral\.com\//
| /psbl\.surriel\.com\//
| /antispam\.imp\.ch\//
| /dyndns\.com\/.*spam/
| /rbl\.knology\.net\//
| /intercept\.datapacket\.net\//
| /uceprotect\.net\//
| /hostkarma\.junkemailfilter\.com\// &redef;
global log_smtp: event(rec: Info);
## Configure the default ports for SMTP analysis.
const ports = { 25/tcp, 587/tcp } &redef;
}
redef record connection += {
smtp: Info &optional;
smtp_state: State &optional;
};
# Configure DPD
redef capture_filters += { ["smtp"] = "tcp port smtp or tcp port 587" };
redef dpd_config += { [ANALYZER_SMTP] = [$ports = ports] };
event bro_init() &priority=5
{
Log::create_stream(SMTP, [$columns=SMTP::Info, $ev=log_smtp]);
}
function find_address_in_smtp_header(header: string): string
{
local ips = find_ip_addresses(header);
# If more than one IP address is found, return the second.
if ( |ips| > 1 )
return ips[1];
# Otherwise, return the first.
else if ( |ips| > 0 )
return ips[0];
# Otherwise, there wasn't an IP address found.
else
return "";
}
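# Hedged example using RFC 5737 documentation addresses (not from the original
# script): for a header such as
#   "Received: from mail.example.com (mail.example.com [203.0.113.5]) by mx.example.net [198.51.100.7]"
# two addresses are found, so the function returns the second one,
# "198.51.100.7".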
function new_smtp_log(c: connection): Info
{
local l: Info;
l$ts=network_time();
l$uid=c$uid;
l$id=c$id;
if ( c?$smtp_state && c$smtp_state?$helo )
l$helo = c$smtp_state$helo;
# The path will always end with the hosts involved in this connection.
# The lower values in the vector are the end of the path.
l$path = vector(c$id$resp_h, c$id$orig_h);
return l;
}
function set_smtp_session(c: connection)
{
if ( ! c?$smtp_state )
c$smtp_state = [];
if ( ! c?$smtp || c$smtp$done )
{
c$smtp = new_smtp_log(c);
}
}
function smtp_message(c: connection)
{
Log::write(SMTP, c$smtp);
c$smtp$done = T;
# Track the number of messages seen in this session.
++c$smtp_state$messages_transferred;
}
event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &priority=5
{
set_smtp_session(c);
local upper_command = to_upper(command);
if ( upper_command == "HELO" || upper_command == "EHLO" )
{
c$smtp_state$helo = arg;
c$smtp$helo = arg;
}
else if ( upper_command == "RCPT" && /^[tT][oO]:/ in arg )
{
if ( ! c$smtp?$rcptto )
c$smtp$rcptto = set();
add c$smtp$rcptto[split1(arg, /:[[:blank:]]*/)[2]];
}
else if ( upper_command == "MAIL" && /^[fF][rR][oO][mM]:/ in arg )
{
# In case this is not the first message in a session we want to
# essentially write out a log, clear the session tracking, and begin
# new session tracking.
if ( c$smtp_state$messages_transferred > 0 )
{
smtp_message(c);
set_smtp_session(c);
}
local partially_done = split1(arg, /:[[:blank:]]*/)[2];
c$smtp$mailfrom = split1(partially_done, /[[:blank:]]?/)[1];
}
else if ( upper_command == "DATA" )
{
c$smtp$in_headers = T;
}
}
event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
msg: string, cont_resp: bool) &priority=5
{
set_smtp_session(c);
# This continually overwrites, but we want the last reply,
# so this actually works fine.
if ( code != 421 && code >= 400 )
{
c$smtp$last_reply = fmt("%d %s", code, msg);
# Raise a notice when an SMTP error about a block list is discovered.
if ( bl_error_messages in msg )
{
local note = BL_Error_Message;
local message = fmt("%s received an error message mentioning an SMTP block list", c$id$orig_h);
# Determine if the originator's IP address is in the message.
local ips = find_ip_addresses(msg);
local text_ip = "";
if ( |ips| > 0 && to_addr(ips[0]) == c$id$orig_h )
{
note = BL_Blocked_Host;
message = fmt("%s is on an SMTP block list", c$id$orig_h);
}
NOTICE([$note=note, $conn=c, $msg=message, $sub=msg]);
}
}
}
event smtp_data(c: connection, is_orig: bool, data: string) &priority=5
{
# Is there something we should be handling from the server?
if ( ! is_orig ) return;
set_smtp_session(c);
if ( ! c$smtp$in_headers )
{
if ( /^[cC][oO][nN][tT][eE][nN][tT]-[dD][iI][sS].*[fF][iI][lL][eE][nN][aA][mM][eE]/ in data )
{
if ( ! c$smtp?$files )
c$smtp$files = set();
data = sub(data, /^.*[fF][iI][lL][eE][nN][aA][mM][eE]=/, "");
add c$smtp$files[data];
}
return;
}
if ( /^[[:blank:]]*$/ in data )
c$smtp$in_headers = F;
# This is to reconstruct headers that tend to wrap around.
if ( /^[[:blank:]]/ in data )
{
# Remove all but a single space at the beginning (this seems to follow
# the most common behavior).
data = sub(data, /^[[:blank:]]*/, " ");
if ( c$smtp$current_header == "MESSAGE-ID" )
c$smtp$msg_id += data;
else if ( c$smtp$current_header == "RECEIVED" )
c$smtp$first_received += data;
else if ( c$smtp$current_header == "IN-REPLY-TO" )
c$smtp$in_reply_to += data;
else if ( c$smtp$current_header == "SUBJECCT" )
c$smtp$subject += data;
else if ( c$smtp$current_header == "FROM" )
c$smtp$from += data;
else if ( c$smtp$current_header == "REPLY-TO" )
c$smtp$reply_to += data;
else if ( c$smtp$current_header == "USER-AGENT" )
c$smtp$user_agent += data;
return;
}
# Once there isn't a line starting with a blank, we're not continuing a
# header anymore.
c$smtp$current_header = "";
local header_parts = split1(data, /:[[:blank:]]*/);
# TODO: do something in this case? This would definitely be odd.
# Header wrapping needs to be handled more elegantly. This will happen
# if the header value is wrapped immediately after the header key.
if ( |header_parts| != 2 )
return;
local header_key = to_upper(header_parts[1]);
c$smtp$current_header = header_key;
local header_val = header_parts[2];
if ( header_key == "MESSAGE-ID" )
c$smtp$msg_id = header_val;
else if ( header_key == "RECEIVED" )
{
if ( c$smtp?$first_received )
c$smtp$second_received = c$smtp$first_received;
c$smtp$first_received = header_val;
}
else if ( header_key == "IN-REPLY-TO" )
c$smtp$in_reply_to = header_val;
else if ( header_key == "DATE" )
c$smtp$date = header_val;
else if ( header_key == "FROM" )
c$smtp$from = header_val;
else if ( header_key == "TO" )
{
if ( ! c$smtp?$to )
c$smtp$to = set();
add c$smtp$to[header_val];
}
else if ( header_key == "REPLY-TO" )
c$smtp$reply_to = header_val;
else if ( header_key == "SUBJECT" )
c$smtp$subject = header_val;
else if ( header_key == "X-ORIGINATING-IP" )
{
local addresses = find_ip_addresses(header_val);
if ( 0 in addresses )
c$smtp$x_originating_ip = to_addr(addresses[0]);
}
else if ( header_key == "X-MAILER" ||
header_key == "USER-AGENT" ||
header_key == "X-USER-AGENT" )
{
c$smtp$user_agent = header_val;
# Explicitly set the current header here because there are several
# headers bulked under this same key.
c$smtp$current_header = "USER-AGENT";
}
}
# This event handler builds the "Received From" path by reading the
# headers in the mail
event smtp_data(c: connection, is_orig: bool, data: string) &priority=3
{
# If we've decided that we're done watching the received headers for
# whatever reason, we're done. Could be due to only watching until
# local addresses are seen in the received from headers.
if ( c$smtp$current_header != "RECEIVED" ||
! c$smtp$process_received_from )
return;
local text_ip = find_address_in_smtp_header(data);
if ( text_ip == "" )
return;
local ip = to_addr(text_ip);
if ( ! addr_matches_host(ip, mail_path_capture) &&
! Site::is_private_addr(ip) )
{
c$smtp$process_received_from = F;
}
if ( c$smtp$path[|c$smtp$path|-1] != ip )
c$smtp$path[|c$smtp$path|] = ip;
}
event connection_state_remove(c: connection) &priority=-5
{
if ( c?$smtp && ! c$smtp$done )
smtp_message(c);
}

View file

@ -0,0 +1 @@
@load ./main

View file

@ -0,0 +1,248 @@
module SSH;
export {
redef enum Log::ID += { SSH };
redef enum Notice::Type += {
Login,
Password_Guessing,
Login_By_Password_Guesser,
Login_From_Interesting_Hostname,
Bytecount_Inconsistency,
};
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
status: string &log &optional;
direction: string &log &optional;
remote_location: geo_location &log &optional;
client: string &log &optional;
server: string &log &optional;
resp_size: count &log &default=0;
## Indicate if the SSH session is done being watched.
done: bool &default=F;
};
const password_guesses_limit = 30 &redef;
# The size in bytes at which the SSH connection is presumed to be
# successful.
const authentication_data_size = 5500 &redef;
# The amount of time to remember presumed non-successful logins to build
# model of a password guesser.
const guessing_timeout = 30 mins &redef;
# The set of countries for which you'd like to throw notices upon successful login.
# Requires Bro to be compiled with libGeoIP support.
const watched_countries: set[string] = {"RO"} &redef;
# Strange/bad hostnames that raise a notice when they originate a successful SSH login.
const interesting_hostnames =
/^d?ns[0-9]*\./ |
/^smtp[0-9]*\./ |
/^mail[0-9]*\./ |
/^pop[0-9]*\./ |
/^imap[0-9]*\./ |
/^www[0-9]*\./ |
/^ftp[0-9]*\./ &redef;
# This is a table with orig subnet as the key, and subnet as the value.
const ignore_guessers: table[subnet] of subnet &redef;
# If true, we tell the event engine to not look at further data
# packets after the initial SSH handshake. Helps with performance
# (especially with large file transfers) but precludes some
# kinds of analyses (e.g., tracking connection size).
const skip_processing_after_detection = F &redef;
# Keeps count of how many rejections a host has had
global password_rejections: table[addr] of TrackCount
&write_expire=guessing_timeout
&synchronized;
# Keeps track of hosts identified as guessing passwords
# TODO: guessing_timeout doesn't work correctly here. If a user redefs
# the variable, it won't take effect.
global password_guessers: set[addr] &read_expire=guessing_timeout+1hr &synchronized;
global log_ssh: event(rec: Info);
}
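# Hedged tuning sketch (not part of the original script): the thresholds and
# watch lists above are &redef, so a site could adjust them from its local
# policy, for example:
#
#   redef SSH::password_guesses_limit = 10;
#   redef SSH::watched_countries += { "XX" };   # "XX" is a placeholder code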
# Configure DPD and the packet filter
redef capture_filters += { ["ssh"] = "tcp port 22" };
redef dpd_config += { [ANALYZER_SSH] = [$ports = set(22/tcp)] };
redef record connection += {
ssh: Info &optional;
};
event bro_init() &priority=5
{
Log::create_stream(SSH, [$columns=Info, $ev=log_ssh]);
}
function set_session(c: connection)
{
if ( ! c?$ssh )
{
local info: Info;
info$ts=network_time();
info$uid=c$uid;
info$id=c$id;
c$ssh = info;
}
}
function check_ssh_connection(c: connection, done: bool)
{
# If done watching this connection, just return.
if ( c$ssh$done )
return;
# If this is still a live connection and the byte count has not
# crossed the threshold, just return and let the rescheduled check happen later.
if ( !done && c$resp$size < authentication_data_size )
return;
# Make sure the server has sent back more than 50 bytes to filter out
# hosts that are just port scanning. Nothing is ever logged if the server
# doesn't send back at least 50 bytes.
if ( c$resp$size < 50 )
return;
local status = "failure";
local direction = Site::is_local_addr(c$id$orig_h) ? "to" : "from";
local location: geo_location;
location = (direction == "to") ? lookup_location(c$id$resp_h) : lookup_location(c$id$orig_h);
if ( done && c$resp$size < authentication_data_size )
{
# presumed failure
if ( c$id$orig_h !in password_rejections )
password_rejections[c$id$orig_h] = new_track_count();
# Track the number of rejections
if ( !(c$id$orig_h in ignore_guessers &&
c$id$resp_h in ignore_guessers[c$id$orig_h]) )
++password_rejections[c$id$orig_h]$n;
if ( default_check_threshold(password_rejections[c$id$orig_h]) )
{
add password_guessers[c$id$orig_h];
NOTICE([$note=Password_Guessing,
$conn=c,
$msg=fmt("SSH password guessing by %s", c$id$orig_h),
$sub=fmt("%d failed logins", password_rejections[c$id$orig_h]$n),
$n=password_rejections[c$id$orig_h]$n]);
}
}
# TODO: This is to work around a quasi-bug in Bro which occasionally
# causes the byte count to be oversized.
# Watch for Gregor's work that adds an actual counter of bytes transferred.
else if ( c$resp$size < 20000000 )
{
# presumed successful login
status = "success";
c$ssh$done = T;
if ( c$id$orig_h in password_rejections &&
password_rejections[c$id$orig_h]$n > password_guesses_limit &&
c$id$orig_h !in password_guessers )
{
add password_guessers[c$id$orig_h];
NOTICE([$note=Login_By_Password_Guesser,
$conn=c,
$n=password_rejections[c$id$orig_h]$n,
$msg=fmt("Successful SSH login by password guesser %s", c$id$orig_h),
$sub=fmt("%d failed logins", password_rejections[c$id$orig_h]$n)]);
}
local message = fmt("SSH login %s %s \"%s\" \"%s\" %f %f %s (triggered with %d bytes)",
direction, location$country_code, location$region, location$city,
location$latitude, location$longitude,
id_string(c$id), c$resp$size);
NOTICE([$note=Login,
$conn=c,
$msg=message,
$sub=location$country_code]);
# Check to see if this login came from an interesting hostname
when ( local hostname = lookup_addr(c$id$orig_h) )
{
if ( interesting_hostnames in hostname )
{
NOTICE([$note=Login_From_Interesting_Hostname,
$conn=c,
$msg=fmt("Strange login from %s", hostname),
$sub=hostname]);
}
}
if ( location$country_code in watched_countries )
{
# Placeholder: no additional action is currently taken for logins from
# watched countries.
}
}
else if ( c$resp$size >= 200000000 )
{
NOTICE([$note=Bytecount_Inconsistency,
$conn=c,
$msg="During byte counting in SSH analysis, an overly large value was seen.",
$sub=fmt("%d",c$resp$size)]);
}
c$ssh$remote_location = location;
c$ssh$status = status;
c$ssh$direction = direction;
c$ssh$resp_size = c$resp$size;
Log::write(SSH, c$ssh);
# Set the "done" flag to prevent the watching event from rescheduling
# after detection is done.
c$ssh$done = T;
# Stop watching this connection, we don't care about it anymore.
if ( skip_processing_after_detection )
{
skip_further_processing(c$id);
set_record_packets(c$id, F);
}
}
event connection_state_remove(c: connection) &priority=-5
{
if ( c?$ssh )
check_ssh_connection(c, T);
}
event ssh_watcher(c: connection)
{
local id = c$id;
# don't go any further if this connection is gone already!
if ( !connection_exists(id) )
return;
check_ssh_connection(c, F);
if ( ! c$ssh$done )
schedule +15secs { ssh_watcher(c) };
}
event ssh_server_version(c: connection, version: string) &priority=5
{
set_session(c);
c$ssh$server = version;
}
event ssh_client_version(c: connection, version: string) &priority=5
{
set_session(c);
c$ssh$client = version;
schedule +15secs { ssh_watcher(c) };
}

View file

@ -0,0 +1,3 @@
@load ./consts
@load ./main
@load ./mozilla-ca-list

View file

@ -0,0 +1,532 @@
module SSL;
export {
const SSLv2 = 0x0002;
const SSLv3 = 0x0300;
const TLSv10 = 0x0301;
const TLSv11 = 0x0302;
const version_strings: table[count] of string = {
[SSLv2] = "SSLv2",
[SSLv3] = "SSLv3",
[TLSv10] = "TLSv10",
[TLSv11] = "TLSv11",
} &default="UNKNOWN";
# http://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xml
const extensions: table[count] of string = {
[0] = "server_name",
[1] = "max_fragment_length",
[2] = "client_certificate_url",
[3] = "trusted_ca_keys",
[4] = "truncated_hmac",
[5] = "status_request",
[6] = "user_mapping",
[7] = "client_authz",
[8] = "server_authz",
[9] = "cert_type",
[10] = "elliptic_curves",
[11] = "ec_point_formats",
[12] = "srp",
[13] = "signature_algorithms",
[14] = "use_srtp",
[35] = "SessionTicket TLS",
[65281] = "renegotiation_info"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
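# Illustrative behavior (not part of the original script): extensions[35]
# yields "SessionTicket TLS", while an unlisted value such as extensions[40]
# falls through to the &default function and yields "unknown-40".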
## SSLv2
const SSLv20_CK_RC4_128_WITH_MD5 = 0x010080;
const SSLv20_CK_RC4_128_EXPORT40_WITH_MD5 = 0x020080;
const SSLv20_CK_RC2_128_CBC_WITH_MD5 = 0x030080;
const SSLv20_CK_RC2_128_CBC_EXPORT40_WITH_MD5 = 0x040080;
const SSLv20_CK_IDEA_128_CBC_WITH_MD5 = 0x050080;
const SSLv20_CK_DES_64_CBC_WITH_MD5 = 0x060040;
const SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5 = 0x0700C0;
## TLS
const TLS_NULL_WITH_NULL_NULL = 0x0000;
const TLS_RSA_WITH_NULL_MD5 = 0x0001;
const TLS_RSA_WITH_NULL_SHA = 0x0002;
const TLS_RSA_EXPORT_WITH_RC4_40_MD5 = 0x0003;
const TLS_RSA_WITH_RC4_128_MD5 = 0x0004;
const TLS_RSA_WITH_RC4_128_SHA = 0x0005;
const TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = 0x0006;
const TLS_RSA_WITH_IDEA_CBC_SHA = 0x0007;
const TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x0008;
const TLS_RSA_WITH_DES_CBC_SHA = 0x0009;
const TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A;
const TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = 0x000B;
const TLS_DH_DSS_WITH_DES_CBC_SHA = 0x000C;
const TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = 0x000D;
const TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x000E;
const TLS_DH_RSA_WITH_DES_CBC_SHA = 0x000F;
const TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = 0x0010;
const TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = 0x0011;
const TLS_DHE_DSS_WITH_DES_CBC_SHA = 0x0012;
const TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = 0x0013;
const TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x0014;
const TLS_DHE_RSA_WITH_DES_CBC_SHA = 0x0015;
const TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = 0x0016;
const TLS_DH_ANON_EXPORT_WITH_RC4_40_MD5 = 0x0017;
const TLS_DH_ANON_WITH_RC4_128_MD5 = 0x0018;
const TLS_DH_ANON_EXPORT_WITH_DES40_CBC_SHA = 0x0019;
const TLS_DH_ANON_WITH_DES_CBC_SHA = 0x001A;
const TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA = 0x001B;
const SSL_FORTEZZA_KEA_WITH_NULL_SHA = 0x001C;
const SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA = 0x001D;
const TLS_KRB5_WITH_DES_CBC_SHA = 0x001E;
const TLS_KRB5_WITH_3DES_EDE_CBC_SHA = 0x001F;
const TLS_KRB5_WITH_RC4_128_SHA = 0x0020;
const TLS_KRB5_WITH_IDEA_CBC_SHA = 0x0021;
const TLS_KRB5_WITH_DES_CBC_MD5 = 0x0022;
const TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = 0x0023;
const TLS_KRB5_WITH_RC4_128_MD5 = 0x0024;
const TLS_KRB5_WITH_IDEA_CBC_MD5 = 0x0025;
const TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = 0x0026;
const TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = 0x0027;
const TLS_KRB5_EXPORT_WITH_RC4_40_SHA = 0x0028;
const TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = 0x0029;
const TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = 0x002A;
const TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = 0x002B;
const TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F;
const TLS_DH_DSS_WITH_AES_128_CBC_SHA = 0x0030;
const TLS_DH_RSA_WITH_AES_128_CBC_SHA = 0x0031;
const TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032;
const TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033;
const TLS_DH_ANON_WITH_AES_128_CBC_SHA = 0x0034;
const TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035;
const TLS_DH_DSS_WITH_AES_256_CBC_SHA = 0x0036;
const TLS_DH_RSA_WITH_AES_256_CBC_SHA = 0x0037;
const TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038;
const TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039;
const TLS_DH_ANON_WITH_AES_256_CBC_SHA = 0x003A;
const TLS_RSA_WITH_NULL_SHA256 = 0x003B;
const TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C;
const TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D;
const TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = 0x003E;
const TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = 0x003F;
const TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040;
const TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0041;
const TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = 0x0042;
const TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0043;
const TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = 0x0044;
const TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0045;
const TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA = 0x0046;
const TLS_RSA_EXPORT1024_WITH_RC4_56_MD5 = 0x0060;
const TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5 = 0x0061;
const TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA = 0x0062;
const TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA = 0x0063;
const TLS_RSA_EXPORT1024_WITH_RC4_56_SHA = 0x0064;
const TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA = 0x0065;
const TLS_DHE_DSS_WITH_RC4_128_SHA = 0x0066;
const TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067;
const TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = 0x0068;
const TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = 0x0069;
const TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A;
const TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B;
const TLS_DH_ANON_WITH_AES_128_CBC_SHA256 = 0x006C;
const TLS_DH_ANON_WITH_AES_256_CBC_SHA256 = 0x006D;
const TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0084;
const TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = 0x0085;
const TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0086;
const TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = 0x0087;
const TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0088;
const TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA = 0x0089;
const TLS_PSK_WITH_RC4_128_SHA = 0x008A;
const TLS_PSK_WITH_3DES_EDE_CBC_SHA = 0x008B;
const TLS_PSK_WITH_AES_128_CBC_SHA = 0x008C;
const TLS_PSK_WITH_AES_256_CBC_SHA = 0x008D;
const TLS_DHE_PSK_WITH_RC4_128_SHA = 0x008E;
const TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = 0x008F;
const TLS_DHE_PSK_WITH_AES_128_CBC_SHA = 0x0090;
const TLS_DHE_PSK_WITH_AES_256_CBC_SHA = 0x0091;
const TLS_RSA_PSK_WITH_RC4_128_SHA = 0x0092;
const TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = 0x0093;
const TLS_RSA_PSK_WITH_AES_128_CBC_SHA = 0x0094;
const TLS_RSA_PSK_WITH_AES_256_CBC_SHA = 0x0095;
const TLS_RSA_WITH_SEED_CBC_SHA = 0x0096;
const TLS_DH_DSS_WITH_SEED_CBC_SHA = 0x0097;
const TLS_DH_RSA_WITH_SEED_CBC_SHA = 0x0098;
const TLS_DHE_DSS_WITH_SEED_CBC_SHA = 0x0099;
const TLS_DHE_RSA_WITH_SEED_CBC_SHA = 0x009A;
const TLS_DH_ANON_WITH_SEED_CBC_SHA = 0x009B;
const TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C;
const TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D;
const TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E;
const TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F;
const TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = 0x00A0;
const TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = 0x00A1;
const TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2;
const TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3;
const TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = 0x00A4;
const TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = 0x00A5;
const TLS_DH_ANON_WITH_AES_128_GCM_SHA256 = 0x00A6;
const TLS_DH_ANON_WITH_AES_256_GCM_SHA384 = 0x00A7;
const TLS_PSK_WITH_AES_128_GCM_SHA256 = 0x00A8;
const TLS_PSK_WITH_AES_256_GCM_SHA384 = 0x00A9;
const TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = 0x00AA;
const TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = 0x00AB;
const TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = 0x00AC;
const TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = 0x00AD;
const TLS_PSK_WITH_AES_128_CBC_SHA256 = 0x00AE;
const TLS_PSK_WITH_AES_256_CBC_SHA384 = 0x00AF;
const TLS_PSK_WITH_NULL_SHA256 = 0x00B0;
const TLS_PSK_WITH_NULL_SHA384 = 0x00B1;
const TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = 0x00B2;
const TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = 0x00B3;
const TLS_DHE_PSK_WITH_NULL_SHA256 = 0x00B4;
const TLS_DHE_PSK_WITH_NULL_SHA384 = 0x00B5;
const TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = 0x00B6;
const TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = 0x00B7;
const TLS_RSA_PSK_WITH_NULL_SHA256 = 0x00B8;
const TLS_RSA_PSK_WITH_NULL_SHA384 = 0x00B9;
const TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BA;
const TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BB;
const TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BC;
const TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BD;
const TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BE;
const TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BF;
const TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C0;
const TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C1;
const TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C2;
const TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C3;
const TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C4;
const TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C5;
const TLS_ECDH_ECDSA_WITH_NULL_SHA = 0xC001;
const TLS_ECDH_ECDSA_WITH_RC4_128_SHA = 0xC002;
const TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = 0xC003;
const TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = 0xC004;
const TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = 0xC005;
const TLS_ECDHE_ECDSA_WITH_NULL_SHA = 0xC006;
const TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = 0xC007;
const TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = 0xC008;
const TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009;
const TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A;
const TLS_ECDH_RSA_WITH_NULL_SHA = 0xC00B;
const TLS_ECDH_RSA_WITH_RC4_128_SHA = 0xC00C;
const TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = 0xC00D;
const TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = 0xC00E;
const TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = 0xC00F;
const TLS_ECDHE_RSA_WITH_NULL_SHA = 0xC010;
const TLS_ECDHE_RSA_WITH_RC4_128_SHA = 0xC011;
const TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = 0xC012;
const TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013;
const TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014;
const TLS_ECDH_ANON_WITH_NULL_SHA = 0xC015;
const TLS_ECDH_ANON_WITH_RC4_128_SHA = 0xC016;
const TLS_ECDH_ANON_WITH_3DES_EDE_CBC_SHA = 0xC017;
const TLS_ECDH_ANON_WITH_AES_128_CBC_SHA = 0xC018;
const TLS_ECDH_ANON_WITH_AES_256_CBC_SHA = 0xC019;
const TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0xC01A;
const TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0xC01B;
const TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = 0xC01C;
const TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0xC01D;
const TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0xC01E;
const TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = 0xC01F;
const TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0xC020;
const TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0xC021;
const TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = 0xC022;
const TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023;
const TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024;
const TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC025;
const TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC026;
const TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027;
const TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028;
const TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = 0xC029;
const TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = 0xC02A;
const TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B;
const TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C;
const TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02D;
const TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02E;
const TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F;
const TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030;
const TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = 0xC031;
const TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = 0xC032;
const TLS_ECDHE_PSK_WITH_RC4_128_SHA = 0xC033;
const TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = 0xC034;
const TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = 0xC035;
const TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = 0xC036;
const TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = 0xC037;
const TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = 0xC038;
const TLS_ECDHE_PSK_WITH_NULL_SHA = 0xC039;
const TLS_ECDHE_PSK_WITH_NULL_SHA256 = 0xC03A;
const TLS_ECDHE_PSK_WITH_NULL_SHA384 = 0xC03B;
const SSL_RSA_FIPS_WITH_DES_CBC_SHA = 0xFEFE;
const SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA = 0xFEFF;
const SSL_RSA_FIPS_WITH_DES_CBC_SHA_2 = 0xFFE1;
const SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2 = 0xFFE0;
const SSL_RSA_WITH_RC2_CBC_MD5 = 0xFF80;
const SSL_RSA_WITH_IDEA_CBC_MD5 = 0xFF81;
const SSL_RSA_WITH_DES_CBC_MD5 = 0xFF82;
const SSL_RSA_WITH_3DES_EDE_CBC_MD5 = 0xFF83;
const TLS_EMPTY_RENEGOTIATION_INFO_SCSV = 0x00FF;
# --- This is a table of all known cipher specs.
# --- It can be used for detecting unknown ciphers and for
# --- converting the cipher spec constants into a human readable format.
const cipher_desc: table[count] of string = {
# --- sslv20 ---
[SSLv20_CK_RC4_128_EXPORT40_WITH_MD5] =
"SSLv20_CK_RC4_128_EXPORT40_WITH_MD5",
[SSLv20_CK_RC4_128_WITH_MD5] = "SSLv20_CK_RC4_128_WITH_MD5",
[SSLv20_CK_RC2_128_CBC_WITH_MD5] = "SSLv20_CK_RC2_128_CBC_WITH_MD5",
[SSLv20_CK_RC2_128_CBC_EXPORT40_WITH_MD5] =
"SSLv20_CK_RC2_128_CBC_EXPORT40_WITH_MD5",
[SSLv20_CK_IDEA_128_CBC_WITH_MD5] = "SSLv20_CK_IDEA_128_CBC_WITH_MD5",
[SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5] =
"SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5",
[SSLv20_CK_DES_64_CBC_WITH_MD5] = "SSLv20_CK_DES_64_CBC_WITH_MD5",
# --- TLS ---
[TLS_NULL_WITH_NULL_NULL] = "TLS_NULL_WITH_NULL_NULL",
[TLS_RSA_WITH_NULL_MD5] = "TLS_RSA_WITH_NULL_MD5",
[TLS_RSA_WITH_NULL_SHA] = "TLS_RSA_WITH_NULL_SHA",
[TLS_RSA_EXPORT_WITH_RC4_40_MD5] = "TLS_RSA_EXPORT_WITH_RC4_40_MD5",
[TLS_RSA_WITH_RC4_128_MD5] = "TLS_RSA_WITH_RC4_128_MD5",
[TLS_RSA_WITH_RC4_128_SHA] = "TLS_RSA_WITH_RC4_128_SHA",
[TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5] = "TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5",
[TLS_RSA_WITH_IDEA_CBC_SHA] = "TLS_RSA_WITH_IDEA_CBC_SHA",
[TLS_RSA_EXPORT_WITH_DES40_CBC_SHA] = "TLS_RSA_EXPORT_WITH_DES40_CBC_SHA",
[TLS_RSA_WITH_DES_CBC_SHA] = "TLS_RSA_WITH_DES_CBC_SHA",
[TLS_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
[TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA",
[TLS_DH_DSS_WITH_DES_CBC_SHA] = "TLS_DH_DSS_WITH_DES_CBC_SHA",
[TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA] = "TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA",
[TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA",
[TLS_DH_RSA_WITH_DES_CBC_SHA] = "TLS_DH_RSA_WITH_DES_CBC_SHA",
[TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA",
[TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA",
[TLS_DHE_DSS_WITH_DES_CBC_SHA] = "TLS_DHE_DSS_WITH_DES_CBC_SHA",
[TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA] = "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA",
[TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA",
[TLS_DHE_RSA_WITH_DES_CBC_SHA] = "TLS_DHE_RSA_WITH_DES_CBC_SHA",
[TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA",
[TLS_DH_ANON_EXPORT_WITH_RC4_40_MD5] = "TLS_DH_ANON_EXPORT_WITH_RC4_40_MD5",
[TLS_DH_ANON_WITH_RC4_128_MD5] = "TLS_DH_ANON_WITH_RC4_128_MD5",
[TLS_DH_ANON_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DH_ANON_EXPORT_WITH_DES40_CBC_SHA",
[TLS_DH_ANON_WITH_DES_CBC_SHA] = "TLS_DH_ANON_WITH_DES_CBC_SHA",
[TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA] = "TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA",
[SSL_FORTEZZA_KEA_WITH_NULL_SHA] = "SSL_FORTEZZA_KEA_WITH_NULL_SHA",
[SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA] = "SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA",
[TLS_KRB5_WITH_DES_CBC_SHA] = "TLS_KRB5_WITH_DES_CBC_SHA",
[TLS_KRB5_WITH_3DES_EDE_CBC_SHA] = "TLS_KRB5_WITH_3DES_EDE_CBC_SHA",
[TLS_KRB5_WITH_RC4_128_SHA] = "TLS_KRB5_WITH_RC4_128_SHA",
[TLS_KRB5_WITH_IDEA_CBC_SHA] = "TLS_KRB5_WITH_IDEA_CBC_SHA",
[TLS_KRB5_WITH_DES_CBC_MD5] = "TLS_KRB5_WITH_DES_CBC_MD5",
[TLS_KRB5_WITH_3DES_EDE_CBC_MD5] = "TLS_KRB5_WITH_3DES_EDE_CBC_MD5",
[TLS_KRB5_WITH_RC4_128_MD5] = "TLS_KRB5_WITH_RC4_128_MD5",
[TLS_KRB5_WITH_IDEA_CBC_MD5] = "TLS_KRB5_WITH_IDEA_CBC_MD5",
[TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA] = "TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA",
[TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA] = "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA",
[TLS_KRB5_EXPORT_WITH_RC4_40_SHA] = "TLS_KRB5_EXPORT_WITH_RC4_40_SHA",
[TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5] = "TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5",
[TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5] = "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5",
[TLS_KRB5_EXPORT_WITH_RC4_40_MD5] = "TLS_KRB5_EXPORT_WITH_RC4_40_MD5",
[TLS_RSA_WITH_AES_128_CBC_SHA] = "TLS_RSA_WITH_AES_128_CBC_SHA",
[TLS_DH_DSS_WITH_AES_128_CBC_SHA] = "TLS_DH_DSS_WITH_AES_128_CBC_SHA",
[TLS_DH_RSA_WITH_AES_128_CBC_SHA] = "TLS_DH_RSA_WITH_AES_128_CBC_SHA",
[TLS_DHE_DSS_WITH_AES_128_CBC_SHA] = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA",
[TLS_DHE_RSA_WITH_AES_128_CBC_SHA] = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA",
[TLS_DH_ANON_WITH_AES_128_CBC_SHA] = "TLS_DH_ANON_WITH_AES_128_CBC_SHA",
[TLS_RSA_WITH_AES_256_CBC_SHA] = "TLS_RSA_WITH_AES_256_CBC_SHA",
[TLS_DH_DSS_WITH_AES_256_CBC_SHA] = "TLS_DH_DSS_WITH_AES_256_CBC_SHA",
[TLS_DH_RSA_WITH_AES_256_CBC_SHA] = "TLS_DH_RSA_WITH_AES_256_CBC_SHA",
[TLS_DHE_DSS_WITH_AES_256_CBC_SHA] = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA",
[TLS_DHE_RSA_WITH_AES_256_CBC_SHA] = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA",
[TLS_DH_ANON_WITH_AES_256_CBC_SHA] = "TLS_DH_ANON_WITH_AES_256_CBC_SHA",
[TLS_RSA_WITH_NULL_SHA256] = "TLS_RSA_WITH_NULL_SHA256",
[TLS_RSA_WITH_AES_128_CBC_SHA256] = "TLS_RSA_WITH_AES_128_CBC_SHA256",
[TLS_RSA_WITH_AES_256_CBC_SHA256] = "TLS_RSA_WITH_AES_256_CBC_SHA256",
[TLS_DH_DSS_WITH_AES_128_CBC_SHA256] = "TLS_DH_DSS_WITH_AES_128_CBC_SHA256",
[TLS_DH_RSA_WITH_AES_128_CBC_SHA256] = "TLS_DH_RSA_WITH_AES_128_CBC_SHA256",
[TLS_DHE_DSS_WITH_AES_128_CBC_SHA256] = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256",
[TLS_RSA_WITH_CAMELLIA_128_CBC_SHA] = "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA",
[TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA",
[TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA",
[TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA",
[TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA",
[TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA",
[TLS_RSA_EXPORT1024_WITH_RC4_56_MD5] = "TLS_RSA_EXPORT1024_WITH_RC4_56_MD5",
[TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5] = "TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5",
[TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA] = "TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA",
[TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA] = "TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA",
[TLS_RSA_EXPORT1024_WITH_RC4_56_SHA] = "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA",
[TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA] = "TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA",
[TLS_DHE_DSS_WITH_RC4_128_SHA] = "TLS_DHE_DSS_WITH_RC4_128_SHA",
[TLS_DHE_RSA_WITH_AES_128_CBC_SHA256] = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256",
[TLS_DH_DSS_WITH_AES_256_CBC_SHA256] = "TLS_DH_DSS_WITH_AES_256_CBC_SHA256",
[TLS_DH_RSA_WITH_AES_256_CBC_SHA256] = "TLS_DH_RSA_WITH_AES_256_CBC_SHA256",
[TLS_DHE_DSS_WITH_AES_256_CBC_SHA256] = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256",
[TLS_DHE_RSA_WITH_AES_256_CBC_SHA256] = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256",
[TLS_DH_ANON_WITH_AES_128_CBC_SHA256] = "TLS_DH_ANON_WITH_AES_128_CBC_SHA256",
[TLS_DH_ANON_WITH_AES_256_CBC_SHA256] = "TLS_DH_ANON_WITH_AES_256_CBC_SHA256",
[TLS_RSA_WITH_CAMELLIA_256_CBC_SHA] = "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA",
[TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA",
[TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA",
[TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA",
[TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA",
[TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA",
[TLS_PSK_WITH_RC4_128_SHA] = "TLS_PSK_WITH_RC4_128_SHA",
[TLS_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_PSK_WITH_3DES_EDE_CBC_SHA",
[TLS_PSK_WITH_AES_128_CBC_SHA] = "TLS_PSK_WITH_AES_128_CBC_SHA",
[TLS_PSK_WITH_AES_256_CBC_SHA] = "TLS_PSK_WITH_AES_256_CBC_SHA",
[TLS_DHE_PSK_WITH_RC4_128_SHA] = "TLS_DHE_PSK_WITH_RC4_128_SHA",
[TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA",
[TLS_DHE_PSK_WITH_AES_128_CBC_SHA] = "TLS_DHE_PSK_WITH_AES_128_CBC_SHA",
[TLS_DHE_PSK_WITH_AES_256_CBC_SHA] = "TLS_DHE_PSK_WITH_AES_256_CBC_SHA",
[TLS_RSA_PSK_WITH_RC4_128_SHA] = "TLS_RSA_PSK_WITH_RC4_128_SHA",
[TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA",
[TLS_RSA_PSK_WITH_AES_128_CBC_SHA] = "TLS_RSA_PSK_WITH_AES_128_CBC_SHA",
[TLS_RSA_PSK_WITH_AES_256_CBC_SHA] = "TLS_RSA_PSK_WITH_AES_256_CBC_SHA",
[TLS_RSA_WITH_SEED_CBC_SHA] = "TLS_RSA_WITH_SEED_CBC_SHA",
[TLS_DH_DSS_WITH_SEED_CBC_SHA] = "TLS_DH_DSS_WITH_SEED_CBC_SHA",
[TLS_DH_RSA_WITH_SEED_CBC_SHA] = "TLS_DH_RSA_WITH_SEED_CBC_SHA",
[TLS_DHE_DSS_WITH_SEED_CBC_SHA] = "TLS_DHE_DSS_WITH_SEED_CBC_SHA",
[TLS_DHE_RSA_WITH_SEED_CBC_SHA] = "TLS_DHE_RSA_WITH_SEED_CBC_SHA",
[TLS_DH_ANON_WITH_SEED_CBC_SHA] = "TLS_DH_ANON_WITH_SEED_CBC_SHA",
[TLS_RSA_WITH_AES_128_GCM_SHA256] = "TLS_RSA_WITH_AES_128_GCM_SHA256",
[TLS_RSA_WITH_AES_256_GCM_SHA384] = "TLS_RSA_WITH_AES_256_GCM_SHA384",
[TLS_DHE_RSA_WITH_AES_128_GCM_SHA256] = "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
[TLS_DHE_RSA_WITH_AES_256_GCM_SHA384] = "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
[TLS_DH_RSA_WITH_AES_128_GCM_SHA256] = "TLS_DH_RSA_WITH_AES_128_GCM_SHA256",
[TLS_DH_RSA_WITH_AES_256_GCM_SHA384] = "TLS_DH_RSA_WITH_AES_256_GCM_SHA384",
[TLS_DHE_DSS_WITH_AES_128_GCM_SHA256] = "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256",
[TLS_DHE_DSS_WITH_AES_256_GCM_SHA384] = "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384",
[TLS_DH_DSS_WITH_AES_128_GCM_SHA256] = "TLS_DH_DSS_WITH_AES_128_GCM_SHA256",
[TLS_DH_DSS_WITH_AES_256_GCM_SHA384] = "TLS_DH_DSS_WITH_AES_256_GCM_SHA384",
[TLS_DH_ANON_WITH_AES_128_GCM_SHA256] = "TLS_DH_ANON_WITH_AES_128_GCM_SHA256",
[TLS_DH_ANON_WITH_AES_256_GCM_SHA384] = "TLS_DH_ANON_WITH_AES_256_GCM_SHA384",
[TLS_PSK_WITH_AES_128_GCM_SHA256] = "TLS_PSK_WITH_AES_128_GCM_SHA256",
[TLS_PSK_WITH_AES_256_GCM_SHA384] = "TLS_PSK_WITH_AES_256_GCM_SHA384",
[TLS_DHE_PSK_WITH_AES_128_GCM_SHA256] = "TLS_DHE_PSK_WITH_AES_128_GCM_SHA256",
[TLS_DHE_PSK_WITH_AES_256_GCM_SHA384] = "TLS_DHE_PSK_WITH_AES_256_GCM_SHA384",
[TLS_RSA_PSK_WITH_AES_128_GCM_SHA256] = "TLS_RSA_PSK_WITH_AES_128_GCM_SHA256",
[TLS_RSA_PSK_WITH_AES_256_GCM_SHA384] = "TLS_RSA_PSK_WITH_AES_256_GCM_SHA384",
[TLS_PSK_WITH_AES_128_CBC_SHA256] = "TLS_PSK_WITH_AES_128_CBC_SHA256",
[TLS_PSK_WITH_AES_256_CBC_SHA384] = "TLS_PSK_WITH_AES_256_CBC_SHA384",
[TLS_PSK_WITH_NULL_SHA256] = "TLS_PSK_WITH_NULL_SHA256",
[TLS_PSK_WITH_NULL_SHA384] = "TLS_PSK_WITH_NULL_SHA384",
[TLS_DHE_PSK_WITH_AES_128_CBC_SHA256] = "TLS_DHE_PSK_WITH_AES_128_CBC_SHA256",
[TLS_DHE_PSK_WITH_AES_256_CBC_SHA384] = "TLS_DHE_PSK_WITH_AES_256_CBC_SHA384",
[TLS_DHE_PSK_WITH_NULL_SHA256] = "TLS_DHE_PSK_WITH_NULL_SHA256",
[TLS_DHE_PSK_WITH_NULL_SHA384] = "TLS_DHE_PSK_WITH_NULL_SHA384",
[TLS_RSA_PSK_WITH_AES_128_CBC_SHA256] = "TLS_RSA_PSK_WITH_AES_128_CBC_SHA256",
[TLS_RSA_PSK_WITH_AES_256_CBC_SHA384] = "TLS_RSA_PSK_WITH_AES_256_CBC_SHA384",
[TLS_RSA_PSK_WITH_NULL_SHA256] = "TLS_RSA_PSK_WITH_NULL_SHA256",
[TLS_RSA_PSK_WITH_NULL_SHA384] = "TLS_RSA_PSK_WITH_NULL_SHA384",
[TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256",
[TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256",
[TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256",
[TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256",
[TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256",
[TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA256",
[TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256",
[TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256",
[TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256",
[TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256",
[TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256",
[TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256",
[TLS_ECDH_ECDSA_WITH_NULL_SHA] = "TLS_ECDH_ECDSA_WITH_NULL_SHA",
[TLS_ECDH_ECDSA_WITH_RC4_128_SHA] = "TLS_ECDH_ECDSA_WITH_RC4_128_SHA",
[TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA",
[TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA] = "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA",
[TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA] = "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA",
[TLS_ECDHE_ECDSA_WITH_NULL_SHA] = "TLS_ECDHE_ECDSA_WITH_NULL_SHA",
[TLS_ECDHE_ECDSA_WITH_RC4_128_SHA] = "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
[TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA",
[TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
[TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA] = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
[TLS_ECDH_RSA_WITH_NULL_SHA] = "TLS_ECDH_RSA_WITH_NULL_SHA",
[TLS_ECDH_RSA_WITH_RC4_128_SHA] = "TLS_ECDH_RSA_WITH_RC4_128_SHA",
[TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA",
[TLS_ECDH_RSA_WITH_AES_128_CBC_SHA] = "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA",
[TLS_ECDH_RSA_WITH_AES_256_CBC_SHA] = "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA",
[TLS_ECDHE_RSA_WITH_NULL_SHA] = "TLS_ECDHE_RSA_WITH_NULL_SHA",
[TLS_ECDHE_RSA_WITH_RC4_128_SHA] = "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
[TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
[TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
[TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
[TLS_ECDH_ANON_WITH_NULL_SHA] = "TLS_ECDH_ANON_WITH_NULL_SHA",
[TLS_ECDH_ANON_WITH_RC4_128_SHA] = "TLS_ECDH_ANON_WITH_RC4_128_SHA",
[TLS_ECDH_ANON_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDH_ANON_WITH_3DES_EDE_CBC_SHA",
[TLS_ECDH_ANON_WITH_AES_128_CBC_SHA] = "TLS_ECDH_ANON_WITH_AES_128_CBC_SHA",
[TLS_ECDH_ANON_WITH_AES_256_CBC_SHA] = "TLS_ECDH_ANON_WITH_AES_256_CBC_SHA",
[TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA] = "TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA",
[TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA",
[TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA] = "TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA",
[TLS_SRP_SHA_WITH_AES_128_CBC_SHA] = "TLS_SRP_SHA_WITH_AES_128_CBC_SHA",
[TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA] = "TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA",
[TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA] = "TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA",
[TLS_SRP_SHA_WITH_AES_256_CBC_SHA] = "TLS_SRP_SHA_WITH_AES_256_CBC_SHA",
[TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA] = "TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA",
[TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA] = "TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA",
[TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
[TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384] = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384",
[TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256",
[TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384] = "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384",
[TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
[TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384] = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384",
[TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256",
[TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384] = "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384",
[TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
[TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256",
[TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384",
[TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
[TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
[TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256",
[TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384",
[TLS_ECDHE_PSK_WITH_RC4_128_SHA] = "TLS_ECDHE_PSK_WITH_RC4_128_SHA",
[TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA",
[TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA] = "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA",
[TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA] = "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA",
[TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256",
[TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384] = "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384",
[TLS_ECDHE_PSK_WITH_NULL_SHA] = "TLS_ECDHE_PSK_WITH_NULL_SHA",
[TLS_ECDHE_PSK_WITH_NULL_SHA256] = "TLS_ECDHE_PSK_WITH_NULL_SHA256",
[TLS_ECDHE_PSK_WITH_NULL_SHA384] = "TLS_ECDHE_PSK_WITH_NULL_SHA384",
[SSL_RSA_FIPS_WITH_DES_CBC_SHA] = "SSL_RSA_FIPS_WITH_DES_CBC_SHA",
[SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA] = "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA",
[SSL_RSA_FIPS_WITH_DES_CBC_SHA_2] = "SSL_RSA_FIPS_WITH_DES_CBC_SHA_2",
[SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2] = "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2",
} &default="UNKNOWN";
const x509_errors: table[count] of string = {
[0] = "X509_V_OK",
[1] = "X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT",
[2] = "X509_V_ERR_UNABLE_TO_GET_CRL",
[3] = "X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE",
[4] = "X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE",
[5] = "X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY",
[6] = "X509_V_ERR_CERT_SIGNATURE_FAILURE",
[7] = "X509_V_ERR_CRL_SIGNATURE_FAILURE",
[8] = "X509_V_ERR_CERT_NOT_YET_VALID",
[9] = "X509_V_ERR_CERT_HAS_EXPIRED",
[10] = "X509_V_ERR_CRL_NOT_YET_VALID",
[11] = "X509_V_ERR_CRL_HAS_EXPIRED",
[12] = "X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD",
[13] = "X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD",
[14] = "X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD",
[15] = "X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD",
[16] = "X509_V_ERR_OUT_OF_MEM",
[17] = "X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT",
[18] = "X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN",
[19] = "X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY",
[20] = "X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE",
[21] = "X509_V_ERR_CERT_CHAIN_TOO_LONG",
[22] = "X509_V_ERR_CERT_REVOKED",
[23] = "X509_V_ERR_INVALID_CA",
[24] = "X509_V_ERR_PATH_LENGTH_EXCEEDED",
[25] = "X509_V_ERR_INVALID_PURPOSE",
[26] = "X509_V_ERR_CERT_UNTRUSTED",
[27] = "X509_V_ERR_CERT_REJECTED",
[28] = "X509_V_ERR_SUBJECT_ISSUER_MISMATCH",
[29] = "X509_V_ERR_AKID_SKID_MISMATCH",
[30] = "X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH",
[31] = "X509_V_ERR_KEYUSAGE_NO_CERTSIGN",
[32] = "X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER",
[33] = "X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION"
};
}
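
As a quick illustration (not part of this commit), the exported tables can be used from any other script; the helper below is hypothetical and assumes these constants live in the SSL module, as the main.bro that follows suggests. Unlike cipher_desc, x509_errors has no &default entry, so the lookup is guarded.

function ssl_x509_error_name(code: count): string
	{
	# Guard the lookup explicitly since SSL::x509_errors carries no &default.
	return ( code in SSL::x509_errors ) ? SSL::x509_errors[code] : fmt("UNKNOWN-%d", code);
	}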

View file

@ -0,0 +1,120 @@
module SSL;
export {
redef enum Log::ID += { SSL };
redef enum Notice::Type += {
Self_Signed_Cert
};
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
version: string &log &optional;
cipher: string &log &optional;
server_name: string &log &optional;
subject: string &log &optional;
not_valid_before: time &log &optional;
not_valid_after: time &log &optional;
cert: string &optional;
cert_chain: vector of string &optional;
};
## This is where the default root CA bundle is defined. By loading the
## mozilla-ca-list.bro script it will be set to Mozilla's root CA list.
const root_certs: table[string] of string = {} &redef;
global log_ssl: event(rec: Info);
const ports = {
443/tcp, 563/tcp, 585/tcp, 614/tcp, 636/tcp,
989/tcp, 990/tcp, 992/tcp, 993/tcp, 995/tcp, 5223/tcp
} &redef;
}
redef record connection += {
ssl: Info &optional;
};
event bro_init() &priority=5
{
Log::create_stream(SSL, [$columns=Info, $ev=log_ssl]);
}
redef capture_filters += {
["ssl"] = "tcp port 443",
["nntps"] = "tcp port 563",
["imap4-ssl"] = "tcp port 585",
["sshell"] = "tcp port 614",
["ldaps"] = "tcp port 636",
["ftps-data"] = "tcp port 989",
["ftps"] = "tcp port 990",
["telnets"] = "tcp port 992",
["imaps"] = "tcp port 993",
["ircs"] = "tcp port 994",
["pop3s"] = "tcp port 995",
["xmpps"] = "tcp port 5223",
};
redef dpd_config += {
[[ANALYZER_SSL]] = [$ports = ports]
};
function set_session(c: connection)
{
if ( ! c?$ssl )
c$ssl = [$ts=network_time(), $uid=c$uid, $id=c$id, $cert_chain=vector()];
}
event ssl_client_hello(c: connection, version: count, possible_ts: time, session_id: string, ciphers: count_set) &priority=5
{
set_session(c);
}
event ssl_server_hello(c: connection, version: count, possible_ts: time, session_id: string, cipher: count, comp_method: count) &priority=5
{
set_session(c);
c$ssl$version = version_strings[version];
c$ssl$cipher = cipher_desc[cipher];
}
event x509_certificate(c: connection, cert: X509, is_server: bool, chain_idx: count, chain_len: count, der_cert: string) &priority=5
{
set_session(c);
if ( chain_idx == 0 )
{
# Save the primary cert.
c$ssl$cert = der_cert;
# Also save other certificate information about the primary cert.
c$ssl$subject = cert$subject;
c$ssl$not_valid_before = cert$not_valid_before;
c$ssl$not_valid_after = cert$not_valid_after;
}
else
{
# Otherwise, add it to the cert validation chain.
c$ssl$cert_chain[|c$ssl$cert_chain|] = der_cert;
}
}
event ssl_extension(c: connection, code: count, val: string) &priority=5
{
set_session(c);
if ( extensions[code] == "server_name" )
c$ssl$server_name = sub_bytes(val, 6, |val|);
}
event ssl_established(c: connection) &priority=-5
{
set_session(c);
Log::write(SSL, c$ssl);
}
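
With the logging framework used above, other scripts can consume SSL records by handling the stream's event instead of modifying this script. A minimal, hypothetical consumer sketch (not part of this commit):

@load base/protocols/ssl

event SSL::log_ssl(rec: SSL::Info)
	{
	# Both fields are &optional, so check before using them.
	if ( rec?$server_name && rec?$cipher )
		print fmt("SSL: %s negotiated %s", rec$server_name, rec$cipher);
	}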

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,2 @@
@load ./consts
@load ./main

View file

@ -0,0 +1,41 @@
module Syslog;
export {
const facility_codes: table[count] of string = {
[0] = "KERN",
[1] = "USER",
[2] = "MAIL",
[3] = "DAEMON",
[4] = "AUTH",
[5] = "SYSLOG",
[6] = "LPR",
[7] = "NEWS",
[8] = "UUCP",
[9] = "CRON",
[10] = "AUTHPRIV",
[11] = "FTP",
[12] = "NTP",
[13] = "AUDIT",
[14] = "ALERT",
[15] = "CLOCK",
[16] = "LOCAL0",
[17] = "LOCAL1",
[18] = "LOCAL2",
[19] = "LOCAL3",
[20] = "LOCAL4",
[21] = "LOCAL5",
[22] = "LOCAL6",
[23] = "LOCAL7",
} &default=function(c: count): string { return fmt("?-%d", c); };
const severity_codes: table[count] of string = {
[0] = "EMERG",
[1] = "ALERT",
[2] = "CRIT",
[3] = "ERR",
[4] = "WARNING",
[5] = "NOTICE",
[6] = "INFO",
[7] = "DEBUG",
} &default=function(c: count): string { return fmt("?-%d", c); };
}
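
Because both tables carry a &default function, lookups never fail; unknown codes fall back to a formatted placeholder. A small hypothetical check:

event bro_init()
	{
	print Syslog::facility_codes[16];  # "LOCAL0"
	print Syslog::severity_codes[3];   # "ERR"
	print Syslog::facility_codes[42];  # "?-42", produced by the &default function
	}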

View file

@ -0,0 +1,52 @@
##! Core script support for logging syslog messages.
@load ./consts
module Syslog;
export {
redef enum Log::ID += { SYSLOG };
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
proto: transport_proto &log;
facility: string &log;
severity: string &log;
message: string &log;
};
const ports = { 514/udp } &redef;
}
redef capture_filters += { ["syslog"] = "port 514" };
redef dpd_config += { [ANALYZER_SYSLOG_BINPAC] = [$ports = ports] };
redef record connection += {
syslog: Info &optional;
};
event bro_init() &priority=5
{
Log::create_stream(SYSLOG, [$columns=Info]);
}
event syslog_message(c: connection, facility: count, severity: count, msg: string) &priority=5
{
local info: Info;
info$ts=network_time();
info$uid=c$uid;
info$id=c$id;
info$proto=get_port_transport_proto(c$id$resp_p);
info$facility=facility_codes[facility];
info$severity=severity_codes[severity];
info$message=msg;
c$syslog = info;
}
event syslog_message(c: connection, facility: count, severity: count, msg: string) &priority=-5
{
Log::write(SYSLOG, c$syslog);
}
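
The two handlers above bracket the work by priority: the record is populated at +5 and written at -5, which leaves room for other scripts to enrich it in between. A sketch of such an extension; the field name and condition are hypothetical:

redef record Syslog::Info += {
	## Site-local annotation filled in before the log write happens.
	note: string &log &optional;
};

event syslog_message(c: connection, facility: count, severity: count, msg: string) &priority=0
	{
	if ( c?$syslog && c$syslog$severity == "EMERG" )
		c$syslog$note = "emergency message seen";
	}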

View file

@ -0,0 +1,100 @@
##! Functions for parsing and manipulating IP addresses.
# Regular expressions for matching IP addresses in strings.
const ipv4_addr_regex = /[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}/;
const ipv6_8hex_regex = /([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4}/;
const ipv6_compressed_hex_regex = /(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)/;
const ipv6_hex4dec_regex = /(([0-9A-Fa-f]{1,4}:){6,6})([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/;
const ipv6_compressed_hex4dec_regex = /(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}:)*)([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/;
# These are commented out until patterns can be constructed this way at init time.
#const ipv6_addr_regex = ipv6_8hex_regex |
# ipv6_compressed_hex_regex |
# ipv6_hex4dec_regex |
# ipv6_compressed_hex4dec_regex;
#const ip_addr_regex = ipv4_addr_regex | ipv6_addr_regex;
const ipv6_addr_regex =
/([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4}/ |
/(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)/ | # IPv6 Compressed Hex
/(([0-9A-Fa-f]{1,4}:){6,6})([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/ | # 6Hex4Dec
/(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}:)*)([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/; # CompressedHex4Dec
const ip_addr_regex =
/[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}/ |
/([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4}/ |
/(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)/ | # IPv6 Compressed Hex
/(([0-9A-Fa-f]{1,4}:){6,6})([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/ | # 6Hex4Dec
/(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}:)*)([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/; # CompressedHex4Dec
## Checks if all elements of a string array are valid octet values.
## octets: an array of strings to check for valid octet values.
## Returns: T if every element is between 0 and 255, inclusive, else F.
function has_valid_octets(octets: string_array): bool
{
local num = 0;
for ( i in octets )
{
num = to_count(octets[i]);
if ( num < 0 || 255 < num )
return F;
}
return T;
}
## Checks if a string appears to be a valid IPv4 or IPv6 address.
## ip_str: the string to check for valid IP formatting.
## Returns: T if the string is a valid IPv4 or IPv6 address format.
function is_valid_ip(ip_str: string): bool
{
local octets: string_array;
if ( ip_str == ipv4_addr_regex )
{
octets = split(ip_str, /\./);
if ( |octets| != 4 )
return F;
return has_valid_octets(octets);
}
else if ( ip_str == ipv6_addr_regex )
{
if ( ip_str == ipv6_hex4dec_regex ||
ip_str == ipv6_compressed_hex4dec_regex )
{
# the regexes for hybrid IPv6-IPv4 address formats don't check for valid
# octets within the IPv4 part, so do that now
octets = split(ip_str, /\./);
if ( |octets| != 4 )
return F;
# get rid of remaining IPv6 stuff in first octet
local tmp = split(octets[1], /:/);
octets[1] = tmp[|tmp|];
return has_valid_octets(octets);
}
else
{
# pure IPv6 address formats that only use hex digits don't need
# any additional checks -- the regexes should be complete
return T;
}
}
return F;
}
## Extracts all IP (v4 or v6) address strings from a given string.
## input: a string that may contain an IP address anywhere within it.
## Returns: an array containing all valid IP address strings found in input.
function find_ip_addresses(input: string): string_array
{
local parts = split_all(input, ip_addr_regex);
local output: string_array;
for ( i in parts )
{
if ( i % 2 == 0 && is_valid_ip(parts[i]) )
output[|output|] = parts[i];
}
return output;
}
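
A short usage sketch with a hypothetical input string; only the candidate with valid octets survives the check:

event bro_init()
	{
	local found = find_ip_addresses("connect to 192.168.1.10 or 10.0.0.300");
	for ( i in found )
		print found[i];   # prints only "192.168.1.10"
	}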

View file

@ -0,0 +1,38 @@
##! Simple functions for generating ASCII strings from connection IDs.
module GLOBAL;
export {
## Takes a conn_id record and returns a string representation with the
## general data flow appearing to be from the connection originator
## on the left to the responder on the right.
global id_string: function(id: conn_id): string;
## Takes a conn_id record and returns a string representation with the
## general data flow appearing to be from the connection responder
## on the right to the originator on the left.
global reverse_id_string: function(id: conn_id): string;
## Calls :bro:id:`id_string` or :bro:id:`reverse_id_string` if the second
## argument is T or F, respectively.
global directed_id_string: function(id: conn_id, is_orig: bool): string;
}
function id_string(id: conn_id): string
{
return fmt("%s:%d > %s:%d",
id$orig_h, id$orig_p,
id$resp_h, id$resp_p);
}
function reverse_id_string(id: conn_id): string
{
return fmt("%s:%d < %s:%d",
id$orig_h, id$orig_p,
id$resp_h, id$resp_p);
}
function directed_id_string(id: conn_id, is_orig: bool): string
{
return is_orig ? id_string(id) : reverse_id_string(id);
}
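
For illustration, with a hypothetical conn_id describing 10.0.0.1:49152 -> 192.168.1.1:80:

#   id_string(id)             returns "10.0.0.1:49152 > 192.168.1.1:80"
#   reverse_id_string(id)     returns "10.0.0.1:49152 < 192.168.1.1:80"
#   directed_id_string(id, F) is equivalent to reverse_id_string(id)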

View file

@ -0,0 +1,58 @@
type Direction: enum {
## The connection originator is not within the locally-monitored network,
## but the other endpoint is.
INBOUND,
## The connection originator is within the locally-monitored network,
## but the other endpoint is not.
OUTBOUND,
## Only one endpoint is within the locally-monitored network, meaning
## the connection is either outbound or inbound.
BIDIRECTIONAL,
## This value doesn't match any connection.
NO_DIRECTION
};
## Checks whether a given connection is of a given direction with respect
## to the locally-monitored network.
## id: a connection record containing the originator/responder hosts.
## d: a direction with respect to the locally-monitored network
## Returns: T if the two connection endpoints match the given direction, else F.
function id_matches_direction(id: conn_id, d: Direction): bool
{
if ( d == NO_DIRECTION ) return F;
local o_local = Site::is_local_addr(id$orig_h);
local r_local = Site::is_local_addr(id$resp_h);
if ( d == BIDIRECTIONAL )
return (o_local && !r_local) || (!o_local && r_local);
else if ( d == OUTBOUND )
return o_local && !r_local;
else if ( d == INBOUND )
return !o_local && r_local;
}
type Host: enum {
## A host within the locally-monitored network.
LOCAL_HOSTS,
## A host not within the locally-monitored network.
REMOTE_HOSTS,
## Any host.
ALL_HOSTS,
## This value doesn't match any host.
NO_HOSTS
};
## Checks whether a given host (IP address) matches a given host type.
## ip: address of a host
## h: a host type
## Returns: T if the given host matches the given type, else F.
function addr_matches_host(ip: addr, h: Host): bool
{
if ( h == NO_HOSTS ) return F;
return ( h == ALL_HOSTS ||
(h == LOCAL_HOSTS && Site::is_local_addr(ip)) ||
(h == REMOTE_HOSTS && !Site::is_local_addr(ip)) );
}
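
A brief sketch of how these helpers combine with the Site module; the event body shown is hypothetical:

event connection_established(c: connection)
	{
	# Only consider sessions leaving the locally-monitored network.
	if ( id_matches_direction(c$id, OUTBOUND) && addr_matches_host(c$id$resp_h, REMOTE_HOSTS) )
		print fmt("outbound connection: %s", id_string(c$id));
	}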

View file

@ -0,0 +1,15 @@
## This function can be used to generate a consistent filename when the
## contents of a file, stream, or connection are being extracted to disk.
function generate_extraction_filename(prefix: string, c: connection, suffix: string): string
{
local conn_info = fmt("%s:%d-%s:%d",
c$id$orig_h, c$id$orig_p, c$id$resp_h, c$id$resp_p);
if ( prefix != "" )
conn_info = fmt("%s_%s", prefix, conn_info);
if ( suffix != "" )
conn_info = fmt("%s_%s", conn_info, suffix);
return conn_info;
}

View file

@ -0,0 +1,10 @@
## Extract the first integer found in the given string.
## If no integer can be found, 0 is returned.
function extract_count(s: string): count
{
local parts = split_n(s, /[0-9]+/, T, 1);
if ( 2 in parts )
return to_count(parts[2]);
else
return 0;
}
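
For example (hypothetical inputs):

#   extract_count("227 Entering Passive Mode")  returns 227
#   extract_count("no digits here")             returns 0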

View file

@ -0,0 +1,74 @@
##! Functions to parse and manipulate UNIX style paths and directories.
const absolute_path_pat = /(\/|[A-Za-z]:[\\\/]).*/;
## Given an arbitrary string, extracts a single, absolute path (directory
## with filename).
## TODO: Make this work on Windows-style directories.
## input: a string that may contain an absolute path
## Returns: the first absolute path found in input string, else an empty string
function extract_path(input: string): string
{
const dir_pattern = /(\/|[A-Za-z]:[\\\/])([^\"\ ]|(\\\ ))*/;
local parts = split_all(input, dir_pattern);
if ( |parts| < 3 )
return "";
return parts[2];
}
## Compresses a given path by removing '..'s and the parent directory each
## references, and by collapsing duplicate '/'s.
## dir: a path string, either relative or absolute
## Returns: a compressed version of the input path
function compress_path(dir: string): string
{
const cdup_sep = /((\/)*([^\/]|\\\/)+)?((\/)+\.\.(\/)*)/;
local parts = split_n(dir, cdup_sep, T, 1);
if ( |parts| > 1 )
{
# reaching a point with two parent dir references back-to-back means
# we don't know about anything higher in the tree to pop off
if ( parts[2] == "../.." )
return cat_string_array(parts);
if ( sub_bytes(parts[2], 0, 1) == "/" )
parts[2] = "/";
else
parts[2] = "";
dir = cat_string_array(parts);
return compress_path(dir);
}
const multislash_sep = /(\/){2,}/;
parts = split_all(dir, multislash_sep);
for ( i in parts )
if ( i % 2 == 0 )
parts[i] = "/";
dir = cat_string_array(parts);
# remove trailing slashes from path
if ( |dir| > 1 && sub_bytes(dir, |dir|, 1) == "/" )
dir = sub_bytes(dir, 0, |dir| - 1);
return dir;
}
## Constructs a path to a file given a directory and a file name.
## dir: the directory in which the file lives
## file_name: the name of the file
## Returns: the concatenation of the directory path and file name, or just
## the file name if it's already an absolute path
function build_path(dir: string, file_name: string): string
{
return (file_name == absolute_path_pat) ?
file_name : cat(dir, "/", file_name);
}
## Returns a compressed path to a file given a directory and file name.
## See :bro:id:`build_path` and :bro:id:`compress_path`.
function build_path_compressed(dir: string, file_name: string): string
{
return compress_path(build_path(dir, file_name));
}
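
A few illustrative results, assuming the helpers behave as written above:

#   compress_path("/a/b/../c//d/")                  yields "/a/c/d"
#   build_path_compressed("/var/log", "../tmp/x")   yields "/var/tmp/x"
#   build_path_compressed("/etc", "/abs/path")      yields "/abs/path"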

View file

@ -0,0 +1,52 @@
##! Functions for creating and working with patterns.
## Given a pattern as a string containing two tildes (~~), this returns a
## pattern in which the double-tilde is replaced by the string set's elements
## OR'd together (this function only works at or before init time).
## ss: a set of strings to OR together
## pat: the pattern containing a "~~" in it. If a literal backslash is
## included, it needs to be escaped with another backslash due to Bro's
## string parsing reducing it to a single backslash upon rendering.
## Returns: the input pattern with "~~" replaced by OR'd elements of input set
function set_to_regex(ss: set[string], pat: string): pattern
{
local i: count = 0;
local return_pat = "";
for ( s in ss )
{
local tmp_pattern = convert_for_pattern(s);
return_pat = ( i == 0 ) ?
tmp_pattern : cat(tmp_pattern, "|", return_pat);
++i;
}
return string_to_pattern(sub(pat, /~~/, return_pat), F);
}
type PatternMatchResult: record {
## T if a match was found, F otherwise.
matched: bool;
## Portion of string that first matched.
str: string;
## 1-based offset where match starts.
off: count;
};
## Matches the given pattern against the given string, returning
## a :bro:type:`PatternMatchResult` record.
## For example: ``match_pattern("foobar", /o*[a-k]/)`` returns
## ``[matched=T, str=f, off=1]``, because the *first* match is for
## zero o's followed by an [a-k], but ``match_pattern("foobar", /o+[a-k]/)``
## returns ``[matched=T, str=oob, off=2]``
## s: a string to match against
## p: a pattern to match
## Returns: a record indicating the match status
function match_pattern(s: string, p: pattern): PatternMatchResult
{
local a = split_n(s, p, T, 1);
if ( |a| == 1 )
# no match
return [$matched = F, $str = "", $off = 0];
else
return [$matched = T, $str = a[2], $off = |a[1]| + 1];
}

141
scripts/base/utils/site.bro Normal file
View file

@ -0,0 +1,141 @@
##! Definitions describing a site - which networks and DNS zones are "local"
##! and "neighbors", and servers running particular services.
@load ./patterns
module Site;
export {
## Address space that is considered private and unrouted.
## By default it contains the RFC-defined non-routable IPv4 address space.
const private_address_space: set[subnet] = {
10.0.0.0/8,
192.168.0.0/16,
127.0.0.0/8,
172.16.0.0/12
} &redef;
## Networks that are considered "local".
const local_nets: set[subnet] &redef;
## Networks that are considered "neighbors".
const neighbor_nets: set[subnet] &redef;
## If local network administrators are known and they have responsibility
## for defined address space, then a mapping can be defined here between
## networks for which they have responsibility and a set of email
## addresses.
const local_admins: table[subnet] of set[string] = {} &redef;
## DNS zones that are considered "local".
const local_zones: set[string] &redef;
## DNS zones that are considered "neighbors".
const neighbor_zones: set[string] &redef;
## Function that returns true if an address corresponds to one of
## the local networks, false if not.
global is_local_addr: function(a: addr): bool;
## Function that returns true if an address corresponds to one of
## the neighbor networks, false if not.
global is_neighbor_addr: function(a: addr): bool;
## Function that returns true if an address corresponds to one of
## the private/unrouted networks, false if not.
global is_private_addr: function(a: addr): bool;
## Function that returns true if a host name is within a local
## DNS zone.
global is_local_name: function(name: string): bool;
## Function that returns true if a host name is within a neighbor
## DNS zone.
global is_neighbor_name: function(name: string): bool;
## Function that returns a comma-separated list of email addresses
## that are considered administrators for the IP address provided as
## an argument.
global get_emails: function(a: addr): string;
}
# Please ignore; these are internally used variables.
global local_dns_suffix_regex: pattern = /MATCH_NOTHING/;
global local_dns_neighbor_suffix_regex: pattern = /MATCH_NOTHING/;
function is_local_addr(a: addr): bool
{
return a in local_nets;
}
function is_neighbor_addr(a: addr): bool
{
return a in neighbor_nets;
}
function is_private_addr(a: addr): bool
{
return a in private_address_space;
}
function is_local_name(name: string): bool
{
return local_dns_suffix_regex in name;
}
function is_neighbor_name(name: string): bool
{
return local_dns_neighbor_suffix_regex in name;
}
# This is a hack for doing a for loop.
const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32};
# TODO: make this work with IPv6
function find_all_emails(ip: addr): set[string]
{
if ( ip !in local_admins ) return set();
local output_values: set[string] = set();
local tmp_subnet: subnet;
local i: count;
local emails: string;
for ( i in one_to_32 )
{
tmp_subnet = mask_addr(ip, one_to_32[i]);
if ( tmp_subnet in local_admins )
{
for ( email in local_admins[tmp_subnet] )
{
if ( email != "" )
add output_values[email];
}
}
}
return output_values;
}
function fmt_email_string(emails: set[string]): string
{
local output="";
for( email in emails )
{
if ( output == "" )
output = email;
else
output = fmt("%s, %s", output, email);
}
return output;
}
function get_emails(a: addr): string
{
return fmt_email_string(find_all_emails(a));
}
event bro_init() &priority=10
{
# Double backslashes are needed due to string parsing.
local_dns_suffix_regex = set_to_regex(local_zones, "(^\\.?|\\.)(~~)$");
local_dns_neighbor_suffix_regex = set_to_regex(neighbor_zones, "(^\\.?|\\.)(~~)$");
}
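
These constants are meant to be filled in from site configuration (for example local.bro); a minimal sketch with placeholder networks, zones, and contacts:

redef Site::local_nets += { 192.168.0.0/16, 10.0.0.0/8 };
redef Site::local_zones += { "example.com" };
redef Site::local_admins += { [192.168.1.0/24] = set("noc@example.com") };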

View file

@ -0,0 +1,54 @@
##! Functions to assist with small string analysis and manipulation that can
##! be implemented as Bro functions and don't need to be implemented as
##! built-in functions.
## Returns true if the given string is at least 25% composed of 8-bit
## characters.
function is_string_binary(s: string): bool
{
return byte_len(gsub(s, /[\x00-\x7f]/, "")) * 100 / |s| >= 25;
}
## Joins a set of strings together, with elements delimited by a constant string.
## ss: a set of strings to join
## j: the string used to join set elements
## Returns: a string composed of all elements of the set, delimited by the
## joining string.
function join_string_set(ss: set[string], j: string): string
{
local output="";
local i=0;
for ( s in ss )
{
if ( i > 0 )
output = cat(output, j);
output = cat(output, s);
++i;
}
return output;
}
## Given a string, returns an escaped version.
## s: a string to escape
## chars: a string containing all the characters that need to be escaped
## Returns: a string with all occurrences of any character in ``chars`` escaped
## using ``\``, and any literal ``\`` characters likewise escaped.
function string_escape(s: string, chars: string): string
{
s = subst_string(s, "\\", "\\\\");
for ( c in chars )
s = subst_string(s, c, cat("\\", c));
return s;
}
## Cuts a number of characters from the end of the given string.
## s: a string to trim
## tail_len: the number of characters to remove from end of string
## Returns: the string in ``s`` with ``tail_len`` characters removed from end
function cut_tail(s: string, tail_len: count): string
{
if ( tail_len > |s| )
tail_len = |s|;
return sub_bytes(s, 1, int_to_count(|s| - tail_len));
}
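
A few illustrative calls (hypothetical inputs):

event bro_init()
	{
	print join_string_set(set("a", "b", "c"), ", ");  # e.g. "a, b, c" (set order is not guaranteed)
	print string_escape("say \"hi\"", "\"");          # say \"hi\"
	print cut_tail("example.com.", 1);                # example.com
	}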

View file

@ -0,0 +1,58 @@
##! Functions for using multiple thresholds with a counting tracker. For
##! example, you may want to generate a notice when something happens 10 times
##! and again when it happens 100 times but nothing in between. You can use
##! the :bro:id:`check_threshold` function to define your threshold points
##! and the :bro:type:`TrackCount` variable where you are keeping track of your
##! counter.
module GLOBAL;
export {
type TrackCount: record {
## The counter for the number of times something has happened.
n: count &default=0;
## The index of the vector where the counter currently is. This is
## used to track which threshold is currently being watched for.
index: count &default=0;
};
## The thresholds you would like to use as defaults with the
## :bro:id:`default_check_threshold` function.
const default_notice_thresholds: vector of count = {
30, 100, 1000, 10000, 100000, 1000000, 10000000,
} &redef;
## This will check if a :bro:type:`TrackCount` variable has crossed any
## thresholds in a given vector.
## v: a vector holding counts that represent thresholds
## tracker: the record being used to track event counter and currently
## monitored threshold value
## Returns: T if a threshold has been crossed, else F
global check_threshold: function(v: vector of count, tracker: TrackCount): bool;
## This will use the :bro:id:`default_notice_thresholds` variable to check
## a :bro:type:`TrackCount` variable to see if it has crossed another
## threshold.
global default_check_threshold: function(tracker: TrackCount): bool;
}
function new_track_count(): TrackCount
{
local tc: TrackCount;
return tc;
}
function check_threshold(v: vector of count, tracker: TrackCount): bool
{
if ( tracker$index <= |v| && tracker$n >= v[tracker$index] )
{
++tracker$index;
return T;
}
return F;
}
function default_check_threshold(tracker: TrackCount): bool
{
return check_threshold(default_notice_thresholds, tracker);
}
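
A sketch of the intended usage pattern, tracking per-host counts against the default thresholds; the table and helper function here are hypothetical:

global failure_tracker: table[addr] of TrackCount &create_expire = 1 day;

function count_failure(ip: addr)
	{
	if ( ip !in failure_tracker )
		failure_tracker[ip] = new_track_count();

	++failure_tracker[ip]$n;
	if ( default_check_threshold(failure_tracker[ip]) )
		print fmt("%s crossed a threshold at %d failures", ip, failure_tracker[ip]$n);
	}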

View file

@ -0,0 +1,17 @@
##! Listen for other Bro instances to make unencrypted connections.
module Communication;
export {
## Which port to listen on for clear connections.
const listen_port_clear = Communication::default_port_clear &redef;
## Which IP address to bind to (0.0.0.0 for any interface).
const listen_if_clear = 0.0.0.0 &redef;
}
event bro_init() &priority=-10
{
listen(listen_if_clear, listen_port_clear, F);
}

View file

@ -0,0 +1,18 @@
##! Listen for other Bro instances and encrypt the connection with SSL.
module Communication;
export {
## Which port to listen on for SSL encrypted connections.
const listen_port_ssl = Communication::default_port_ssl &redef;
## Which IP address to bind to for SSL encrypted connections
## (0.0.0.0 for any interface).
const listen_if_ssl = 0.0.0.0 &redef;
}
event bro_init() &priority=-10
{
listen(listen_if_ssl, listen_port_ssl, T);
}

View file

@ -0,0 +1,60 @@
# If an instance is a controllee, it implicitly needs to listen for remote
# connections.
@load frameworks/communication/listen-clear
module Control;
event Control::id_value_request(id: string)
{
local val = lookup_ID(id);
event Control::id_value_response(id, fmt("%s", val));
}
event Control::peer_status_request()
{
local status = "";
for ( p in Communication::nodes )
{
local peer = Communication::nodes[p];
if ( ! peer$connected )
next;
local res = resource_usage();
status += fmt("%.6f peer=%s host=%s events_in=%s events_out=%s ops_in=%s ops_out=%s bytes_in=? bytes_out=?\n",
network_time(),
peer$peer$descr, peer$host,
res$num_events_queued, res$num_events_dispatched,
res$blocking_input, res$blocking_output);
}
event Control::peer_status_response(status);
}
event Control::net_stats_request()
{
local ns = net_stats();
local reply = fmt("%.6f recvd=%d dropped=%d link=%d\n", network_time(),
ns$pkts_recvd, ns$pkts_dropped, ns$pkts_link);
event Control::net_stats_response(reply);
}
event Control::configuration_update_request()
{
# Generate the alias event.
event Control::configuration_update();
# Nothing in particular needs to happen here; the event just indicates that
# the configuration is going to be updated. Other scripts could handle it
# if they need to do some ancillary processing when redef-able consts are
# modified at runtime.
event Control::configuration_update_response();
}
event Control::shutdown_request()
{
# Send the acknowledgement event.
event Control::shutdown_response();
# Schedule the shutdown to let the current event queue flush itself first.
event terminate_event();
}

View file

@ -0,0 +1,102 @@
module Control;
# Do some sanity checking and rework the communication nodes.
event bro_init() &priority=5
{
# We know that some command was given because this script wouldn't be
# loaded otherwise, so we can feel free to throw an error here and
# shut down.
if ( cmd !in commands )
{
# TODO: do an actual error here. Maybe through the reporter events?
print fmt("The '%s' control command is unknown.", cmd);
terminate();
}
# Establish the communication configuration and only request response
# messages.
Communication::nodes["control"] = [$host=host, $p=host_port,
$sync=F, $connect=T,
$class="control", $events=Control::controllee_events];
}
event Control::id_value_response(id: string, val: string) &priority=-10
{
event terminate_event();
}
event Control::peer_status_response(s: string) &priority=-10
{
event terminate_event();
}
event Control::net_stats_response(s: string) &priority=-10
{
event terminate_event();
}
event Control::configuration_update_response() &priority=-10
{
event terminate_event();
}
event Control::shutdown_response() &priority=-10
{
event terminate_event();
}
function configuration_update_func(p: event_peer)
{
# Send all &redef'able consts to the peer.
local globals = global_ids();
local cnt = 0;
for ( id in globals )
{
if ( id in ignore_ids )
next;
local t = globals[id];
# Skip it if the variable isn't redefinable or not const.
# We don't want to update non-const globals because that's usually
# where state is stored and those values will frequently be declared
# with &redef so that attributes can be redefined.
if ( t$constant && t$redefinable )
{
send_id(p, id);
++cnt;
}
}
print fmt("sent %d IDs", cnt);
event terminate_event();
}
event remote_connection_handshake_done(p: event_peer) &priority=-10
{
if ( cmd == "id_value" )
{
if ( arg != "" )
event Control::id_value_request(arg);
else
{
# TODO: do an actual error here. Maybe through the reporter events?
print "The id_value command requires that Control::arg have some value.";
terminate();
}
}
else if ( cmd == "peer_status" )
event Control::peer_status_request();
else if ( cmd == "net_stats" )
event Control::net_stats_request();
else if ( cmd == "shutdown" )
event Control::shutdown_request();
else if ( cmd == "configuration_update" )
{
configuration_update_func(p);
# Signal configuration update to peer.
event Control::configuration_update_request();
}
}
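
The command dispatched above is selected through the framework's redefinable options. A hypothetical one-off driver script, assuming the control framework's main script declares Control::host, Control::host_port and Control::cmd as &redef constants (as the checks above imply):

@load frameworks/control/controller

redef Control::host = 127.0.0.1;
redef Control::host_port = 47760/tcp;
redef Control::cmd = "net_stats";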

View file

@ -0,0 +1,242 @@
##! Finds connections with protocols on non-standard ports with DPD.
module ProtocolDetector;
export {
redef enum Notice::Type += {
Off_Port_Protocol_Found, # raised for each connection found
Protocol_Found,
Server_Found,
};
# Table of (protocol, resp_h, resp_p) tuples known to be uninteresting
# in the given direction. For all other protocols detected on
# non-standard ports, we raise a Protocol_Found notice. (More specific
# filtering can then be done via notice_filters.)
#
# Use 0.0.0.0 to wildcard-match any resp_h.
type dir: enum { NONE, INCOMING, OUTGOING, BOTH };
const valids: table[count, addr, port] of dir = {
# A couple of ports commonly used for benign HTTP servers.
# For now we want to see everything.
# [ANALYZER_HTTP, 0.0.0.0, 81/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 82/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 83/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 88/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 8001/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 8090/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 8081/tcp] = OUTGOING,
#
# [ANALYZER_HTTP, 0.0.0.0, 6346/tcp] = BOTH, # Gnutella
# [ANALYZER_HTTP, 0.0.0.0, 6347/tcp] = BOTH, # Gnutella
# [ANALYZER_HTTP, 0.0.0.0, 6348/tcp] = BOTH, # Gnutella
} &redef;
# Set of analyzers for which we suppress Server_Found notices
# (but not Protocol_Found). Along with avoiding clutter in the
# log files, this also saves memory because for these we don't
# need to remember which servers we have already reported, which
# for some services can be a lot.
const suppress_servers: set [count] = {
# ANALYZER_HTTP
} &redef;
# We consider a connection to use a protocol X if the analyzer for X
# is still active (i) after an interval of minimum_duration, or (ii)
# after a payload volume of minimum_volume, or (iii) at the end of the
# connection.
const minimum_duration = 30 secs &redef;
const minimum_volume = 4e3 &redef; # bytes
# How often to check the size of the connection.
const check_interval = 5 secs;
# Entry point for other analyzers to report that they recognized
# a certain (sub-)protocol.
global found_protocol: function(c: connection, analyzer: count,
protocol: string);
# Table keeping reported (server, port, analyzer) tuples (and their
# reported sub-protocols).
global servers: table[addr, port, string] of set[string]
&read_expire = 14 days;
}
# Table that tracks currently active dynamic analyzers per connection.
global conns: table[conn_id] of set[count];
# Table of reports by other analyzers about the protocol used in a connection.
global protocols: table[conn_id] of set[string];
type protocol : record {
a: string; # analyzer name
sub: string; # "sub-protocols" reported by other sources
};
function get_protocol(c: connection, a: count) : protocol
{
local str = "";
if ( c$id in protocols )
{
for ( p in protocols[c$id] )
str = |str| > 0 ? fmt("%s/%s", str, p) : p;
}
return [$a=analyzer_name(a), $sub=str];
}
function fmt_protocol(p: protocol) : string
{
return p$sub != "" ? fmt("%s (via %s)", p$sub, p$a) : p$a;
}
function do_notice(c: connection, a: count, d: dir)
{
if ( d == BOTH )
return;
if ( d == INCOMING && Site::is_local_addr(c$id$resp_h) )
return;
if ( d == OUTGOING && ! Site::is_local_addr(c$id$resp_h) )
return;
local p = get_protocol(c, a);
local s = fmt_protocol(p);
NOTICE([$note=Protocol_Found,
$msg=fmt("%s %s on port %s", id_string(c$id), s, c$id$resp_p),
$sub=s, $conn=c, $n=a]);
# We report multiple Server_Found's per host if we find a new
# sub-protocol.
local known = [c$id$resp_h, c$id$resp_p, p$a] in servers;
local newsub = F;
if ( known )
newsub = (p$sub != "" &&
p$sub !in servers[c$id$resp_h, c$id$resp_p, p$a]);
if ( (! known || newsub) && a !in suppress_servers )
{
NOTICE([$note=Server_Found,
$msg=fmt("%s: %s server on port %s%s", c$id$resp_h, s,
c$id$resp_p, (known ? " (update)" : "")),
$p=c$id$resp_p, $sub=s, $conn=c, $src=c$id$resp_h, $n=a]);
if ( ! known )
servers[c$id$resp_h, c$id$resp_p, p$a] = set();
add servers[c$id$resp_h, c$id$resp_p, p$a][p$sub];
}
}
function report_protocols(c: connection)
{
# We only report the connection if both sides have transferred data.
if ( c$resp$size == 0 || c$orig$size == 0 )
{
delete conns[c$id];
delete protocols[c$id];
return;
}
local analyzers = conns[c$id];
for ( a in analyzers )
{
if ( [a, c$id$resp_h, c$id$resp_p] in valids )
do_notice(c, a, valids[a, c$id$resp_h, c$id$resp_p]);
else if ( [a, 0.0.0.0, c$id$resp_p] in valids )
do_notice(c, a, valids[a, 0.0.0.0, c$id$resp_p]);
else
do_notice(c, a, NONE);
append_addl(c, analyzer_name(a));
}
delete conns[c$id];
delete protocols[c$id];
}
event ProtocolDetector::check_connection(c: connection)
{
if ( c$id !in conns )
return;
local duration = network_time() - c$start_time;
local size = c$resp$size + c$orig$size;
if ( duration >= minimum_duration || size >= minimum_volume )
report_protocols(c);
else
{
local delay = min_interval(minimum_duration - duration,
check_interval);
schedule delay { ProtocolDetector::check_connection(c) };
}
}
event connection_state_remove(c: connection)
{
if ( c$id !in conns )
{
delete protocols[c$id];
return;
}
# Reports all analyzers that have remained to the end.
report_protocols(c);
}
event protocol_confirmation(c: connection, atype: count, aid: count)
{
# Don't report anything running on a well-known port.
if ( atype in dpd_config && c$id$resp_p in dpd_config[atype]$ports )
return;
if ( c$id in conns )
{
local analyzers = conns[c$id];
add analyzers[atype];
}
else
{
conns[c$id] = set(atype);
local delay = min_interval(minimum_duration, check_interval);
schedule delay { ProtocolDetector::check_connection(c) };
}
}
# event connection_analyzer_disabled(c: connection, analyzer: count)
# {
# if ( c$id !in conns )
# return;
#
# delete conns[c$id][analyzer];
# }
function append_proto_addl(c: connection)
{
for ( a in conns[c$id] )
append_addl(c, fmt_protocol(get_protocol(c, a)));
}
function found_protocol(c: connection, analyzer: count, protocol: string)
{
# Don't report anything running on a well-known port.
if ( analyzer in dpd_config &&
c$id$resp_p in dpd_config[analyzer]$ports )
return;
if ( c$id !in protocols )
protocols[c$id] = set();
add protocols[c$id][protocol];
}
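
Deployments can whitelist expected off-port services by extending the valids table from their own policy; a brief sketch (the ports shown are only examples):

redef ProtocolDetector::valids += {
	[ANALYZER_HTTP, 0.0.0.0, 8080/tcp] = ProtocolDetector::OUTGOING,
	[ANALYZER_SSH, 0.0.0.0, 2222/tcp] = ProtocolDetector::BOTH,
};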

View file

@ -0,0 +1,27 @@
##! This script enables logging of packet segment data when a protocol
##! parsing violation is encountered. The amount of data logged from
##! the packet is set by the packet_segment_size variable.
##! A caveat to logging packet data is that in some cases, the packet may
##! not be the packet that actually caused the protocol violation.
module DPD;
export {
redef record Info += {
## A chunk of the payload that most likely resulted in the protocol
## violation.
packet_segment: string &optional &log;
};
## Size of the packet segment to display in the DPD log.
const packet_segment_size: int = 255 &redef;
}
event protocol_violation(c: connection, atype: count, aid: count,
reason: string) &priority=4
{
if ( ! c?$dpd ) return;
c$dpd$packet_segment=fmt("%s", sub_bytes(get_current_packet()$data, 0, packet_segment_size));
}

View file

@ -0,0 +1,13 @@
signature windows_reverse_shell {
ip-proto == tcp
tcp-state established,originator
event "ATTACK-RESPONSES Microsoft cmd.exe banner (reverse-shell originator)"
payload /.*Microsoft Windows.*\x28C\x29 Copyright 1985-.*Microsoft Corp/
}
signature windows_shell {
ip-proto == tcp
tcp-state established,responder
event "ATTACK-RESPONSES Microsoft cmd.exe banner (normal-shell responder)"
payload /.*Microsoft Windows.*\x28C\x29 Copyright 1985-.*Microsoft Corp/
}

Some files were not shown because too many files have changed in this diff.