Merge remote-tracking branch 'origin/topic/seth/bittorrent-fix-and-dpd-sig-breakout' into topic/seth/faf-updates

Conflicts:
	magic
	scripts/base/protocols/http/__load__.bro
	scripts/base/protocols/irc/__load__.bro
	scripts/base/protocols/smtp/__load__.bro
Seth Hall 2013-07-10 16:28:38 -04:00
commit 2e0912b543
54 changed files with 880 additions and 381 deletions

@@ -0,0 +1,169 @@
@load base/frameworks/notice
@load base/frameworks/packet-filter

module PacketFilter;

export {
	## The maximum number of BPF-based shunts that Bro is allowed to perform.
	const max_bpf_shunts = 100 &redef;

	## Call this function to use BPF to shunt a connection (to prevent the
	## data packets from reaching Bro). For TCP connections, control packets
	## are still allowed through so that Bro can continue logging the
	## connection and can stop shunting once the connection ends.
	global shunt_conn: function(id: conn_id): bool;

	## This function will use a BPF expression to shunt traffic between
	## the two hosts given in the `conn_id` so that the traffic is never
	## exposed to Bro's traffic processing.
	global shunt_host_pair: function(id: conn_id): bool;

	## Remove shunting for a host pair given as a `conn_id`. The filter
	## is not removed immediately; removal waits for the occasional filter
	## update done by the `PacketFilter` framework.
	global unshunt_host_pair: function(id: conn_id): bool;

	## Performs the same function as `unshunt_host_pair`, but forces an
	## immediate filter update.
	global force_unshunt_host_pair: function(id: conn_id): bool;

	## Retrieve the currently shunted connections.
	global current_shunted_conns: function(): set[conn_id];

	## Retrieve the currently shunted host pairs.
	global current_shunted_host_pairs: function(): set[conn_id];

	redef enum Notice::Type += {
		## Indicates that :bro:id:`max_bpf_shunts` connections are already
		## being shunted with BPF filters and no more are allowed.
		No_More_Conn_Shunts_Available,

		## Limitations in BPF make shunting some connections with BPF
		## impossible. This notice encompasses those various cases.
		Cannot_BPF_Shunt_Conn,
	};
}
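As a usage sketch (hedged: the triggering event and the shunt-everything policy are illustrative assumptions, not part of this commit), a policy script could drive this API like so:

# Illustrative only: attempt to shunt every established connection.
# A real policy would apply a condition (e.g. size or service) first.
event connection_established(c: connection)
	{
	if ( ! PacketFilter::shunt_conn(c$id) )
		print fmt("unable to shunt %s", cat(c$id));
	}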
global shunted_conns: set[conn_id];
global shunted_host_pairs: set[conn_id];

function shunt_filters()
	{
	# NOTE: this could wrongly match if a connection happens with the ports reversed.
	local tcp_filter = "";
	local udp_filter = "";
	for ( id in shunted_conns )
		{
		local prot = get_port_transport_proto(id$resp_p);
		local filt = fmt("host %s and port %d and host %s and port %d",
		                 id$orig_h, id$orig_p, id$resp_h, id$resp_p);
		# Per-connection filters must be joined with "or": a packet can
		# only ever match one host/port pair at a time, so joining with
		# "and" would produce an unmatchable exclude expression.
		if ( prot == udp )
			udp_filter = combine_filters(udp_filter, "or", filt);
		else if ( prot == tcp )
			tcp_filter = combine_filters(tcp_filter, "or", filt);
		}

	# For TCP, keep letting control packets (SYN/FIN/RST) through so the
	# connection is still logged and the shunt is torn down when it ends.
	if ( tcp_filter != "" )
		tcp_filter = combine_filters("tcp and tcp[tcpflags] & (tcp-syn|tcp-fin|tcp-rst) == 0", "and", tcp_filter);
	local conn_shunt_filter = combine_filters(tcp_filter, "or", udp_filter);

	local hp_shunt_filter = "";
	for ( id in shunted_host_pairs )
		hp_shunt_filter = combine_filters(hp_shunt_filter, "or",
		                                  fmt("host %s and host %s", id$orig_h, id$resp_h));

	local filter = combine_filters(conn_shunt_filter, "or", hp_shunt_filter);
	if ( filter != "" )
		PacketFilter::exclude("shunt_filters", filter);
	}
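For illustration (assuming `combine_filters` parenthesizes and joins its non-empty arguments), a single shunted TCP connection from 10.0.0.1:49152 to 10.0.0.2:80 would yield an exclude expression along these lines:

(tcp and tcp[tcpflags] & (tcp-syn|tcp-fin|tcp-rst) == 0) and (host 10.0.0.1 and port 49152 and host 10.0.0.2 and port 80)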
event bro_init() &priority=5
	{
	register_filter_plugin([
		$func()={ return shunt_filters(); }
		]);
	}

function current_shunted_conns(): set[conn_id]
	{
	return shunted_conns;
	}

function current_shunted_host_pairs(): set[conn_id]
	{
	return shunted_host_pairs;
	}

function reached_max_shunts(): bool
	{
	if ( |shunted_conns| + |shunted_host_pairs| > max_bpf_shunts )
		{
		NOTICE([$note=No_More_Conn_Shunts_Available,
		        $msg=fmt("%d BPF shunts are in place and no more will be added until space clears.", max_bpf_shunts)]);
		return T;
		}
	else
		return F;
	}
function shunt_host_pair(id: conn_id): bool
	{
	PacketFilter::filter_changed = T;

	if ( reached_max_shunts() )
		return F;

	add shunted_host_pairs[id];
	install();
	return T;
	}

function unshunt_host_pair(id: conn_id): bool
	{
	PacketFilter::filter_changed = T;

	if ( id in shunted_host_pairs )
		{
		delete shunted_host_pairs[id];
		return T;
		}
	else
		return F;
	}

function force_unshunt_host_pair(id: conn_id): bool
	{
	if ( unshunt_host_pair(id) )
		{
		install();
		return T;
		}
	else
		return F;
	}

function shunt_conn(id: conn_id): bool
	{
	if ( is_v6_addr(id$orig_h) )
		{
		NOTICE([$note=Cannot_BPF_Shunt_Conn,
		        $msg="IPv6 connections can't be shunted with BPF due to limitations in BPF",
		        $sub="ipv6_conn",
		        $id=id, $identifier=cat(id)]);
		return F;
		}

	if ( reached_max_shunts() )
		return F;

	PacketFilter::filter_changed = T;
	add shunted_conns[id];
	install();
	return T;
	}

event connection_state_remove(c: connection) &priority=-5
	{
	# Don't rebuild the filter right away because the packet filter framework
	# will check every few minutes and update the filter if things have changed.
	if ( c$id in shunted_conns )
		delete shunted_conns[c$id];
	}
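Deployments whose capture hardware tolerates larger BPF programs can raise the cap with a one-line redef; the value here is purely illustrative:

redef PacketFilter::max_bpf_shunts = 500;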

@@ -0,0 +1,132 @@
##! This script implements the "Bro side" of several load balancing
##! approaches for Bro clusters.

@load base/frameworks/cluster
@load base/frameworks/packet-filter

module LoadBalancing;

export {
	type Method: enum {
		## Apply BPF filters to each worker in a way that causes them to
		## automatically flow-balance traffic across themselves.
		AUTO_BPF,
		## Load balance traffic across the workers by making each one apply
		## a restrict filter to only listen to a single MAC address. This
		## is a somewhat common deployment option for sites doing network-
		## based load balancing with MAC address rewriting and passing the
		## traffic to a single interface. Multiple MAC addresses will show
		## up on the same interface and need to be filtered down to a
		## single address.
		#MAC_ADDR_BPF,
	};

	## Defines the method of load balancing to use.
	const method = AUTO_BPF &redef;

	# Configure the cluster framework to enable the load balancing filter configuration.
	#global send_filter: event(for_node: string, filter: string);
	#global confirm_filter_installation: event(success: bool);

	redef record Cluster::Node += {
		## A BPF filter for load balancing traffic sniffed on a single
		## interface across a number of processes. In normal use, this
		## will be assigned dynamically by the manager and installed by
		## the workers.
		lb_filter: string &optional;
	};
}
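For context, a minimal cluster layout sketch that would exercise the AUTO_BPF path below: two workers sniffing the same interface on one host. Node names, addresses, and ports are made up, and BroControl normally generates this table automatically:

redef Cluster::nodes = {
	["manager"]  = [$node_type=Cluster::MANAGER, $ip=10.0.0.5, $p=47761/tcp],
	["worker-1"] = [$node_type=Cluster::WORKER, $ip=10.0.0.5, $p=47763/tcp,
	                $interface="eth0", $manager="manager"],
	["worker-2"] = [$node_type=Cluster::WORKER, $ip=10.0.0.5, $p=47764/tcp,
	                $interface="eth0", $manager="manager"],
};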
#redef Cluster::manager2worker_events += /LoadBalancing::send_filter/;
#redef Cluster::worker2manager_events += /LoadBalancing::confirm_filter_installation/;

@if ( Cluster::is_enabled() )
@if ( Cluster::local_node_type() == Cluster::MANAGER )

event bro_init() &priority=5
	{
	if ( method != AUTO_BPF )
		return;

	# Count how many worker processes are sniffing each ip/interface pair.
	local worker_ip_interface: table[addr, string] of count = table();
	for ( n in Cluster::nodes )
		{
		local this_node = Cluster::nodes[n];

		# Only workers!
		if ( this_node$node_type != Cluster::WORKER ||
		     ! this_node?$interface )
			next;

		if ( [this_node$ip, this_node$interface] !in worker_ip_interface )
			worker_ip_interface[this_node$ip, this_node$interface] = 0;
		++worker_ip_interface[this_node$ip, this_node$interface];
		}

	# Now that we've counted up how many processes are running on an
	# interface, let's create the filters for each worker.
	local lb_proc_track: table[addr, string] of count = table();
	for ( no in Cluster::nodes )
		{
		local that_node = Cluster::nodes[no];
		if ( that_node$node_type == Cluster::WORKER &&
		     that_node?$interface &&
		     [that_node$ip, that_node$interface] in worker_ip_interface )
			{
			if ( [that_node$ip, that_node$interface] !in lb_proc_track )
				lb_proc_track[that_node$ip, that_node$interface] = 0;

			local this_lb_proc = lb_proc_track[that_node$ip, that_node$interface];
			local total_lb_procs = worker_ip_interface[that_node$ip, that_node$interface];

			++lb_proc_track[that_node$ip, that_node$interface];
			if ( total_lb_procs > 1 )
				{
				that_node$lb_filter = PacketFilter::sample_filter(total_lb_procs, this_lb_proc);
				Communication::nodes[no]$capture_filter = that_node$lb_filter;
				}
			}
		}
	}
#event remote_connection_established(p: event_peer) &priority=-5
#	{
#	if ( is_remote_event() )
#		return;
#
#	local for_node = p$descr;
#	# Send the filter to the peer.
#	if ( for_node in Cluster::nodes &&
#	     Cluster::nodes[for_node]?$lb_filter )
#		{
#		local filter = Cluster::nodes[for_node]$lb_filter;
#		event LoadBalancing::send_filter(for_node, filter);
#		}
#	}

#event LoadBalancing::confirm_filter_installation(success: bool)
#	{
#	# This doesn't really matter yet since we aren't getting back a
#	# meaningful success response.
#	}

@endif

@if ( Cluster::local_node_type() == Cluster::WORKER )

#event LoadBalancing::send_filter(for_node: string, filter: string)
event remote_capture_filter(p: event_peer, filter: string)
	{
	#if ( for_node !in Cluster::nodes )
	#	return;
	#
	#if ( Cluster::node == for_node )
	#	{
	restrict_filters["lb_filter"] = filter;
	PacketFilter::install();
	#event LoadBalancing::confirm_filter_installation(T);
	#	}
	}

@endif
@endif
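To make the round-robin assignment above concrete, a hedged standalone sketch: with the two-worker layout suggested earlier, each worker on the shared interface receives a distinct 1-of-2 slice from `PacketFilter::sample_filter` (the exact BPF text it emits is version-dependent and not shown here):

@load base/frameworks/packet-filter

event bro_init()
	{
	# Purely illustrative: print the two complementary sampling filters
	# the manager would assign to worker-1 and worker-2.
	print PacketFilter::sample_filter(2, 0);
	print PacketFilter::sample_filter(2, 1);
	}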

@@ -3,7 +3,9 @@
-## This normally isn't used because of the default open packet filter
-## but we set it anyway in case the user is using a packet filter.
-redef capture_filters += { ["frag"] = "(ip[6:2] & 0x3fff != 0) and tcp" };
+## Note: This was removed because the default model now is to have a wide
+## open packet filter.
+#redef capture_filters += { ["frag"] = "(ip[6:2] & 0x3fff != 0) and tcp" };
 ## Shorten the fragment timeout from never expiring to expiring fragments after
 ## five minutes.
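A hedged sketch of the kind of redef that comment describes, using Bro's standard `frag_timeout` tunable (whose default of 0 sec means fragments are held forever); the exact line in this file may differ:

redef frag_timeout = 5 min;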