Merge remote-tracking branch 'origin/topic/dist-cleanup'

* origin/topic/dist-cleanup:
  Updating INSTALL
  Updating README
  Remove $Id$ tags
  Remove policy.old directory, adresses #511
This commit is contained in:
Robin Sommer 2011-09-18 16:17:42 -07:00
commit bd2e30f521
479 changed files with 23 additions and 22780 deletions

13
INSTALL
View file

@ -62,7 +62,6 @@ Installation
To build and install into /usr/local/bro: To build and install into /usr/local/bro:
> ./configure > ./configure
> cd build
> make > make
> make install > make install
@ -89,17 +88,19 @@ Running Bro
=========== ===========
Bro is a complex program and it takes a bit of time to get familiar Bro is a complex program and it takes a bit of time to get familiar
with it. In the following we give a few simple examples. See the with it. A good place for newcomers to start is the quick start guide
quickstart guide at http://www.bro-ids.org for more information; you available here:
can the source that in doc/quick-start.
http://www.bro-ids.org/documentation/quickstart.html
For developers that wish to run Bro from the the build/ directory For developers that wish to run Bro from the the build/ directory
after performing "make", but without performing "make install", they after performing "make", but without performing "make install", they
will have to first set BROPATH to look for scripts inside the build will have to first set BROPATH to look for scripts inside the build
directory. Sourcing either build/bro-path-dev.sh or build/bro-path-dev.csh directory. Sourcing either build/bro-path-dev.sh or build/bro-path-dev.csh
as appropriate for the current shell accomplishes this. e.g.: as appropriate for the current shell accomplishes this and also augments your
PATH so you can use Bro without qualifying the path to it. e.g.:
> ./configure > ./configure
> make > make
> source build/bro-path-dev.sh > source build/bro-path-dev.sh
> ./build/src/bro > bro <options>

38
README
View file

@ -1,31 +1,23 @@
This is release 1.6 of Bro, a system for detecting network intruders in ============================
real-time using passive network monitoring. Bro Network Security Monitor
============================
Please see the file INSTALL for installation instructions and Bro is a powerful framework for network analysis and security
pointers for getting started. For more documentation, see the monitoring.
documentation on Bro's home page:
http://www.bro-ids.org/docs Please see the INSTALL file for installation instructions and pointers
for getting started. For more documentation, research publications, or
community contact information see Bro's home page:
The main parts of Bro's documentation are also available in the doc/ http://www.bro-ids.org
directory of the distribution. (Please note that the documentation
is still a work in progress; there will be more in future releases.)
Numerous other Bro-related publications, including a paper describing the Please see COPYING for licensing information.
system, can be found at
http://www.bro-ids.org/publications.html On behalf of the Bro Development Team,
Send comments, etc., to the Bro mailing list, bro@bro-ids.org.
However, please note that you must first subscribe to the list in
order to be able to post to it.
- Vern Paxson & Robin Sommer, on behalf of the Bro development team
Vern Paxson & Robin Sommer,
International Computer Science Institute &
Lawrence Berkeley National Laboratory Lawrence Berkeley National Laboratory
University of California, Berkeley USA
ICSI Center for Internet Research (ICIR)
International Computer Science Institute
Berkeley, CA USA
vern@icir.org / robin@icir.org vern@icir.org / robin@icir.org

View file

@ -1,18 +0,0 @@
# $Id: OS-fingerprint.bro 1071 2005-03-08 14:09:31Z vern $
#
# Tracks operating system versioning using the "software" framework.
@load software
event OS_version_found(c: connection, host: addr, OS: OS_version)
{
local version: software_version;
version$major = version$minor = version$minor2 = -1;
version$addl = OS$detail;
local sw: software;
sw$name = OS$genre;
sw$version = version;
event software_version_found(c, host, sw, "OS");
}

View file

@ -1,278 +0,0 @@
# $Id: adu.bro 5152 2007-12-04 21:48:56Z vern $
@load conn-id
module adu;
# This script parses application-layer data (ADU) units, or "messages",
# out of the packet streams. Since the analysis is generic, we define
# an ADU simply as all application-layer data in a 5-tuple flow going
# in one direction without any data going the other way. Once we see
# data in the other direction, we finish the current ADU and start
# a new one (going the other way). While this approach is only
# approximate, it can work well for both UDP and TCP.
#
# The script reports ADUs as strings, up to a configurable maximum size, and
# up to a configurable depth into the flow.
#
# Generated events:
#
# - adu_tx(c: connection, a: adu_state) reports an ADU seen from
# c's originator to its responder.
#
# - adu_rx(c: connection, a: adu_state) reports an ADU seen from
# c's responder to the originator.
#
# - adu_done(c: connection) indicates that no more ADUs will be seen
# on connection c. This is useful to know in case your statekeeping
# relies on event connection_state_remove(), which is also used by
# adu.bro.
#
# --- Input configuration -- which ports to look at --------------------
# Right now: everything!
#
redef tcp_content_deliver_all_orig = T;
redef tcp_content_deliver_all_resp = T;
redef udp_content_deliver_all_orig = T;
redef udp_content_deliver_all_resp = T;
# --- Debugging -- should really be a separate policy ------------------
# Comment out to disable debugging output:
#global adu_debug = T;
# Uncomment to enable tests:
#global adu_test = T;
@ifdef (adu_debug)
function DBG(msg: string) { print fmt("DBG[adu.bro]: %s", msg); }
@else
function DBG(msg: string) { }
@endif
export {
# --- Constants --------------------------------------------------------
# The maximum depth in bytes up to which we follow a flow.
# This is counting bytes seen in both directions.
const adu_conn_max_depth = 100000 &redef;
# The maximum message depth that we report.
const adu_max_depth = 3 &redef;
# The maximum message size in bytes that we report.
const adu_max_size = 1000 &redef;
# Whether ADUs are reported beyond content gaps.
const adu_gaps_ok = F &redef;
# --- Types ------------------------------------------------------------
# adu_state records contain the latest ADU and aditional flags to help
# the user identify the direction of the message, its depth in the flow,
# etc.
type adu_state: record {
adu: string &default = ""; # the current ADU
# Message counter (>= 1), orig->resp and resp->orig.
depth_tx: count &default = 1;
depth_rx: count &default = 1;
# TCP: seqno tracking to recognize gaps.
seen_tx: count &default = 0;
seen_rx: count &default = 0;
size: count &default = 0; # total connection size in bytes
is_orig: bool &default = F; # whether ADU is orig->resp
ignore: bool &default = F; # ignore future activity on conn
};
# Tell the ADU policy that you do not wish to receive further
# adu_tx/adu_rx events for a given connection. Other policies
# may continue to process the connection.
#
global adu_skip_further_processing: function(cid: conn_id);
}
# --- Globals ----------------------------------------------------------
# A global table that tracks each flow's messages.
global adu_conns: table[conn_id] of adu_state;
# Testing invokes the following events.
global adu_tx: event(c: connection, astate: adu_state);
global adu_rx: event(c: connection, astate: adu_state);
global adu_done: event(c: connection);
# --- Functions --------------------------------------------------------
function adu_skip_further_processing(cid: conn_id)
{
if ( cid !in adu_conns )
return;
adu_conns[cid]$ignore = T;
}
function flow_contents(c: connection, is_orig: bool, seq: count, contents: string)
{
local astate: adu_state;
DBG(fmt("contents %s, %s: %s", id_string(c$id), is_orig, contents));
# Ensure we track the given connection.
if ( c$id !in adu_conns )
adu_conns[c$id] = astate;
else
astate = adu_conns[c$id];
# Forget it if we've been asked to ignore.
#
if ( astate$ignore == T )
return;
# Don't report if flow is too big.
#
if ( astate$size >= adu_conn_max_depth )
return;
# If we have an assembled message, we may now have something
# to report.
if ( |astate$adu| > 0 )
{
# If application-layer data flow is switching
# from resp->orig to orig->resp, report the assembled
# message as a received ADU.
if ( is_orig && ! astate$is_orig )
{
event adu_rx(c, copy(astate));
astate$adu = "";
if ( ++astate$depth_rx > adu_max_depth )
adu_skip_further_processing(c$id);
}
# If application-layer data flow is switching
# from orig->resp to resp->orig, report the assembled
# message as a transmitted ADU.
#
if ( !is_orig && astate$is_orig )
{
event adu_tx(c, copy(astate));
astate$adu = "";
if ( ++astate$depth_tx > adu_max_depth )
adu_skip_further_processing(c$id);
}
}
# Check for content gaps. If we identify one, only continue
# if user allowed it.
#
if ( !adu_gaps_ok && seq > 0 )
{
if ( is_orig )
{
if ( seq > astate$seen_tx + 1 )
return;
else
astate$seen_tx += |contents|;
}
else
{
if ( seq > astate$seen_rx + 1 )
return;
else
astate$seen_rx += |contents|;
}
}
# Append the contents to the end of the currently
# assembled message, if the message hasn't already
# reached the maximum size.
#
if ( |astate$adu| < adu_max_size )
{
astate$adu += contents;
# As a precaution, clip the string to the maximum
# size. A long content string with astate$adu just
# below its maximum allowed size could exceed that
# limit by a lot.
### str_clip(astate$adu, adu_max_size);
}
# Note that this counter is bumped up even if we have
# exceeded the maximum size of an individual message.
#
astate$size += |contents|;
astate$is_orig = is_orig;
}
# --- Event Handlers ---------------------------------------------------
event tcp_contents(c: connection, is_orig: bool, seq: count, contents: string)
{
flow_contents(c, is_orig, seq, contents);
}
event udp_contents(u: connection, is_orig: bool, contents: string)
{
flow_contents(u, is_orig, 0, contents);
}
event connection_state_remove(c: connection)
{
if ( c$id !in adu_conns )
return;
local astate = adu_conns[c$id];
# Forget it if we've been asked to ignore.
#
if ( astate$ignore == T )
return;
# Report the remaining data now, if any.
#
if ( |astate$adu| > 0 ) {
if ( astate$is_orig )
{
if ( astate$depth_tx <= adu_max_depth )
event adu_tx(c, copy(astate));
}
else
{
if ( astate$depth_rx <= adu_max_depth )
event adu_rx(c, copy(astate));
}
}
delete adu_conns[c$id];
event adu_done(c);
}
# --- Tests ------------------------------------------------------------
@ifdef (adu_test)
event adu_tx(c: connection, astate: adu_state)
{
print fmt("%s ---- %s, %d -> ----", network_time(), id_string(c$id), astate$depth_tx);
# print astate$adu;
}
event adu_rx(c: connection, astate: adu_state)
{
print fmt("%s ---- %s, %d <- ----", network_time(), id_string(c$id), astate$depth_rx);
# print astate$adu;
}
@endif

View file

@ -1,3 +0,0 @@
# $Id: alarm.bro 340 2004-09-09 06:38:27Z vern $
redef bro_alarm_file = open_log_file("alarm");

View file

@ -1,141 +0,0 @@
@load heavy-analysis
@load OS-fingerprint
@load adu
@load alarm
@load analy
@load anon
@load arp
@load backdoor
@load bittorrent
@load blaster
@load bt-tracker
@load brolite-backdoor
@load capture-events
@load capture-loss
@load capture-state-updates
@load checkpoint
@load clear-passwords
@load conn-flood
@load conn-id
@load conn
@load contents
@load cpu-adapt
@load dce
@load demux
@load detect-protocols-http
@load detect-protocols
@load dhcp
@load dns-info
@load dns-lookup
@load dns
@load dpd
@load drop-adapt
@load dyn-disable
@load file-flush
@load finger
@load firewall
@load flag-irc
@load flag-warez
@load frag
@load ftp
@load gnutella
@load hot-ids
@load hot
@load http-abstract
@load http-anon-server
@load http-anon-useragent
@load http-anon-utils
@load http-body
@load http-detect-passwd
@load http-entity
@load http-event
@load http-header
@load http-identified-files.bro
@load http-reply
@load http-request
@load http-rewriter
@load http
@load icmp
@load ident-rewriter
@load ident
@load inactivity
@load interconn
@load irc-bot-syslog
@load irc-bot
@load irc
@load large-conns
@load listen-clear
@load listen-ssl
@load load-level
@load load-sample
@load log-append
@load login
@load mime-pop
@load mime
@load mt
@load ncp
@load netflow
@load netstats
@load nfs
@load notice-action-filters
@load notice
@load ntp
@load passwords
@load pcap
@load pkt-profile
@load pop3
@load port-name
@load portmapper
@load print-filter
@load print-globals
@load print-resources
@load print-sig-states
@load profiling
@load proxy
@load remote-pcap
@load remote-ping
@load remote-print-id-reply
@load remote-print-id
@load remote-print
@load remote-report-notices
@load remote-send-id
@load remote
@load rotate-logs
@load rsh
@load scan
@load secondary-filter
@load sensor-sshd
@load server-ports
@load service-probe
@load signatures
@load site
@load smb
@load smtp-relay
@load smtp-rewriter
@load smtp
@load snort
@load software
@load ssh
@load ssh-stepping
@load ssl-alerts
@load ssl-ciphers
@load ssl-errors
@load ssl-worm
@load ssl
@load stats
@load stepping
@load synflood
@load targeted-scan
@load tcp
@load tftp
@load trw-impl
@load trw
@load udp-common
@load udp
@load vlan
@load weird
@load worm
@load notice-policy
# The following keeps us running after the bro_init event.
redef PrintFilter::terminate_bro = F;

View file

@ -1,16 +0,0 @@
# Statistical analysis of TCP connection in terms of the packet streams
# in each direction.
@load dns-lookup
@load udp
event conn_stats(c: connection, os: endpoint_stats, rs: endpoint_stats)
{
local id = c$id;
print fmt("%.6f %s %s %s %s %s %s %s %s %s",
c$start_time, c$duration, id$orig_p, id$resp_p,
conn_size(c$orig, tcp), conn_size(c$resp, tcp),
id$orig_h, id$resp_h, os, rs);
}

View file

@ -1,193 +0,0 @@
# $Id: anon.bro 6889 2009-08-21 16:45:17Z vern $
redef anonymize_ip_addr = T;
const orig_addr_anonymization = RANDOM_MD5 &redef;
const resp_addr_anonymization = RANDOM_MD5 &redef;
const other_addr_anonymization = SEQUENTIALLY_NUMBERED &redef;
const preserve_orig_addr: set[addr] = {} &redef;
const preserve_resp_addr: set[addr] = {} &redef;
const preserve_other_addr: set[addr] = {
0.0.0.0,
} &redef;
const preserved_subnet: set[subnet] = {
# 192.150.186/23,
} &redef;
const preserved_net: set[net] = {
# 192.150.186, 192.150.187,
} &redef;
global anon_log = open_log_file("anon") &redef;
global anonymized_args: table[string] of string;
global ip_anon_mapping: set[addr, addr];
event bro_init()
{
for ( n in preserved_net )
preserve_net(n);
}
function anonymize_address(a: addr, id: conn_id): addr
{
if ( a == id$orig_h )
return anonymize_addr(a, ORIG_ADDR);
else if ( a == id$resp_h )
return anonymize_addr(a, RESP_ADDR);
else
return anonymize_addr(a, OTHER_ADDR);
}
event anonymization_mapping(orig: addr, mapped: addr)
{
if ( [orig, mapped] !in ip_anon_mapping )
{
add ip_anon_mapping[orig, mapped];
print anon_log, fmt("%s -> %s", orig, mapped);
}
}
function string_anonymized(from: string, to: string, seed: count)
{
print anon_log, fmt("\"%s\" %d=> \"%s\"", from, seed, to);
}
global num_string_id: count = 0 &redef;
global anonymized_strings: table[string] of record {
s: string;
c: count;
} &redef;
# Hopefully, the total number of strings to anonymize is much less than
# 36^unique_string_length.
const unique_string_length = 8 &redef;
# const anonymized_string_pattern = /U[0-9a-f]+U/;
global unique_string_set: set[string];
event bro_init()
{
for ( s in anonymized_strings )
add unique_string_set[anonymized_strings[s]$s];
}
function unique_string(s: string, seed: count): string
{
local t = cat("U", sub_bytes(md5_hmac(seed, s),
1, unique_string_length), "U");
if ( t in unique_string_set )
return unique_string(s, seed+1);
anonymized_strings[s] = [$s = t, $c = 1];
add unique_string_set[t];
string_anonymized(s, t, seed);
return t;
}
function anonymize_string(from: string): string
{
if ( from in anonymized_strings )
{
++anonymized_strings[from]$c;
return anonymized_strings[from]$s;
}
local t = unique_string(from, 0);
return t;
}
function anonymize_arg(typ: string, arg: string): string
{
if ( arg == "" )
return ""; # an empty argument is safe
local arg_seed = string_cat(typ, arg);
if ( arg_seed in anonymized_args )
return anonymized_args[arg_seed];
local a = anonymize_string(arg_seed);
anonymized_args[arg_seed] = a;
print anon_log, fmt("anonymize_arg: (%s) {%s} -> %s ",
typ, to_string_literal(arg), to_string_literal(a));
return a;
}
# Does not contain ? and ends with an allowed suffix.
const path_to_file_pat =
/\/[^?]+\.(html|ico|icon|pdf|ps|doc|ppt|htm|js|crl|swf|shtml|h|old|c|cc|java|class|src|cfm|gif|jpg|php|rdf|rss|asp|bmp|owl|phtml|jpeg|jsp|cgi|png|txt|xml|css|avi|tex|dvi)/
;
# Acceptable domain names.
const kosher_dom_pat =
/ar|au|biz|br|ca|cc|cl|cn|co|com|cx|cz|de|ec|es|edu|fi|fm|fr|gov|hn|il|is|it|jp|lv|mx|net|no|nz|org|pe|pl|ru|sk|tv|tw|uk|us|arpa/
;
# Simple filename pattern.
const simple_filename =
/[0-9\-A-Za-z]+\.(html|ico|icon|pdf|ps|doc|ppt|htm|js|crl|swf|shtml|h|old|c|cc|java|class|src|cfm|gif|jpg|php|rdf|rss|asp|bmp|owl|phtml|jpeg|jsp|cgi|png|txt|xml|css|avi|tex|dvi)/
;
function anonymize_path(path: string): string
{
local hashed_path = "";
if ( to_lower(path) != path_to_file_pat )
{
hashed_path = anonymize_arg("path", path);
return hashed_path;
}
local file_parts = split(path, /\./);
local i = 1;
for ( part in file_parts )
{
# This looks broken to me - VP.
hashed_path = fmt("%s.%s", hashed_path, file_parts[i]);
if ( ++i == length(file_parts) )
break;
}
return fmt("%s.%s", anonymize_arg("path", hashed_path), file_parts[i]);
}
function anonymize_host(host: string): string
{
local hashed_host = "";
local host_parts = split(host, /\./);
local i = 1;
for ( hosty in host_parts )
{
if ( i == length(host_parts) )
break;
# Check against "kosher" tld list.
hashed_host = fmt("%s%s.", hashed_host,
anonymize_arg("host", host_parts[i]));
++i;
}
if ( host_parts[i] == kosher_dom_pat )
return string_cat(hashed_host, host_parts[i]);
print anon_log, fmt("anonymize_host: non-kosher domain %s", host);
return string_cat(hashed_host, anonymize_arg("host", host_parts[i]));
}
event bro_done()
{
for ( s in anonymized_strings )
{
print anon_log, fmt("appearance: %d: \"%s\" => \"%s\"",
anonymized_strings[s]$c, s, anonymized_strings[s]$s);
}
}

View file

@ -1,160 +0,0 @@
# $Id: arp.bro 4909 2007-09-24 02:26:36Z vern $
@load notice
module ARP;
export {
redef enum Notice += {
ARPSourceMAC_Mismatch, # source MAC doesn't match mappings
ARPAddlMAC_Mapping, # another MAC->addr seen beyond just one
ARPUnsolicitedReply, # could be poisoning; or just gratuitous
# ARPRequestProvidesTargetAddr, # request includes non-triv addr
# MAC/addr pair seen in request/reply different from
# that in the cache.
ARPCacheInconsistency,
# ARP reply gives different value than previously seen.
ARPMappingChanged,
};
const arp_log = open_log_file("arp") &redef;
}
redef capture_filters += { ["arp"] = "arp" };
# Abbreviations taken from RFC 826:
#
# SHA: source hardware address
# SPA: source protocol address (i.e., IP address)
# THA: target hardware address
# TPA: target protocol address
# ARP requests indexed on SHA/SPA/TPA (no THA, as it's what it's being
# queried).
global arp_requests: set[string, addr, addr] &create_expire = 1 min;
# ARP responses we've seen: indexed by IP address, yielding MAC address.
global ARP_cache: table[addr] of string;
# Bad ARPs can occur when:
# - type/size pairs are not OK for HW and L3 addresses (Ethernet=6, IP=4)
# - opcode is neither request (1) nor reply (2)
# - MAC src address != ARP sender MAC address
event bad_arp(SPA: addr, SHA: string, TPA: addr, THA: string,
explanation: string)
{
print arp_log, fmt("%.06f bad-arp %s(%s) ? %s(%s): %s",
network_time(), SPA, SHA, TPA, THA, explanation);
}
# The first of these maps a MAC address to the last protocol address seen
# for it. The second tracks every protocol address seen.
global mac_addr_map: table[string] of addr;
global mac_addr_associations: table[string] of set[addr];
# A somewhat general notion of broadcast MAC/IP addresses.
const broadcast_mac_addrs = { "00:00:00:00:00:00", "ff:ff:ff:ff:ff:ff", };
const broadcast_addrs = { 0.0.0.0, 255.255.255.255, };
# Called to note that we've seen an association between a MAC address
# and an IP address. Note that this is *not* an association advertised
# in an ARP reply (those are tracked in ARP_cache), but instead the
# pairing of hardware address + protocol address as expressed in
# an ARP request or reply header.
function mac_addr_association(mac_addr: string, a: addr)
{
# Ignore placeholders.
if ( mac_addr in broadcast_mac_addrs || a in broadcast_addrs )
return;
local is_addl = F;
if ( mac_addr in mac_addr_associations )
is_addl = a !in mac_addr_associations[mac_addr];
else
mac_addr_associations[mac_addr] = set();
print arp_log, fmt("%.06f association %s -> %s%s", network_time(),
mac_addr, a, is_addl ? " <addl>" : "");
mac_addr_map[mac_addr] = a;
add mac_addr_associations[mac_addr][a];
if ( a in ARP_cache && ARP_cache[a] != mac_addr )
NOTICE([$note=ARPCacheInconsistency, $src=a,
$msg=fmt("mapping for %s to %s doesn't match cache of %s",
mac_addr, a, ARP_cache[a])]);
}
# Returns the IP address associated with a MAC address, if we've seen one.
# Otherwise just returns the MAC address.
function addr_from_mac(mac_addr: string): string
{
return mac_addr in mac_addr_map ?
fmt("%s", mac_addr_map[mac_addr]) : mac_addr;
}
event arp_request(mac_src: string, mac_dst: string, SPA: addr, SHA: string,
TPA: addr, THA: string)
{
mac_addr_association(SHA, SPA);
local msg = fmt("%s -> %s who-has %s",
addr_from_mac(mac_src), addr_from_mac(mac_dst), TPA);
local mismatch = SHA != mac_src;
if ( mismatch )
NOTICE([$note=ARPSourceMAC_Mismatch, $src=SPA, $msg=msg]);
# It turns out that some hosts fill in the THA field even though
# that doesn't make sense. (The RFC specifically allows this,
# however.) Perhaps there's an attack that can be launched
# doing so, but it's hard to see what it might be, so for now
# we don't bother notice'ing these.
# if ( THA !in broadcast_addrs )
# NOTICE([$note=ARPRequestProvidesTargetAddr, $src=SPA,
# $msg=fmt("%s: %s", msg, THA)]);
print arp_log, fmt("%.06f %s%s", network_time(), msg,
mismatch ? " <source-mismatch>" : "");
add arp_requests[SHA, SPA, TPA];
}
event arp_reply(mac_src: string, mac_dst: string, SPA: addr, SHA: string,
TPA: addr, THA: string)
{
mac_addr_association(SHA, SPA);
mac_addr_association(THA, TPA);
local msg = fmt("%s -> %s: %s is-at %s",
addr_from_mac(mac_src), addr_from_mac(mac_dst),
SPA, SHA);
local unsolicited = [THA, TPA, SPA] !in arp_requests;
delete arp_requests[THA, TPA, SPA];
if ( unsolicited )
NOTICE([$note=ARPUnsolicitedReply, $src=SPA,
$msg=fmt("%s: request[%s, %s, %s]", msg, THA, TPA, SPA)]);
local mismatch = SHA != mac_src;
if ( mismatch )
NOTICE([$note=ARPSourceMAC_Mismatch, $src=SPA, $msg=msg]);
local mapping_changed = SPA in ARP_cache && ARP_cache[SPA] != SHA;
if ( mapping_changed )
NOTICE([$note=ARPMappingChanged, $src=SPA,
$msg=fmt("%s: was %s", msg, ARP_cache[SPA])]);
print arp_log, fmt("%.06f %s%s%s%s", network_time(), msg,
unsolicited ? " <unsolicited>" : "",
mismatch ? " <source-mismatch>" : "",
mapping_changed ?
fmt(" <changed from %s>", ARP_cache[SPA]) : "");
ARP_cache[SPA] = SHA;
}

View file

@ -1,559 +0,0 @@
# $Id: backdoor.bro 4909 2007-09-24 02:26:36Z vern $
# Looks for a variety of applications running on ports other than
# their usual ports.
#
# Note that this script by itself does *not* change capture_filters
# to add in the extra ports to look at. You need to specify that
# separately.
# Some tcpdump filters can be used to replace or work together with
# some detection algorithms. They could be used with the "secondary
# filter" for more efficient (but in some cases potentially less reliable)
# matching:
#
# - looking for "SSH-1." or "SSH-2." at the beginning of the packet;
# somewhat weaker than ssh-sig in that ssh-sig only looks for such
# pattern in the first packet of a connection:
#
# tcp[(tcp[12]>>2):4] = 0x5353482D and
# (tcp[((tcp[12]>>2)+4):2] = 0x312e or tcp[((tcp[12]>>2)+4):2] = 0x322e)
#
# - looking for pkts with 8k+4 (<=128) bytes of data (combined with ssh-len);
# only effective for ssh 1.x:
#
# (ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12]>>2)) & 0xFF87 = 4
#
# - looking for packets with <= 512 bytes of data that ends with a NUL
# (can be potentially combined with rlogin-sig or rlogin-sig-1byte):
#
# (tcp[(ip[2:2] - ((ip[0]&0x0f)<<2))-1] == 0) and
# ((ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12]>>2)) != 0) and
# ((ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12]>>2)) <= 512)
#
# - looking for telnet negotiation (can be combined with telnet-sig(-3byte)):
#
# (tcp[(tcp[12]>>2):2] > 0xfffa) and
# (tcp[(tcp[12]>>2):2] < 0xffff) and
# ((ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12] >> 2)) >= 3)
#
# - looking for packets with <= 20 bytes of data (combined with small-pkt):
#
# (ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12]>>2)) <= 20
#
# - looking for FTP servers by the initial "220-" or "220 " sent by the server:
#
# tcp[(tcp[12]>>2):4] = 0x3232302d or tcp[(tcp[12]>>2):4] = 0x32323020
#
# - looking for root backdoors by seeing a server payload of exactly "# ":
#
# tcp[(tcp[12]>>2):2] = 0x2320 and
# (ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12]>>2)) == 2
#
# - looking for Napster by the initial "GET" or "SEND" sent by the originator:
#
# ((ip[2:2]-((ip[0]&0x0f)<<2)-(tcp[12]>>2))=4 and
# tcp[(tcp[12]>>2):4]=0x53454e44) or
# ((ip[2:2]-((ip[0]&0x0f)<<2)-(tcp[12]>>2))=3 and
# tcp[(tcp[12]>>2):2]=0x4745 and tcp[(tcp[12]>>2)+2]=0x54)
#
# - looking for Gnutella handshaking "GNUTELLA "
#
# tcp[(tcp[12]>>2):4] = 0x474e5554 and
# tcp[(4+(tcp[12]>>2)):4] = 0x454c4c41 and
# tcp[8+(tcp[12]>>2)] = 0x20
#
# - looking for KaZaA via "GIVE " (not present in all connections)
#
# tcp[(tcp[12]>>2):4] = 0x47495645 and
# tcp[(4+(tcp[12]>>2)):1] = 0x20
#
@load site
@load port-name
@load demux
@load notice
redef enum Notice += { BackdoorFound, };
# Set to dump the packets that trigger the backdoor detector to a file.
const dump_backdoor_packets = F &redef;
redef backdoor_stat_period = 60 sec;
redef backdoor_stat_backoff = 2.0;
const ssh_min_num_pkts = 8 &redef;
const ssh_min_ssh_pkts_ratio = 0.6 &redef;
const backdoor_min_num_lines = 2 &redef;
const backdoor_min_normal_line_ratio = 0.5 &redef;
const backdoor_min_bytes = 10 &redef;
const backdoor_min_7bit_ascii_ratio = 0.75 &redef;
type rlogin_conn_info : record {
o_num_null: count;
o_len: count;
r_num_null: count;
r_len: count;
};
const backdoor_demux_disabled = T &redef;
const backdoor_demux_skip_tags: set[string] &redef;
const ftp_backdoor_sigs = "ftp-sig";
const ssh_backdoor_sigs = { "ssh-sig", "ssh-len-v1.x", "ssh-len-v2.x" };
const rlogin_backdoor_sigs = { "rlogin-sig", "rlogin-sig-1byte" };
const root_backdoor_sigs = "root-bd-sig";
const telnet_backdoor_sigs = { "telnet-sig", "telnet-sig-3byte" };
const napster_backdoor_sigs = "napster-sig";
const gnutella_backdoor_sigs = "gnutella-sig";
const kazaa_backdoor_sigs = "kazaa-sig";
const http_backdoor_sigs = "http-sig";
const http_proxy_backdoor_sigs = "http-proxy-sig";
const smtp_backdoor_sigs = "smtp-sig";
const irc_backdoor_sigs = "irc-sig";
const gaobot_backdoor_sigs = "gaobot-sig";
# List of backdoors, so you can use it when defining sets and tables
# with values over all of them.
const backdoor_sigs = {
ftp_backdoor_sigs, ssh_backdoor_sigs, rlogin_backdoor_sigs,
root_backdoor_sigs, telnet_backdoor_sigs,
napster_backdoor_sigs, gnutella_backdoor_sigs, kazaa_backdoor_sigs,
http_backdoor_sigs, http_proxy_backdoor_sigs,
smtp_backdoor_sigs, irc_backdoor_sigs, gaobot_backdoor_sigs,
};
# List of address-port pairs that if present in a backdoor are ignored.
# Note that these can be either the client and its source port (unusual)
# or the server and its service port (the common case).
const backdoor_ignore_host_port_pairs: set[addr, port] &redef;
const backdoor_ignore_ports: table[string, port] of bool = {
# The following ignore backdoors that are detected on their
# usual ports. The definitions for ftp-sig, telnet-sig and
# telnet-sig-3byte are somehwat broad since those backdoors
# are also frequently triggered for other similar protocols.
[ftp_backdoor_sigs, [ftp, smtp, 587/tcp ]] = T,
[ssh_backdoor_sigs, ssh] = T,
[rlogin_backdoor_sigs , [512/tcp, rlogin, 514/tcp]] = T,
[root_backdoor_sigs, [telnet, 512/tcp, rlogin, 514/tcp]] = T,
[telnet_backdoor_sigs, [telnet, ftp, smtp, 143/tcp, 110/tcp]] = T,
# The following don't have well-known ports (well, Napster does
# somewhat, as shown below), hence the definitions are F rather
# than T.
[napster_backdoor_sigs, [6688/tcp, 6699/tcp]] = F,
[gnutella_backdoor_sigs, 6346/tcp] = F,
[kazaa_backdoor_sigs, 1214/tcp] = F,
[http_backdoor_sigs, [http, 8000/tcp, 8080/tcp]] = T,
[smtp_backdoor_sigs, [smtp, 587/tcp]] = T,
# Skip FTP, as "USER foo" generates false positives. There's
# also a lot of IRC on 7000/tcp.
[irc_backdoor_sigs, [ftp, 6666/tcp, 6667/tcp, 7000/tcp]] = T,
# The following are examples of wildcards, and since they're defined
# to be F, they don't affect the policy unless redefined.
["*", http] = F, # entry for "any backdoor, service http"
["ssh-sig", 0/tcp] = F, # entry for "ssh-sig, any port"
} &redef &default = F;
# Indexed by the backdoor, indicates which backdoors residing on
# a local (remote) host should be ignored.
const backdoor_ignore_local: set[string] &redef;
const backdoor_ignore_remote: set[string] &redef;
# Indexed by the source (destination) address and the backdoor.
# Also indexed by the /24 and /16 versions of the source address.
# backdoor "*" means "all backdoors".
const backdoor_ignore_src_addrs: table[string, addr] of bool &redef &default=F;
const backdoor_ignore_dst_addrs: table[string, addr] of bool &redef &default=F;
const backdoor_standard_ports = {
telnet, rlogin, 512/tcp, 514/tcp, ftp, ssh, smtp, 143/tcp,
110/tcp, 6667/tcp,
} &redef;
const backdoor_annotate_standard_ports = T &redef;
const backdoor_ignore_hosts: set[addr] &redef;
const backdoor_ignore_src_nets: set[subnet] &redef;
const backdoor_ignore_dst_nets: set[subnet] &redef;
# Most backdoors are enabled by default, but a few are disabled by
# default (T below) because they generated too many false positives
# (or, for HTTP, too many uninteresting true positives).
const ftp_sig_disabled = F &redef;
const gaobot_sig_disabled = F &redef;
const gnutella_sig_disabled = F &redef;
const http_proxy_sig_disabled = T &redef;
const http_sig_disabled = T &redef;
const irc_sig_disabled = F &redef;
const kazaa_sig_disabled = F &redef;
const napster_sig_disabled = F &redef;
const rlogin_sig_1byte_disabled = T &redef;
const rlogin_sig_disabled = T &redef;
const root_backdoor_sig_disabled = T &redef;
const smtp_sig_disabled = F &redef;
# Note, for the following there's a corresponding variable
# interconn_ssh_len_disabled in interconn.bro.
const ssh_len_disabled = T &redef;
const ssh_sig_disabled = F &redef;
const telnet_sig_3byte_disabled = T &redef;
const telnet_sig_disabled = T &redef;
global ssh_len_conns: set[conn_id];
global rlogin_conns: table[conn_id] of rlogin_conn_info;
global root_backdoor_sig_conns: set[conn_id];
global did_sig_conns: table[conn_id] of set[string];
const BACKDOOR_UNKNOWN = 0;
const BACKDOOR_YES = 1;
const BACKDOOR_NO = 2;
const BACKDOOR_SIG_FOUND = 3;
global telnet_sig_conns: table[conn_id] of count;
global telnet_sig_3byte_conns: table[conn_id] of count;
global smtp_sig_conns: table[conn_id] of count;
global irc_sig_conns: table[conn_id] of count;
global gaobot_sig_conns: table[conn_id] of count;
const backdoor_log = open_log_file("backdoor") &redef;
# Decide whether a report for backdoor "bd" on connection c should be
# suppressed, consulting the port/direction/address ignore tables. If no
# entry matches for this specific backdoor, the check is repeated once
# with the wildcard name "*".
function ignore_backdoor_conn(c: connection, bd: string): bool
	{
	local src = c$id$orig_h;
	local dst = c$id$resp_h;
	local src_p = c$id$orig_p;
	local dst_p = c$id$resp_p;

	# Port-based suppression, including the 0/tcp wildcard entry.
	if ( backdoor_ignore_ports[bd, src_p] ||
	     backdoor_ignore_ports[bd, dst_p] ||
	     backdoor_ignore_ports[bd, 0/tcp] )
		return T;

	# Direction-based suppression (local vs. remote responder).
	if ( dst in local_nets && bd in backdoor_ignore_local )
		return T;

	if ( dst !in local_nets && bd in backdoor_ignore_remote )
		return T;

	# Address-based suppression, also matching /16 and /24 masks.
	if ( backdoor_ignore_src_addrs[bd, src] ||
	     backdoor_ignore_src_addrs[bd, mask_addr(src, 16)] ||
	     backdoor_ignore_src_addrs[bd, mask_addr(src, 24)] )
		return T;

	if ( backdoor_ignore_dst_addrs[bd, dst] ||
	     backdoor_ignore_dst_addrs[bd, mask_addr(dst, 16)] ||
	     backdoor_ignore_dst_addrs[bd, mask_addr(dst, 24)] )
		return T;

	if ( [src, src_p] in backdoor_ignore_host_port_pairs ||
	     [dst, dst_p] in backdoor_ignore_host_port_pairs )
		return T;

	# Evaluate again, but for wildcarding the backdoor.
	return bd != "*" ? ignore_backdoor_conn(c, "*") : F;
	}
# Report a detected backdoor on connection c, tagged with the detector
# name. Returns F if the report was suppressed, T otherwise.
function log_backdoor(c: connection, tag: string): bool
{
if ( ignore_backdoor_conn(c, tag) )
return F;
local id = c$id;
# On well-known service ports, just annotate the connection record
# instead of raising a notice.
if ( backdoor_annotate_standard_ports &&
(id$orig_p in backdoor_standard_ports ||
id$resp_p in backdoor_standard_ports) )
append_addl(c, fmt("[%s]", tag));
else if ( id$orig_h in backdoor_ignore_hosts ||
id$resp_h in backdoor_ignore_hosts ||
id$orig_h in backdoor_ignore_src_nets ||
id$resp_h in backdoor_ignore_dst_nets )
return F;
else
{
print backdoor_log, fmt("%.6f %s > %s %s",
c$start_time,
endpoint_id(id$orig_h, id$orig_p),
endpoint_id(id$resp_h, id$resp_p),
tag);
NOTICE([$note=BackdoorFound, $msg=tag, $conn=c]);
# Optionally save the triggering packet for later inspection.
if ( dump_backdoor_packets )
{
mkdir("backdoor-packets");
local fname = fmt("backdoor-packets/%s:%.2f",
tag, current_time());
dump_current_packet(fname);
}
# Either stop further analysis of this connection, or demux its
# two directions into separate files.
if ( backdoor_demux_disabled ||
tag in backdoor_demux_skip_tags )
{
if ( active_connection(c$id) )
skip_further_processing(c$id);
}
else
demux_conn(id, tag, "orig", "resp");
}
return T;
}
# Seed per-connection rlogin state, but only when at least one of the
# two rlogin signature variants is active.
event new_connection(c: connection)
	{
	if ( rlogin_sig_disabled && rlogin_sig_1byte_disabled )
		return;

	local info: rlogin_conn_info;
	info$o_num_null = 0;
	info$o_len = 0;
	info$r_num_null = 0;
	info$r_len = 0;

	rlogin_conns[c$id] = info;
	}
# Drop all per-connection detector state when the connection goes away.
event backdoor_remove_conn(c: connection)
	{
	local cid = c$id;

	delete ssh_len_conns[cid];
	delete telnet_sig_conns[cid];
	delete telnet_sig_3byte_conns[cid];
	delete rlogin_conns[cid];
	delete root_backdoor_sig_conns[cid];
	delete smtp_sig_conns[cid];
	delete irc_sig_conns[cid];
	delete gaobot_sig_conns[cid];
	delete did_sig_conns[cid];
	}
# Root-shell backdoor signature handler. Unlike the generic
# signature_found() path, standard ports are deliberately NOT ignored
# here (see comment below); reports once per connection.
event root_backdoor_signature_found(c: connection)
{
if ( root_backdoor_sig_disabled ||
ignore_backdoor_conn(c, "root-bd-sig") )
return;
local id = c$id;
# For root backdoors, don't ignore standard ports. This is because
# we shouldn't see such a backdoor even 23/tcp or 513/tcp!
if ( id !in root_backdoor_sig_conns )
{
add root_backdoor_sig_conns[id];
log_backdoor(c, "root-bd-sig");
}
}
# Common handler for all per-protocol signature events: honor the
# per-signature kill switch and ignore lists, then log each signature
# at most once per connection (tracked in did_sig_conns).
function signature_found(c: connection, sig_disabled: bool, sig_name: string)
	{
	if ( sig_disabled || ignore_backdoor_conn(c, sig_name) )
		return;

	local cid = c$id;

	if ( cid !in did_sig_conns )
		did_sig_conns[cid] = set();

	if ( sig_name in did_sig_conns[cid] )
		return;

	add did_sig_conns[cid][sig_name];
	log_backdoor(c, sig_name);
	}
# Thin per-protocol wrappers: each forwards its signature event to
# signature_found() together with the matching kill switch and tag.
event ftp_signature_found(c: connection)
{
signature_found(c, ftp_sig_disabled, "ftp-sig");
}
event napster_signature_found(c: connection)
{
signature_found(c, napster_sig_disabled, "napster-sig");
}
event gnutella_signature_found(c: connection)
{
signature_found(c, gnutella_sig_disabled, "gnutella-sig");
}
event kazaa_signature_found(c: connection)
{
signature_found(c, kazaa_sig_disabled, "kazaa-sig");
}
event http_signature_found(c: connection)
{
signature_found(c, http_sig_disabled, "http-sig");
}
event http_proxy_signature_found(c: connection)
{
signature_found(c, http_proxy_sig_disabled, "http-proxy-sig");
}
# is_orig is unused; the generic handler does not need direction.
event ssh_signature_found(c: connection, is_orig: bool)
{
signature_found(c, ssh_sig_disabled, "ssh-sig");
}
event smtp_signature_found(c: connection)
{
signature_found(c, smtp_sig_disabled, "smtp-sig");
}
event irc_signature_found(c: connection)
{
signature_found(c, irc_sig_disabled, "irc-sig");
}
event gaobot_signature_found(c: connection)
{
signature_found(c, gaobot_sig_disabled, "gaobot-sig");
}
# Mark a connection as having matched the telnet signature(s); the
# telnet_stats() heuristic later promotes BACKDOOR_SIG_FOUND entries
# to confirmed hits.
event telnet_signature_found(c: connection, is_orig: bool, len: count)
	{
	if ( ignore_backdoor_conn(c, "telnet-sig") )
		return;

	local cid = c$id;

	if ( ! telnet_sig_disabled && cid !in telnet_sig_conns )
		telnet_sig_conns[cid] = BACKDOOR_SIG_FOUND;

	if ( ! telnet_sig_3byte_disabled && len == 3 &&
	     cid !in telnet_sig_3byte_conns )
		telnet_sig_3byte_conns[cid] = BACKDOOR_SIG_FOUND;
	}
# Correlate rlogin-signature observations from both directions; once
# each side has reported, decide which variant(s) to log. Per-connection
# state lives in rlogin_conns (seeded in new_connection above).
event rlogin_signature_found(c: connection, is_orig: bool,
num_null: count, len: count)
{
local id = c$id;
if ( (rlogin_sig_disabled && rlogin_sig_1byte_disabled) ||
ignore_backdoor_conn(c, "rlogin-sig") )
return;
local ri = rlogin_conns[id];
# Record only the first observation per direction.
if ( is_orig && ri$o_num_null == 0 )
ri$o_num_null = num_null;
else if ( ! is_orig && ri$r_num_null == 0 )
{
ri$r_num_null = num_null;
ri$r_len = len;
}
else
return;
# Wait until both directions have been seen.
if ( ri$o_num_null == 0 || ri$r_num_null == 0 )
return;
if ( ! rlogin_sig_1byte_disabled && ri$r_len == 1 )
log_backdoor(c, "rlogin-sig-1byte");
if ( ! rlogin_sig_disabled )
log_backdoor(c, "rlogin-sig");
}
# Packet-length heuristic for SSH-like traffic. The 8k0/8k4 counters
# presumably count packets whose payload length is a multiple of 8,
# resp. a multiple of 8 plus 4 (matching SSH-2 / SSH-1 padding) --
# TODO confirm against the event engine. Returns F when no decision
# was attempted for this connection.
function ssh_len_stats(c: connection, os: backdoor_endp_stats,
rs: backdoor_endp_stats) : bool
{
if ( ssh_len_disabled || c$id in ssh_len_conns )
return F;
if ( os$num_pkts == 0 || rs$num_pkts == 0 )
return F;
# xxx: only use ssh-len for partial connection
local is_partial = os$is_partial || rs$is_partial;
if ( ! is_partial )
return F;
# Require a minimum sample size before deciding.
local num_pkts = os$num_pkts + rs$num_pkts;
if ( num_pkts < ssh_min_num_pkts )
return F;
local num_8k0_pkts = os$num_8k0_pkts + rs$num_8k0_pkts;
local num_8k4_pkts = os$num_8k4_pkts + rs$num_8k4_pkts;
local id = c$id;
if ( num_8k0_pkts >= num_pkts * ssh_min_ssh_pkts_ratio )
{
add ssh_len_conns[id];
log_backdoor(c, "ssh-len-v2.x");
}
else if ( num_8k4_pkts >= num_pkts * ssh_min_ssh_pkts_ratio )
{
add ssh_len_conns[id];
log_backdoor(c, "ssh-len-v1.x");
}
return T;
}
# Interactive-session heuristic: if enough lines look "normal" and
# enough bytes are 7-bit ASCII, promote connections previously flagged
# by the telnet signatures (BACKDOOR_SIG_FOUND) to confirmed
# (BACKDOOR_YES) and log them once.
function telnet_stats(c: connection, os: backdoor_endp_stats,
rs: backdoor_endp_stats) : bool
{
local num_lines = os$num_lines + rs$num_lines;
local num_normal_lines = os$num_normal_lines + rs$num_normal_lines;
if ( num_lines < backdoor_min_num_lines ||
num_normal_lines < num_lines * backdoor_min_normal_line_ratio )
return F;
local num_bytes = os$num_bytes + rs$num_bytes;
local num_7bit_ascii = os$num_7bit_ascii + rs$num_7bit_ascii;
if ( num_bytes < backdoor_min_bytes ||
num_7bit_ascii < num_bytes * backdoor_min_7bit_ascii_ratio )
return F;
local id = c$id;
# The != BACKDOOR_YES check makes each promotion log only once.
if ( id in telnet_sig_conns &&
telnet_sig_conns[id] != BACKDOOR_YES )
{
telnet_sig_conns[id] = BACKDOOR_YES;
log_backdoor(c, "telnet-sig");
}
if ( id in telnet_sig_3byte_conns &&
telnet_sig_3byte_conns[id] != BACKDOOR_YES )
{
telnet_sig_3byte_conns[id] = BACKDOOR_YES;
log_backdoor(c, "telnet-sig-3byte");
}
return T;
}
# Per-connection statistics callback: run both heuristics; each ignores
# connections it is not tracking or has already decided on.
event backdoor_stats(c: connection,
os: backdoor_endp_stats, rs: backdoor_endp_stats)
{
telnet_stats(c, os, rs);
ssh_len_stats(c, os, rs);
}

View file

@ -1,277 +0,0 @@
# $Id:$
#
# bittorrent.bro - policy script for analyzing BitTorrent traffic
# ---------------------------------------------------------------
# This code contributed by Nadi Sarrar.
@load dpd
@load weird
module BitTorrent;
export {
# Whether to log the length of PDUs.
global log_pdu_length = T &redef;
}
redef capture_filters += { ["bittorrent"] = "tcp" };
# Choke state of one peer in the wire protocol.
type bt_peer_state: enum {
choked, # peer won't receive any responses to requests (initial state)
unchoked # peer may do requests
};
# Per-direction accounting for a peer-wire connection.
type bt_peer_info: record {
# Total of pure peer wire protocol overhead data (w/o pieces).
protocol_total: count &default = 0;
# State of the peer - choked or unchoked.
state: bt_peer_state &default = choked;
# Total number of seconds the peer was unchoked.
unchoked: interval &default = 0 secs;
# Time of the last received unchoke message.
time_last_unchoked: time;
};
# Full per-connection state: a numeric id for log correlation plus
# one bt_peer_info per direction.
type bt_peer_conn: record {
id: count;
orig: bt_peer_info;
resp: bt_peer_info;
weird: bool &default = F;
};
global bittorrent_log = open_log_file("bittorrent") &redef;
global bt_peer_conns : table[conn_id] of bt_peer_conn;
global peer_conn_count = 0;
# Charge protocol_len bytes of wire-protocol overhead to whichever side
# sent the PDU. Returns the connection's log id, or 0 if untracked.
function record_peer_protocol_traffic(c: connection, is_orig: bool,
					protocol_len: count): count
	{
	if ( c$id !in bt_peer_conns )
		return 0;

	local conn_state = bt_peer_conns[c$id];

	if ( is_orig )
		conn_state$orig$protocol_total += protocol_len;
	else
		conn_state$resp$protocol_total += protocol_len;

	return conn_state$id;
	}
# Transition a peer to choked, crediting the elapsed unchoked time.
function record_choke(pi: bt_peer_info, now: time)
	{
	if ( pi$state != unchoked )
		return;

	pi$state = choked;
	pi$unchoked += now - pi$time_last_unchoked;
	}

# Transition a peer to unchoked, remembering when that happened.
function record_unchoke(pi: bt_peer_info, now: time)
	{
	if ( pi$state != choked )
		return;

	pi$state = unchoked;
	pi$time_last_unchoked = now;
	}
# Return the tracking state for this connection, registering a fresh
# record (with the next sequential log id) on first sight.
function lookup_bt_peer(id: conn_id): bt_peer_conn
	{
	if ( id in bt_peer_conns )
		return bt_peer_conns[id];

	local o_info: bt_peer_info;
	local r_info: bt_peer_info;
	local conn_rec: bt_peer_conn;

	conn_rec$id = ++peer_conn_count;
	conn_rec$orig = o_info;
	conn_rec$resp = r_info;

	bt_peer_conns[id] = conn_rec;
	return conn_rec;
	}
# Build the common log-line prefix: timestamp, peer-connection id, tag,
# endpoints and a direction arrow.
function bt_log_id(id: conn_id, cid: count, tag: string, is_orig: bool): string
{
return fmt("%.6f P%d %s %s:%d %s %s:%d",
network_time(), cid, tag, id$orig_h, id$orig_p,
is_orig ? ">" : "<", id$resp_h, id$resp_p);
}
# Optional "[PDU-len:N]" annotation, controlled by log_pdu_length.
function pdu_log_len(len: count): string
{
return log_pdu_length ? fmt("[PDU-len:%d]", len) : "";
}
# Log a PDU and account its len bytes as protocol overhead. Returns the
# peer-connection id (0 if the connection is untracked).
function log_pdu(c: connection, is_orig: bool, tag: string, len: count): count
{
local cid = record_peer_protocol_traffic(c, is_orig, len);
print bittorrent_log,
fmt("%s %s", bt_log_id(c$id, cid, tag, is_orig),
pdu_log_len(len));
return cid;
}
# As log_pdu(), but with an extra detail string appended to the line.
function log_pdu_str(c: connection, is_orig: bool, tag: string, len: count,
str: string)
{
local cid = record_peer_protocol_traffic(c, is_orig, len);
print bittorrent_log,
fmt("%s %s %s", bt_log_id(c$id, cid, tag, is_orig),
pdu_log_len(len), str);
}
# As log_pdu_str(), but accounts len bytes as overhead while reporting
# n as the PDU length (used for "piece" messages, whose payload is not
# counted as protocol overhead).
function log_pdu_str_n(c: connection, is_orig: bool, tag: string, len: count,
n: count, str: string)
{
local cid = record_peer_protocol_traffic(c, is_orig, len);
print bittorrent_log,
fmt("%s %s %s", bt_log_id(c$id, cid, tag, is_orig),
pdu_log_len(n), str);
}
# Peer-wire handshake (68 bytes). The lookup_bt_peer() call matters for
# its side effect: it registers tracking state for this connection.
event bittorrent_peer_handshake(c: connection, is_orig: bool, reserved: string,
info_hash: string, peer_id: string)
{
local pc = lookup_bt_peer(c$id);
log_pdu_str(c, is_orig, "handshake", 68,
fmt("[peer_id:%s info_hash:%s reserved:%s]",
bytestring_to_hexstr(peer_id),
bytestring_to_hexstr(info_hash),
bytestring_to_hexstr(reserved)));
}
# 4-byte keep-alive message.
event bittorrent_peer_keep_alive(c: connection, is_orig: bool)
{
log_pdu(c, is_orig, "keep-alive", 4);
}
# A choke sent by one side changes the *other* side's peer state.
event bittorrent_peer_choke(c: connection, is_orig: bool)
	{
	if ( log_pdu(c, is_orig, "choke", 5) > 0 )
		{
		local pc_c = bt_peer_conns[c$id];
		record_choke(is_orig ? pc_c$resp : pc_c$orig, network_time());
		}
	}

# Likewise for unchoke.
event bittorrent_peer_unchoke(c: connection, is_orig: bool)
	{
	if ( log_pdu(c, is_orig, "unchoke", 5) > 0 )
		{
		local pc_u = bt_peer_conns[c$id];
		record_unchoke(is_orig ? pc_u$resp : pc_u$orig, network_time());
		}
	}
# Remaining peer-wire messages, each logged with a nominal PDU length.
event bittorrent_peer_interested(c: connection, is_orig: bool)
{
log_pdu(c, is_orig, "interested", 5);
}
event bittorrent_peer_not_interested(c: connection, is_orig: bool)
{
log_pdu(c, is_orig, "not-interested", 5);
}
event bittorrent_peer_have(c: connection, is_orig: bool, piece_index: count)
{
log_pdu(c, is_orig, "have", 9);
}
event bittorrent_peer_bitfield(c: connection, is_orig: bool, bitfield: string)
{
log_pdu_str(c, is_orig, "bitfield", 5 + byte_len(bitfield),
fmt("[bitfield:%s]",
bytestring_to_hexstr(bitfield)));
}
event bittorrent_peer_request(c: connection, is_orig: bool, index: count,
begin: count, length: count)
{
log_pdu_str(c, is_orig, "request", 17,
fmt("[index:%d begin:%d length:%d]", index, begin, length));
}
# Only the 13 header bytes count as overhead; the piece payload itself
# is excluded via log_pdu_str_n().
event bittorrent_peer_piece(c: connection, is_orig: bool, index: count,
begin: count, piece_length: count)
{
log_pdu_str_n(c, is_orig, "piece", 13, 13 + piece_length,
fmt("[index:%d begin:%d piece_length:%d]",
index, begin, piece_length));
}
# NOTE(review): "cancel" uses 7 here while "request" uses 17, although
# both messages have the same wire format -- possibly a typo; confirm
# before changing, as it affects overhead accounting only.
event bittorrent_peer_cancel(c: connection, is_orig: bool, index: count,
begin: count, length: count)
{
log_pdu_str(c, is_orig, "cancel", 7,
fmt("[index:%d begin:%d length:%d]",
index, begin, length));
}
event bittorrent_peer_port(c: connection, is_orig: bool, listen_port: port)
{
log_pdu_str(c, is_orig, "port", 5,
fmt("[listen_port:%s]", listen_port));
}
event bittorrent_peer_unknown(c: connection, is_orig: bool, message_id: count,
data: string)
{
log_pdu_str(c, is_orig, "<unknown>", 5 + byte_len(data),
fmt("[message_id:%d]", message_id));
}
# Malformed peer-wire traffic: mark the connection weird (which changes
# how log_close() reports it) and forward to the generic weird handling.
event bittorrent_peer_weird(c: connection, is_orig: bool, msg: string)
{
local pc = lookup_bt_peer(c$id);
pc$weird = T;
print bittorrent_log,
fmt("%s [%s]", bt_log_id(c$id, pc$id, "<weird>", is_orig), msg);
event conn_weird(msg, c);
}
# Emit the per-direction close record. For weird connections only the
# raw endpoint size is reported, since the overhead/piece split cannot
# be trusted.
function log_close(c: connection, pc: bt_peer_conn, is_orig: bool)
{
local endp = is_orig ? c$orig : c$resp;
local peer_i = is_orig ? pc$orig : pc$resp;
local status =
pc$weird ?
fmt("size:%d", endp$size) :
fmt("unchoked:%.06f size_protocol:%d size_pieces:%d",
peer_i$unchoked, peer_i$protocol_total,
endp$size - peer_i$protocol_total);
print bittorrent_log,
fmt("%s [duration:%.06f %s]",
bt_log_id(c$id, pc$id, "<closed>", is_orig),
c$duration, status);
}
# Final accounting for a tracked peer-wire connection: close out any
# open unchoked interval at the connection's end time, then log both
# directions.
event connection_state_remove(c: connection)
	{
	if ( c$id !in bt_peer_conns )
		return;

	local pc = bt_peer_conns[c$id];
	delete bt_peer_conns[c$id];

	local end_time = c$start_time + c$duration;
	record_choke(pc$orig, end_time);
	record_choke(pc$resp, end_time);

	log_close(c, pc, T);
	log_close(c, pc, F);
	}

View file

@ -1,52 +0,0 @@
# $Id: blaster.bro 5952 2008-07-13 19:45:15Z vern $
#
# Identifies W32.Blaster-infected hosts by observing their scanning
# activity.
@load notice
@load site
# Which hosts have scanned which addresses via 135/tcp.
global w32b_scanned: table[addr] of set[addr] &write_expire = 5min;
# Hosts already reported; &persistent so reports survive restarts.
global w32b_reported: set[addr] &persistent;
# The DCOM/RPC port Blaster probes.
const W32B_port = 135/tcp;
# Number of distinct attempts required before a host is flagged.
const W32B_MIN_ATTEMPTS = 50 &redef;
redef enum Notice += {
W32B_SourceLocal,
W32B_SourceRemote,
};
# Count distinct 135/tcp targets per source; once a source has probed
# W32B_MIN_ATTEMPTS targets, raise a (local or remote) Blaster notice
# exactly once.
event connection_attempt(c: connection)
	{
	if ( c$id$resp_p != W32B_port )
		return;

	local ip = c$id$orig_h;

	if ( ip in w32b_reported )
		return;

	if ( ip in w32b_scanned )
		{
		add (w32b_scanned[ip])[c$id$resp_h];

		if ( length(w32b_scanned[ip]) >= W32B_MIN_ATTEMPTS )
			{
			if ( is_local_addr(ip) )
				NOTICE([$note=W32B_SourceLocal, $conn=c,
					$msg=fmt("W32.Blaster local source: %s",
						ip)]);
			else
				NOTICE([$note=W32B_SourceRemote, $conn=c,
					$msg=fmt("W32.Blaster remote source: %s",
						ip)]);

			add w32b_reported[ip];
			}
		}
	else
		# BUG FIX: seed the set with the scanned destination, not the
		# scanner itself -- set(ip) dropped the first probed target
		# and counted a bogus self-entry toward W32B_MIN_ATTEMPTS.
		w32b_scanned[ip] = set(c$id$resp_h) &mergeable;
	}

View file

@ -1,55 +0,0 @@
# $Id: brolite-backdoor.bro 2956 2006-05-14 01:08:34Z vern $
# Sample file for running backdoor detector
#
# Note, this can consume significant processing resources when running
# on live traffic.
#
# To run bro with this script using a Bro Lite setup:
#
# rename this script to hostname.bro
# run: $BROHOME/etc/bro.rc start
# or bro -i interface brolite-backdoor.bro
@load site
@load backdoor
@load weird
# By default, do backdoor detection on everything except standard HTTP
# and SMTP ports.
redef capture_filters += [ ["tcp"] = "tcp" ];
redef restrict_filters +=
[ ["not-http"] = "not (port 80 or port 8000 or port 8080)" ];
redef restrict_filters += [ ["not-smtp"] = "not (port 25 or port 587)" ];
# Use tagged log files for notices.
redef use_tagging = T;
# Set if you want to dump packets that trigger the detections.
redef dump_backdoor_packets = T;
# Disable (set to T) if you don't care about this traffic.
# redef gnutella_sig_disabled = T;
# redef kazaa_sig_disabled = T;
redef napster_sig_disabled = T; # too many false positives
# Ignore outgoing, only report incoming backdoors.
redef backdoor_ignore_remote += {
ftp_backdoor_sigs, ssh_backdoor_sigs, rlogin_backdoor_sigs,
http_backdoor_sigs, http_proxy_backdoor_sigs, smtp_backdoor_sigs,
};
# Set these to send mail on backdoor alarms.
# redef mail_dest = "youremail@yourhost.dom";
# redef notice_action_filters += {
# [BackdoorFound] = send_email_notice,
#};
# Tuning: use more aggressive timeouts to reduce CPU and memory, as these
# have little effect on backdoor analysis.
redef tcp_SYN_timeout = 1 sec;
redef tcp_attempt_delay = 1 sec;
redef tcp_inactivity_timeout = 1 min;
redef udp_inactivity_timeout = 5 secs;
redef icmp_inactivity_timeout = 5 secs;

View file

@ -1,82 +0,0 @@
# $Id: brolite-sigs.bro 3856 2006-12-02 00:18:57Z vern $
# Bro Lite signature configuration file
# General policy - these scripts are more infrastructural than service
# oriented, so in general avoid changing anything here.
# Set global constant. This can be used in ifdef statements to determine
# if signatures are enabled.
const use_signatures = T;
@load snort # basic definitions for signatures
@load signatures # the signature policy engine
@load sig-functions # addl. functions added for signature accuracy
@load sig-action # actions related to particular signatures
# Flag HTTP worm sources such as Code Red.
@load worm
# Do worm processing
redef notice_action_filters += { [RemoteWorm] = file_notice };
# Ports that need to be captured for signatures to see a useful
# cross section of traffic.
redef capture_filters += {
["sig-http"] =
"tcp port 80 or tcp port 8080 or tcp port 8000 or tcp port 8001",
["sig-ftp"] = "port ftp",
["sig-telnet"] = "port telnet",
["sig-portmapper"] = "port 111",
["sig-smtp"] = "port smtp",
["sig-imap"] = "port 143",
["sig-snmp"] = "port 161 or port 162",
["sig-dns"] = "port 53",
# rsh/rlogin/rexec
["sig-rfoo"] = "port 512 or port 513 or port 515",
# Range of TCP ports for general RPC traffic. This can also
# occur on other ports, but these should catch a lot without
# a major performance hit. We skip ports associated with
# HTTP, SSH and M$.
["sig-rpc"] = "tcp[2:2] > 32770 and tcp[2:2] < 32901 and tcp[0:2] != 80 and tcp[0:2] != 22 and tcp[0:2] != 139",
};
### Why is this called "tcp3"?
# Catch outbound M$ scanning. Returns filter listing local addresses
# along with the interesting ports.
function create_tcp3_filter(): string
	{
	# Build "src net A or src net B ..." from the configured local nets.
	local src_expr = "";

	for ( ln in local_nets )
		{
		src_expr = (src_expr == "") ?
			fmt("src net %s", ln) :
			fmt("%s or src net %s", src_expr, ln);
		}

	local ms_scan_ports =
		"dst port 135 or dst port 137 or dst port 139 or dst port 445";

	# With no local nets configured, fall back to the ports alone.
	if ( src_expr == "" )
		return ms_scan_ports;

	return fmt("(%s) and (%s)", src_expr, ms_scan_ports);
	}
# Create and apply the filter.
redef capture_filters += { ["tcp3"] = create_tcp3_filter()};
# Turn on ICMP analysis.
redef capture_filters += { ["icmp"] = "icmp"};
# Load the addendum signatures. These are utility signatures that do not
# produce event messages.
redef signature_files += "sig-addendum";

View file

@ -1,195 +0,0 @@
# Bro Lite base configuration file.
# General policy - these scripts are more infrastructural than service
# oriented, so in general avoid changing anything here.
@load site # defines local and neighbor networks from static config
@load tcp # initialize BPF filter for SYN/FIN/RST TCP packets
@load weird # initialize generic mechanism for unusual events
@load conn # access and record connection events
@load hot # defines certain forms of sensitive access
@load frag # process TCP fragments
@load print-resources # on exit, print resource usage information
# Scan detection policy.
@load scan # generic scan detection mechanism
@load trw # additional, more sensitive scan detection
#@load drop # include if installation has ability to drop hostile remotes
# Application level policy - these scripts operate on the specific service.
@load http # general http analyzer, low level of detail
@load http-request # detailed analysis of http requests
@load http-reply # detailed analysis of http reply's
# Track software versions; required for some signature matching. Also
# can be used by http and ftp policies.
@load software
@load ftp # FTP analysis
@load portmapper # record and analyze RPC portmapper requests
@load tftp # identify and log TFTP sessions
@load login # rlogin/telnet analyzer
@load irc # IRC analyzer
@load blaster # blaster worm detection
@load stepping # "stepping stone" detection
@load synflood # synflood attacks detection
@load smtp # record and analyze email traffic - somewhat expensive
@load notice-policy # tuning of notices to downgrade some alarms
# off by default
#@load icmp # icmp analysis
# Tuning of memory consumption.
@load inactivity # time out connections for certain services more quickly
# @load print-globals # on exit, print the size of global script variables
# Record system statistics to the notice file
@load stats
# udp analysis - potentially expensive, depending on a site's traffic profile
#@load udp.all
#@load remove-multicast
# Prints the pcap filter and immediately exits. Not used during
# normal operation.
#@load print-filter
## End policy script loading.
## General configuration.
@load rotate-logs
redef log_rotate_base_time = "0:00";
redef log_rotate_interval = 24 hr;
# Set additional policy prefixes.
@prefixes += lite
## End basic configuration.
## Scan configuration.
@ifdef ( Scan::analyze_all_services )
redef Scan::analyze_all_services = T;
# The following turns off scan detection.
#redef Scan::suppress_scan_checks = T;
# Be a bit more aggressive than default (though the defaults
# themselves should be fixed).
redef Scan::report_outbound_peer_scan = { 100, 1000, };
# These services are skipped for scan detection due to excessive
# background noise.
redef Scan::skip_services += {
http, # Avoid Code Red etc. overload
27374/tcp, # Massive scanning in Jan 2002
1214/tcp, # KaZaa scans
12345/tcp, # Massive scanning in Apr 2002
445/tcp, # Massive distributed scanning Oct 2002
135/tcp, # These days, NetBIOS scanning is endemic
137/udp, # NetBIOS
139/tcp, # NetBIOS
1025/tcp,
6129/tcp, # Dameware
3127/tcp, # MyDoom worms worms worms!
2745/tcp, # Bagel worm
1433/tcp, # Distributed scanning, April 2004
5000/tcp, # Distributed scanning, May 2004
5554/tcp, # More worm food, May 2004
9898/tcp, # Worms attacking worms. ugh - May 2004
3410/tcp, # More worm food, June 2004
3140/tcp, # Dyslexic worm food, June 2004
27347/tcp, # Can't kids type anymore?
1023/tcp, # Massive scanning, July 2004
17300/tcp, # Massive scanning, July 2004
};
@endif
@ifdef ( ICMP::detect_scans )
# Whether to detect ICMP scans.
redef ICMP::detect_scans = F;
redef ICMP::scan_threshold = 100;
@endif
@ifdef ( TRW::TRWAddressScan )
# remove logging TRW scan events
redef notice_action_filters += {
[TRW::TRWAddressScan] = ignore_notice,
};
@endif
# Note: default scan configuration is conservative in terms of memory use and
# might miss slow scans. Consider uncommenting these based on your sites scan
# traffic.
#redef distinct_peers &create_expire = 30 mins;
#redef distinct_ports &create_expire = 30 mins;
#redef distinct_low_ports &create_expire= 30 mins;
## End scan configuration.
## additional IRC checks
redef IRC::hot_words += /.*exe/ ;
## Dynamic Protocol Detection configuration
#
# This is off by default, as it requires a more powerful Bro host.
# Uncomment next line to activate.
# const use_dpd = T;
@ifdef ( use_dpd )
@load dpd
@load irc-bot
@load dyn-disable
@load detect-protocols
@load detect-protocols-http
@load proxy
@load ssh
# By default, DPD looks at all traffic except port 80.
# For lightly loaded networks, comment out the restrict_filters line.
# For heavily loaded networks, try adding addition ports (e.g., 25) to
# the restrict filters.
redef capture_filters += [ ["tcp"] = "tcp" ];
redef restrict_filters += [ ["not-http"] = "not (port 80)" ];
@endif
@ifdef ( ProtocolDetector::ServerFound )
# Report servers on non-standard ports only for local addresses.
redef notice_policy += {
[$pred(a: notice_info) =
{ return a$note == ProtocolDetector::ServerFound &&
! is_local_addr(a$src); },
$result = NOTICE_FILE,
$priority = 1],
# Report protocols on non-standard ports only for local addresses
# (unless it's IRC).
[$pred(a: notice_info) =
{ return a$note == ProtocolDetector::ProtocolFound &&
! is_local_addr(a$dst) &&
a$sub != "IRC"; },
$result = NOTICE_FILE,
$priority = 1],
};
@endif
# The following is used to transfer state between Bro's when one
# takes over from another.
#
# NOTE: not implemented in the production version, so ignored for now.
@ifdef ( remote_peers_clear )
redef remote_peers_clear += {
[127.0.0.1, 55555/tcp] = [$hand_over = T],
[127.0.0.1, 0/tcp] = [$hand_over = T]
};
@endif
# Use tagged log files for notices.
redef use_tagging = T;

View file

@ -1,190 +0,0 @@
# $Id:$
#
# bt-tracker.bro - analysis of BitTorrent tracker traffic
# ------------------------------------------------------------------------------
# This code contributed by Nadi Sarrar.
@load dpd
@load weird
module BitTorrent;
export {
# Whether to log tracker URIs.
global log_tracker_request_uri = F &redef;
}
redef capture_filters += { ["bittorrent"] = "tcp", };
# Tracker log plus per-connection numeric ids for log correlation.
global bt_tracker_log = open_log_file("bt-tracker") &redef;
global bt_tracker_conns: table[conn_id] of count;
global tracker_conn_count: count = 0;
# Build the common tracker-log prefix: timestamp, tracker-connection id,
# tag, endpoints and a direction arrow.
function bt_log_tag(id: conn_id, cid: count, tag: string, is_orig: bool): string
	{
	local arrow = is_orig ? ">" : "<";

	return fmt("%.6f T%d %s %s:%d %s %s:%d",
		   network_time(), cid, tag,
		   id$orig_h, id$orig_p, arrow, id$resp_h, id$resp_p);
	}
# Parse a BitTorrent tracker announce (HTTP GET): extract the query
# parameters, register the announced peer port for DPD, assign a stable
# log id, and log the request.
event bt_tracker_request(c: connection, uri: string,
headers: bt_tracker_headers)
{
# Parse and validate URI.
local pair = split1(uri, /\?/);
local keys = split(pair[2], /&/);
# Defaults mean "not seen". peer_port deliberately starts as a UDP
# port so any successfully parsed (TCP) port compares unequal.
local info_hash = "";
local peer_ide = "";
local peer_port = 0/udp;
local uploaded = -1;
local downloaded = -1;
local left = -1;
local compact = T;
local peer_event = "empty";
for ( idx in keys )
{
local keyval = split1(keys[idx], /=/);
if ( length(keyval) != 2 )
next;
local key = to_lower(keyval[1]);
local val = keyval[2];
if ( key == "info_hash" )
info_hash = unescape_URI(val);
else if ( key == "peer_id" )
peer_ide = unescape_URI(val);
else if ( key == "port" )
peer_port = to_port(to_count(val), tcp);
else if ( key == "uploaded" )
uploaded = to_int(val);
else if ( key == "downloaded" )
downloaded = to_int(val);
else if ( key == "left" )
left = to_int(val);
else if ( key == "compact" )
compact = (to_int(val) == 1);
else if ( key == "event" )
{
val = to_lower(val);
if ( val == /started|stopped|completed/ )
peer_event = val;
}
}
if ( info_hash == "" || peer_ide == "" || peer_port == 0/udp )
{ # Does not look like BitTorrent.
disable_analyzer(c$id, current_analyzer());
delete bt_tracker_conns[c$id];
return;
}
# Anticipate an incoming peer-wire connection on the announced port;
# the wildcard 0.0.0.0 matches any originator.
if ( peer_port != 0/tcp )
expect_connection(to_addr("0.0.0.0"), c$id$orig_h,
peer_port, ANALYZER_BITTORRENT, 1 min);
# Assign (or reuse) a per-tracker-connection id for the log.
local id: count;
if ( c$id in bt_tracker_conns )
id = bt_tracker_conns[c$id];
else
{
id = ++tracker_conn_count;
bt_tracker_conns[c$id] = id;
}
print bt_tracker_log,
fmt("%s [peer_id:%s info_hash:%s port:%s event:%s up:%d down:%d left:%d compact:%s]%s",
bt_log_tag(c$id, id, "request", T),
bytestring_to_hexstr(peer_ide),
bytestring_to_hexstr(info_hash),
peer_port, peer_event,
uploaded, downloaded, left,
compact ? "yes" : "no",
log_tracker_request_uri ? fmt(" GET %s", uri) : "");
}
# Render one integer entry of the bencoded tracker response as
# "key:value " (with a trailing separator), or "" when the entry is
# absent or not an integer. Spaces in the key are mapped to '_'.
function benc_status(benc: bittorrent_benc_dir, tag: string): string
	{
	if ( tag !in benc || ! benc[tag]?$i )
		return "";

	local fmt_tag = sub(tag, / /, "_");

	# BUG FIX: include a trailing separator. The caller concatenates
	# several of these back-to-back ("[%s%s%s%s%speers:%d]"), so
	# without it adjacent fields ran together, e.g.
	# "complete:5incomplete:3".
	return fmt("%s:%d ", fmt_tag, benc[tag]$i);
	}
# Handle a successful tracker response: pre-register the returned peers
# for DPD, then log either the failure reason or the bencoded status
# fields.
event bt_tracker_response(c: connection, status: count,
headers: bt_tracker_headers,
peers: bittorrent_peer_set,
benc: bittorrent_benc_dir)
{
if ( c$id !in bt_tracker_conns )
return;
local id = bt_tracker_conns[c$id];
# Anticipate peer-wire connections from this client to each peer.
for ( peer in peers )
expect_connection(c$id$orig_h, peer$h, peer$p,
ANALYZER_BITTORRENT, 1 min);
if ( "failure reason" in benc )
{
print bt_tracker_log,
fmt("%s [failure_reason:\"%s\"]",
bt_log_tag(c$id, id, "response", F),
benc["failure reason"]?$s ?
benc["failure reason"]$s : "");
return;
}
print bt_tracker_log,
fmt("%s [%s%s%s%s%speers:%d]",
bt_log_tag(c$id, id, "response", F),
benc_status(benc, "warning message"),
benc_status(benc, "complete"),
benc_status(benc, "incomplete"),
benc_status(benc, "interval"),
benc_status(benc, "min interval"),
length(peers));
}
# Log non-200 tracker responses for tracked connections.
event bt_tracker_response_not_ok(c: connection, status: count,
				headers: bt_tracker_headers)
	{
	if ( c$id !in bt_tracker_conns )
		return;

	local cid = bt_tracker_conns[c$id];

	print bt_tracker_log,
		fmt("%s [status:%d]",
			bt_log_tag(c$id, cid, "response", F), status);
	}
# Malformed tracker traffic: log it (untracked connections get id 0)
# and funnel into the generic weird machinery.
event bt_tracker_weird(c: connection, is_orig: bool, msg: string)
	{
	local cid = (c$id in bt_tracker_conns) ? bt_tracker_conns[c$id] : 0;

	print bt_tracker_log,
		fmt("%s [%s]", bt_log_tag(c$id, cid, "<weird>", is_orig), msg);

	event conn_weird(msg, c);
	}
# Final accounting when a tracked tracker connection goes away.
event connection_state_remove(c: connection)
{
if ( c$id !in bt_tracker_conns )
return;
local id = bt_tracker_conns[c$id];
delete bt_tracker_conns[c$id];
print bt_tracker_log,
fmt("%s [duration:%.06f total:%d]",
# Ideally the direction here wouldn't be T or F
# but both, displayed as "<>".
bt_log_tag(c$id, id, "<closed>", T), c$duration,
c$orig$size + c$resp$size);
}

View file

@ -1,9 +0,0 @@
#! $Id: capture-events.bro 4674 2007-07-30 22:00:43Z vern $
#
# Captures all events to events.bst.
#
event bro_init()
{
# Record every generated event into events.bst for later replay.
capture_events("events.bst");
}

View file

@ -1,74 +0,0 @@
# $Id:$
# Logs evidence regarding the degree to which the packet capture process
# suffers from measurment loss.
#
# By default, only reports loss computed in terms of number of "gap events"
# (ACKs for a sequence number that's above a gap). You can also get an
# estimate in terms of number of bytes missing; this however is sometimes
# heavily affected by miscomputations due to broken packets with incorrect
# sequence numbers. (These packets also affect the first estimator, but
# only to a quite minor degree.)
@load notice
module CaptureLoss;
export {
redef enum Notice += {
CaptureLossReport, # interval report
CaptureLossSummary, # end-of-run summary
};
# Whether to also report byte-weighted estimates.
global report_byte_based_estimates = F &redef;
# Whether to generate per-interval reports even if there
# was no evidence of loss.
global report_if_none = F &redef;
# Whether to generate a summary even if there was no
# evidence of loss.
global summary_if_none = F &redef;
}
# Redefine this to be non-zero to get per-interval reports.
redef gap_report_freq = 0 sec;
# Per-interval loss report: emit a notice when gaps were observed (or
# unconditionally when report_if_none is set).
event gap_report(dt: interval, info: gap_info)
	{
	if ( info$gap_events == 0 && ! report_if_none )
		return;

	local msg: string;

	if ( report_byte_based_estimates )
		msg = fmt("gap-dt=%.6f acks=%d bytes=%d gaps=%d gap-bytes=%d",
			  dt, info$ack_events, info$ack_bytes,
			  info$gap_events, info$gap_bytes);
	else
		msg = fmt("gap-dt=%.6f acks=%d gaps=%d",
			  dt, info$ack_events, info$gap_events);

	NOTICE([$note=CaptureLossReport, $msg=msg]);
	}
# End-of-run summary: compute overall gap/ack ratios (event- and
# byte-weighted) and emit a CaptureLossSummary notice.
event bro_done()
{
local g = get_gap_summary();
# Guard against division by zero on idle runs.
local gap_rate =
g$ack_events == 0 ? 0.0 :
(1.0 * g$gap_events) / (1.0 * g$ack_events);
local gap_bytes =
g$ack_bytes == 0 ? 0.0 :
(1.0 * g$gap_bytes) / (1.0 * g$ack_bytes);
if ( gap_rate == 0.0 && gap_bytes == 0.0 && ! summary_if_none )
return;
local msg = report_byte_based_estimates ?
fmt("estimated rate = %g / %g (events/bytes)",
gap_rate, gap_bytes) :
fmt("estimated rate = %g", gap_rate);
NOTICE([$note=CaptureLossSummary, $msg=msg]);
}

View file

@ -1,9 +0,0 @@
#! $Id: capture-events.bro 6 2004-04-30 00:31:26Z jason $
#
# Captures all operations on &synchronized variables to state-updates.bst.
#
event bro_init()
{
# Record state operations into state-updates.bst for later replay.
capture_state_updates("state-updates.bst");
}

View file

@ -1,54 +0,0 @@
# $Id: checkpoint.bro 6724 2009-06-07 09:23:03Z vern $
#
# Checkpoints Bro's persistent state at regular intervals and scans
# the state directory for external updates.
# An interval of 0 disables the corresponding timer chain (see below).
const state_rescan_interval = 15 secs &redef;
const state_checkpoint_interval = 15 min &redef;
# Services for which the internal connection state is stored.
const persistent_services = {
21/tcp, # ftp
22/tcp, # ssh
23/tcp, # telnet
513/tcp, # rlogin
} &redef;
# The first timer fires immediately. This flag lets us ignore it.
global state_ignore_first = T;
# Periodic checkpoint driver; reschedules itself for as long as the
# checkpoint interval is non-zero.
event state_checkpoint()
	{
	# Skip the first firing (see state_ignore_first) and never
	# checkpoint while Bro is shutting down.
	if ( state_ignore_first )
		state_ignore_first = F;
	else if ( ! bro_is_terminating() )
		checkpoint_state();
	if ( state_checkpoint_interval > 0 secs )
		schedule state_checkpoint_interval { state_checkpoint() };
	}
# Periodic rescan of the state directory for externally written updates;
# reschedules itself while the rescan interval is non-zero.
event state_rescan()
	{
	rescan_state();
	if ( state_rescan_interval > 0 secs )
		schedule state_rescan_interval { state_rescan() };
	}
# Kick off the checkpoint and rescan timers (if their intervals are
# non-zero).
event bro_init()
	{
	if ( state_checkpoint_interval > 0 secs )
		schedule state_checkpoint_interval { state_checkpoint() };
	if ( state_rescan_interval > 0 secs )
		schedule state_rescan_interval { state_rescan() };
	}
# Intentionally a no-op: making connections for persistent_services
# persistent was suspected buggy and is left disabled.
event connection_established(c: connection)
	{
	# Buggy?
	# if ( c$id$resp_p in persistent_services )
	# make_connection_persistent(c);
	}

View file

@ -1,36 +0,0 @@
# $Id: clear-passwords.bro 4758 2007-08-10 06:49:23Z vern $
# Monitoring for use of cleartext passwords.
@load ftp
@load login
@load pop3
@load irc
# Log file receiving one line per observed cleartext credential.
const passwd_file = open_log_file("passwords") &redef;
# ftp, login and pop3 call login_{success,failure}, which in turn
# calls account_tried(), so we can snarf all at once here:
event account_tried(c: connection, user: string, passwd: string)
	{
	# Tag each entry with whether the originator is local or remote.
	print passwd_file, fmt("%s account name '%s', password '%s': %s",
		is_local_addr(c$id$orig_h) ? "local" : "remote",
		user, passwd, id_string(c$id));
	}
# IRC raises a different event on login, so we hook into it here:
event irc_join_message(c: connection, info_list: irc_join_list)
	{
	# A single JOIN can name several channels; log each nick/password.
	for ( l in info_list)
		{
		print passwd_file, fmt("IRC JOIN name '%s', password '%s'",
			l$nick, l$password);
		}
	}
# Raised if IRC user tries to become operator:
event irc_oper_message(c: connection, user: string, password: string)
	{
	print passwd_file, fmt("IRC OPER name '%s', password '%s'",
		user, password);
	}

View file

@ -1,71 +0,0 @@
# $Id$
#
# Script which alarms if the number of connections per time interval
# exceeds a threshold.
#
# This script is mainly meant as a demonstration; it hasn't been hardened
# with/for operational use.
@load notice
module ConnFlood;
export {
	redef enum Notice += {
		ConnectionFloodStart, ConnectionFloodEnd,
	};
	# Thresholds to report (conns/sec).
	const thresholds: set[count] =
		{ 1000, 2000, 4000, 6000, 8000, 10000, 20000, 50000 }
		&redef;
	# Average over this time interval.
	const avg_interval = 10 sec &redef;
}
# Number of new connections seen during the current interval.
global conn_counter = 0;
# Largest threshold already reported (0 = no flood in progress).
global last_thresh = 0;
event new_connection(c: connection)
{
++conn_counter;
}
# Periodic rate check: computes the connection rate over the last
# avg_interval, reports threshold crossings via NOTICE, resets the
# counter and reschedules itself.
event check_flood()
	{
	local thresh = 0;
	# NOTE(review): presumably this computes conns/sec averaged over
	# avg_interval, but the count/interval arithmetic wrapped in
	# interval_to_double() looks suspect — confirm against Bro's
	# operator semantics for count and interval types.
	local rate = double_to_count(interval_to_double((conn_counter / avg_interval)));
	# Find the largest threshold reached this interval.
	for ( i in thresholds )
		{
		if ( rate >= i && rate > thresh )
			thresh = i;
		}
	# Report if larger than last reported threshold.
	if ( thresh > last_thresh )
		{
		NOTICE([$note=ConnectionFloodStart, $n=thresh,
			$msg=fmt("flood begins at rate %d conns/sec", rate)]);
		last_thresh = thresh;
		}
	# If no threshold was reached, the flood is over.
	else if ( thresh == 0 && last_thresh > 0 )
		{
		NOTICE([$note=ConnectionFloodEnd, $n=thresh,
			$msg=fmt("flood ends at rate %d conns/sec", rate)]);
		last_thresh = 0;
		}
	conn_counter = 0;
	schedule avg_interval { check_flood() };
	}
# Start the periodic flood check.
event bro_init()
	{
	schedule avg_interval { check_flood() };
	}

View file

@ -1,24 +0,0 @@
# $Id: conn-id.bro 45 2004-06-09 14:29:49Z vern $
# Simple functions for generating ASCII connection identifiers.
@load port-name
# Render a connection 4-tuple as "orig > resp" using endpoint_id().
function id_string(id: conn_id): string
	{
	local orig = endpoint_id(id$orig_h, id$orig_p);
	local resp = endpoint_id(id$resp_h, id$resp_p);
	return fmt("%s > %s", orig, resp);
	}
# Render the 4-tuple from the responder's perspective ("orig < resp").
function reverse_id_string(id: conn_id): string
	{
	local orig = endpoint_id(id$orig_h, id$orig_p);
	local resp = endpoint_id(id$resp_h, id$resp_p);
	return fmt("%s < %s", orig, resp);
	}
# Choose forward or reversed rendering depending on direction.
function directed_id_string(id: conn_id, is_orig: bool): string
	{
	if ( is_orig )
		return id_string(id);
	return reverse_id_string(id);
	}

View file

@ -1,425 +0,0 @@
# $Id: conn.bro 6782 2009-06-28 02:19:03Z vern $
@load notice
@load hot
@load port-name
@load netstats
@load conn-id
redef enum Notice += {
	SensitiveConnection, # connection marked "hot"
};
# TCP endpoint states that count as (half-)closed.
const conn_closed = { TCP_CLOSED, TCP_RESET };
global have_FTP = F; # if true, we've loaded ftp.bro
global have_SMTP = F; # if true, we've loaded smtp.bro
# TODO: Do we have a nicer way of doing this?
export { global FTP::is_ftp_data_conn: function(c: connection): bool; }
# Whether to include connection state history in the logs generated
# by record_connection.
const record_state_history = F &redef;
# Whether to translate the local address in SensitiveConnection notices
# to a hostname. Meant as a demonstration of the "when" construct.
const xlate_hot_local_addr = F &redef;
# Whether to use DPD for generating the service field in the summaries.
# Default off, because it changes the format of conn.log in a way
# potentially incompatible with existing scripts.
const dpd_conn_logs = F &redef;
# Maps a given port on a given server's address to an RPC service.
# If we haven't loaded portmapper.bro, then it will be empty
# (and, ideally, queries to it would be optimized away ...).
global RPC_server_map: table[addr, port] of string;
# Destination log file for one-line connection summaries.
const conn_file = open_log_file("conn") &redef;
# Map a connection's two endpoint states to the classic one-line summary
# state code (SF, S0..S3, REJ, RST*, SH*, OTH). The tests are ordered:
# resets first, then closes, then half-open states.
function conn_state(c: connection, trans: transport_proto): string
	{
	local os = c$orig$state;
	local rs = c$resp$state;
	# "Inactive" here also covers partial (picked-up-midstream) endpoints.
	local o_inactive = os == TCP_INACTIVE || os == TCP_PARTIAL;
	local r_inactive = rs == TCP_INACTIVE || rs == TCP_PARTIAL;
	if ( trans == tcp )
		{
		if ( rs == TCP_RESET )
			{
			if ( os == TCP_SYN_SENT || os == TCP_SYN_ACK_SENT ||
				(os == TCP_RESET &&
				c$orig$size == 0 && c$resp$size == 0) )
				return "REJ";
			else if ( o_inactive )
				return "RSTRH";
			else
				return "RSTR";
			}
		else if ( os == TCP_RESET )
			return r_inactive ? "RSTOS0" : "RSTO";
		else if ( rs == TCP_CLOSED && os == TCP_CLOSED )
			return "SF";
		else if ( os == TCP_CLOSED )
			return r_inactive ? "SH" : "S2";
		else if ( rs == TCP_CLOSED )
			return o_inactive ? "SHR" : "S3";
		else if ( os == TCP_SYN_SENT && rs == TCP_INACTIVE )
			return "S0";
		else if ( os == TCP_ESTABLISHED && rs == TCP_ESTABLISHED )
			return "S1";
		else
			return "OTH";
		}
	else if ( trans == udp )
		{
		if ( os == UDP_ACTIVE )
			return rs == UDP_ACTIVE ? "SF" : "S0";
		else
			return rs == UDP_ACTIVE ? "SHR" : "OTH";
		}
	else
		return "OTH";
	}
# Textual size of one endpoint: the byte count when it is meaningful
# (non-zero, or a cleanly closed TCP endpoint), otherwise "?".
function conn_size(e: endpoint, trans: transport_proto): string
	{
	### should return 0 for TCP_RESET that went through TCP_CLOSED
	if ( e$size == 0 && ! (trans == tcp && e$state == TCP_CLOSED) )
		return "?";

	return fmt("%d", e$size);
	}
# Well-known service label for the responder port, or "other".
function service_name(c: connection): string
	{
	local rp = c$id$resp_p;
	return rp in port_names ? port_names[rp] : "other";
	}
# Compact glyphs for each summary state code, used by full_id_string().
const state_graphic = {
	["OTH"] = "?>?", ["REJ"] = "[",
	["RSTO"] = ">]", ["RSTOS0"] = "}]", ["RSTR"] = ">[", ["RSTRH"] = "<[",
	["S0"] = "}", ["S1"] = ">", ["S2"] = "}2", ["S3"] = "}3",
	["SF"] = ">", ["SH"] = ">h", ["SHR"] = "<h",
};
# One-line human-readable rendering of a connection: endpoints, state
# glyph, service, and (for established connections) sizes and duration.
function full_id_string(c: connection): string
	{
	local id = c$id;
	local trans = get_port_transport_proto(id$orig_p);
	local state = conn_state(c, trans);
	local state_gr = state_graphic[state];
	local service = service_name(c);
	# Unestablished/rejected connections have no meaningful sizes.
	if ( state == "S0" || state == "S1" || state == "REJ" )
		return fmt("%s %s %s/%s %s", id$orig_h, state_gr,
			id$resp_h, service, c$addl);
	else
		return fmt("%s %sb %s %s/%s %sb %.1fs %s",
			id$orig_h, conn_size(c$orig, trans),
			state_gr, id$resp_h, service,
			conn_size(c$resp, trans), c$duration, c$addl);
	}
# Per-connection set of complete hot messages already reported, used to
# suppress duplicate SensitiveConnection notices.
global hot_conns_reported: table[conn_id] of set[string];
# Low-level routine that generates the actual SensitiveConnection
# notice associated with a "hot" connection.
function do_hot_notice(c: connection, dir: string, host: string)
	{
	NOTICE([$note=SensitiveConnection, $conn=c,
		$msg=fmt("hot: %s %s local host: %s",
			full_id_string(c), dir, host)]);
	}
# Generate a SensitiveConnection notice with the local hostname
# translated. Mostly intended as a demonstration of using "when".
function gen_hot_notice_with_hostnames(c: connection)
	{
	local id = c$id;
	local inbound = is_local_addr(id$resp_h);
	local dir = inbound ? "to" : "from";
	local local_addr = inbound ? id$orig_h : id$resp_h;
	add_notice_tag(c);
	# Asynchronous DNS lookup; fall back to the bare address if the
	# lookup does not complete within 5 seconds.
	when ( local hostname = lookup_addr(local_addr) )
		do_hot_notice(c, dir, hostname);
	timeout 5 sec
		{ do_hot_notice(c, dir, fmt("%s", local_addr)); }
	}
# Report a hot connection once per distinct message, optionally with the
# local address resolved to a hostname.
function log_hot_conn(c: connection)
	{
	if ( c$id !in hot_conns_reported )
		hot_conns_reported[c$id] = set() &mergeable;
	local msg = full_id_string(c);
	# Deduplicate on the full message text.
	if ( msg !in hot_conns_reported[c$id] )
		{
		if ( xlate_hot_local_addr )
			gen_hot_notice_with_hostnames(c);
		else
			NOTICE([$note=SensitiveConnection, $conn=c,
				$msg=fmt("hot: %s", full_id_string(c))]);
		add hot_conns_reported[c$id][msg];
		}
	}
# Pick a service label without DPD: prefer an explicitly assigned
# service, then FTP-data / RPC / half-open heuristics, falling back to
# the responder port's well-known name.
function determine_service_non_DPD(c: connection) : string
	{
	if ( length(c$service) != 0 )
		{
		for ( i in c$service )
			return i; # return first;
		}
	else if ( have_FTP && FTP::is_ftp_data_conn(c) )
		return port_names[20/tcp];
	else if ( [c$id$resp_h, c$id$resp_p] in RPC_server_map )
		# Alternatively, perhaps this should be stored in $addl
		# rather than $service, so the port number remains
		# visible .... ?
		return RPC_server_map[c$id$resp_h, c$id$resp_p];
	else if ( c$orig$state == TCP_INACTIVE )
		{
		# We're seeing a half-established connection. Use the
		# service of the originator if it's well-known and the
		# responder isn't.
		if ( c$id$resp_p !in port_names && c$id$orig_p in port_names )
			return port_names[c$id$orig_p];
		}
	return service_name(c);
	}
# Determine the service field for the log line. With dpd_conn_logs off,
# defers to the non-DPD heuristics; otherwise merges DPD-confirmed
# services (a trailing "?" marks a port-based guess).
function determine_service(c: connection) : string
	{
	if ( ! dpd_conn_logs )
		return determine_service_non_DPD(c);
	if ( [c$id$resp_h, c$id$resp_p] in RPC_server_map )
		add c$service[RPC_server_map[c$id$resp_h, c$id$resp_p]];
	if ( length(c$service) == 0 )
		{
		# Empty service set. Use port as a hint.
		if ( c$orig$state == TCP_INACTIVE )
			{
			# We're seeing a half-established connection. Use the
			# service of the originator if it's well-known and the
			# responder isn't.
			if ( c$id$resp_p !in port_names &&
			c$id$orig_p in port_names )
				return fmt("%s?", port_names[c$id$orig_p]);
			}
		if ( c$id$resp_p in port_names )
			return fmt("%s?", port_names[c$id$resp_p]);
		return "other";
		}
	local service = "";
	# Join all confirmed services; names starting with "-" are
	# suppressed.
	for ( s in c$service )
		{
		if ( sub_bytes(s, 0, 1) != "-" )
			service = service == "" ? s : cat(service, ",", s);
		}
	return service != "" ? to_lower(service) : "other";
	}
# Write the one-line summary for a finished connection to f: timestamps,
# endpoints, service, sizes, state code and flags, plus optional history
# and addl annotations.
function record_connection(f: file, c: connection)
	{
	local id = c$id;
	local local_init = is_local_addr(id$orig_h);
	local local_addr = local_init ? id$orig_h : id$resp_h;
	local remote_addr = local_init ? id$resp_h : id$orig_h;
	# "L" = locally initiated, "X" = remotely initiated.
	local flags = local_init ? "L" : "X";
	local trans = get_port_transport_proto(id$orig_p);
	local duration: string;
	# Do this first so we see the tag in addl.
	if ( c$hot > 0 )
		log_hot_conn(c);
	# A TCP duration is only meaningful once at least one side closed.
	if ( trans == tcp )
		{
		if ( c$orig$state in conn_closed || c$resp$state in conn_closed )
			duration = fmt("%.06f", c$duration);
		else
			duration = "?";
		}
	else
		duration = fmt("%.06f", c$duration);
	local addl = c$addl;
@ifdef ( estimate_flow_size_and_remove )
	# Annotate connection with separately-estimated size, if present.
	local orig_est = estimate_flow_size_and_remove(id, T);
	local resp_est = estimate_flow_size_and_remove(id, F);
	if ( orig_est$have_est )
		addl = fmt("%s olower=%.0fMB oupper=%.0fMB oincon=%s", addl,
			orig_est$lower / 1e6, orig_est$upper / 1e6,
			orig_est$num_inconsistent);
	if ( resp_est$have_est )
		addl = fmt("%s rlower=%.0fMB rupper=%.0fMB rincon=%s", addl,
			resp_est$lower / 1e6, resp_est$upper / 1e6,
			resp_est$num_inconsistent);
@endif
	local service = determine_service(c);
	local log_msg =
		fmt("%.6f %s %s %s %s %d %d %s %s %s %s %s",
			c$start_time, duration, id$orig_h, id$resp_h, service,
			id$orig_p, id$resp_p, trans,
			conn_size(c$orig, trans), conn_size(c$resp, trans),
			conn_state(c, trans), flags);
	if ( record_state_history )
		log_msg = fmt("%s %s", log_msg,
			c$history == "" ? "X" : c$history);
	if ( addl != "" )
		log_msg = fmt("%s %s", log_msg, addl);
	print f, log_msg;
	}
# Run hot-checks when the TCP handshake completes.
event connection_established(c: connection)
	{
	Hot::check_hot(c, Hot::CONN_ESTABLISHED);
	if ( c$hot > 0 )
		log_hot_conn(c);
	}
# A connection picked up mid-stream: run hot-checks unless it looks like
# a stealth scan.
event partial_connection(c: connection)
	{
	if ( c$orig$state == TCP_PARTIAL && c$resp$state == TCP_INACTIVE )
		# This appears to be a stealth scan. Don't do hot-checking
		# as there wasn't an established connection.
		;
	else
		{
		Hot::check_hot(c, Hot::CONN_ESTABLISHED);
		Hot::check_hot(c, Hot::APPL_ESTABLISHED); # assume it's been established
		}
	if ( c$hot > 0 )
		log_hot_conn(c);
	}
# An unanswered SYN: check for spoofing and attempted-connection hotness.
event connection_attempt(c: connection)
	{
	Hot::check_spoof(c);
	Hot::check_hot(c, Hot::CONN_ATTEMPTED);
	}
# Normal close: only hot-check if both sides actually sent data.
event connection_finished(c: connection)
	{
	if ( c$orig$size == 0 || c$resp$size == 0 )
		# Hard to get excited about this - not worth logging again.
		c$hot = 0;
	else
		Hot::check_hot(c, Hot::CONN_FINISHED);
	}
# Half-close of a partial connection: same policy as a full close.
event connection_partial_close(c: connection)
	{
	if ( c$orig$size == 0 || c$resp$size == 0 )
		# Hard to get excited about this - not worth logging again.
		c$hot = 0;
	else
		Hot::check_hot(c, Hot::CONN_FINISHED);
	}
# Only one side closed and the other never responded; treat as attempt.
event connection_half_finished(c: connection)
	{
	Hot::check_hot(c, Hot::CONN_ATTEMPTED);
	}
# SYN answered with a RST.
event connection_rejected(c: connection)
	{
	Hot::check_hot(c, Hot::CONN_REJECTED);
	}
# Established connection torn down by RST; treat as finished.
event connection_reset(c: connection)
	{
	Hot::check_hot(c, Hot::CONN_FINISHED);
	}
# Connection flushed at termination while still pending; skip strays and
# connections already reported via RST.
event connection_pending(c: connection)
	{
	if ( c$orig$state in conn_closed &&
		(c$resp$state == TCP_INACTIVE || c$resp$state == TCP_PARTIAL) )
		# This is a stray FIN or RST - don't bother reporting.
		return;
	if ( c$orig$state == TCP_RESET || c$resp$state == TCP_RESET )
		# We already reported this connection when the RST
		# occurred.
		return;
	Hot::check_hot(c, Hot::CONN_FINISHED);
	}
# A connection that timed out or was removed while still apparently
# active; hot-check it unless it carried no data.
function connection_gone(c: connection, gone_type: string)
	{
	if ( c$orig$size == 0 || c$resp$size == 0 )
		{
		if ( c$orig$state == TCP_RESET && c$resp$state == TCP_INACTIVE)
			# A bare RST, no other context. Ignore it.
			return;
		# Hard to get excited about this - not worth logging again,
		# per connection_finished().
		c$hot = 0;
		}
	else
		Hot::check_hot(c, Hot::CONN_TIMEOUT);
	}
# Final hook (low priority so other handlers run first): handle
# still-active connections, write the summary line, and clean up the
# hot-report state.
event connection_state_remove(c: connection) &priority = -10
	{
	local os = c$orig$state;
	local rs = c$resp$state;
	if ( os == TCP_ESTABLISHED && rs == TCP_ESTABLISHED )
		# It was still active, no summary generated.
		connection_gone(c, "remove");
	else if ( (os == TCP_CLOSED || rs == TCP_CLOSED) &&
		(os == TCP_ESTABLISHED || rs == TCP_ESTABLISHED) )
		# One side has closed, the other hasn't - it's in state S2
		# or S3, hasn't been reported yet.
		connection_gone(c, "remove");
	record_connection(conn_file, c);
	delete hot_conns_reported[c$id];
	}

View file

@ -1,40 +0,0 @@
# $Id: contents.bro 47 2004-06-11 07:26:32Z vern $
redef capture_filters += { ["contents"] = "tcp" };
# Keeps track of to which given contents files we've written.
global contents_files: set[string];
# Dump each direction of a TCP connection's payload into a per-direction
# "contents.<src>-<dst>" file, appending if the file was used before.
event new_connection_contents(c: connection)
	{
	local id = c$id;
	local orig_file =
		fmt("contents.%s.%d-%s.%d",
			id$orig_h, id$orig_p, id$resp_h, id$resp_p);
	local resp_file =
		fmt("contents.%s.%d-%s.%d",
			id$resp_h, id$resp_p, id$orig_h, id$orig_p);
	local orig_f: file;
	local resp_f: file;
	# Truncate on first use, append on subsequent connections with the
	# same endpoints.
	if ( orig_file !in contents_files )
		{
		add contents_files[orig_file];
		orig_f = open(orig_file);
		}
	else
		orig_f = open_for_append(orig_file);
	if ( resp_file !in contents_files )
		{
		add contents_files[resp_file];
		resp_f = open(resp_file);
		}
	else
		resp_f = open_for_append(resp_file);
	set_contents_file(id, CONTENTS_ORIG, orig_f);
	set_contents_file(id, CONTENTS_RESP, resp_f);
	}

View file

@ -1,62 +0,0 @@
# $Id: cpu-adapt.bro 1904 2005-12-14 03:27:15Z vern $
#
# Adjust load level based on cpu load.
@load load-level
# We increase the load-level if the average CPU load (percentage) is
# above this limit.
global cpu_upper_limit = 70.0 &redef;
# We decrease the load-level if the average CPU load is below this limit.
global cpu_lower_limit = 30.0 &redef;
# Time interval over which we average the CPU load.
global cpu_interval = 1 min &redef;
# Process/wall-clock readings from the previous measurement.
global cpu_last_proc_time = 0 secs;
global cpu_last_wall_time: time = 0;
# Periodically sample CPU usage (process time / wall time) and raise or
# lower the load level when it leaves the configured band.
event cpu_measure_load()
	{
	local res = resource_usage();
	local proc_time = res$user_time + res$system_time;
	local wall_time = current_time();
	# Skip the very first sample - we need a previous reading to delta.
	if ( cpu_last_proc_time > 0 secs )
		{
		local dproc = proc_time - cpu_last_proc_time;
		local dwall = wall_time - cpu_last_wall_time;
		local load = dproc / dwall * 100.0;
		print ll_file, fmt("%.6f CPU load %.02f", network_time(), load);
		# Second test is for whether we have any room to change
		# things. It shouldn't be hardwired to "xxx10" ....
		if ( load > cpu_upper_limit &&
		     current_load_level != LoadLevel10 )
			{
			print ll_file, fmt("%.6f CPU load above limit: %.02f",
					network_time(), load);
			increase_load_level();
			}
		else if ( load < cpu_lower_limit &&
			  current_load_level != LoadLevel1 )
			{
			print ll_file, fmt("%.6f CPU load below limit: %.02f",
					network_time(), load);
			decrease_load_level();
			}
		}
	cpu_last_proc_time = proc_time;
	cpu_last_wall_time = wall_time;
	schedule cpu_interval { cpu_measure_load() };
	}
# Start the periodic CPU measurement.
event bro_init()
	{
	schedule cpu_interval { cpu_measure_load() };
	}

View file

@ -1,8 +0,0 @@
# $Id:$
# Enable capture and DPD configuration for DCE-RPC endpoint mapper traffic.
redef capture_filters += { ["dce"] = "port 135" };
global dce_ports = { 135/tcp } &redef;
redef dpd_config += { [ANALYZER_DCE_RPC] = [$ports = dce_ports] };
# No default implementation for events.

View file

@ -1,41 +0,0 @@
# $Id: demux.bro 4758 2007-08-10 06:49:23Z vern $
# Directory into which per-connection transcript files are written.
const demux_dir = log_file_name("xscript") &redef;
# Created lazily on the first demuxed connection.
global created_demux_dir = F;
# Table of which connections we're demuxing.
global demuxed_conn: set[conn_id];
# tag: identifier to use for the reason for demuxing
# otag: identifier to use for originator side of the connection
# rtag: identifier to use for responder side of the connection
# Start writing both directions of an active connection to per-side
# transcript files under demux_dir. Returns F if the connection is
# already being demuxed or is no longer active.
function demux_conn(id: conn_id, tag: string, otag: string, rtag: string): bool
	{
	if ( id in demuxed_conn || ! active_connection(id) )
		return F;
	if ( ! created_demux_dir )
		{
		mkdir(demux_dir);
		created_demux_dir = T;
		}
	local orig_file =
		fmt("%s/%s.%s.%s.%d-%s.%d", demux_dir, otag, tag,
			id$orig_h, id$orig_p, id$resp_h, id$resp_p);
	local resp_file =
		fmt("%s/%s.%s.%s.%d-%s.%d", demux_dir, rtag, tag,
			id$resp_h, id$resp_p, id$orig_h, id$orig_p);
	set_contents_file(id, CONTENTS_ORIG, open(orig_file));
	set_contents_file(id, CONTENTS_RESP, open(resp_file));
	add demuxed_conn[id];
	return T;
	}
# Stop tracking a demuxed connection once it finishes.
event connection_finished(c: connection)
	{
	delete demuxed_conn[c$id];
	}

View file

@ -1,156 +0,0 @@
# $Id: detect-protocols-http.bro,v 1.1.4.2 2006/05/31 00:16:21 sommer Exp $
#
# Identifies protocols that use HTTP.
@load detect-protocols
module DetectProtocolHTTP;
export {
	# Defines characteristics of a protocol. All attributes must match
	# to trigger the detection. We match patterns against lower-case
	# versions of the data.
	type protocol : record {
		url: pattern &optional;
		client_header: pattern &optional;
		client_header_content: pattern &optional;
		server_header: pattern &optional;
		server_header_content: pattern &optional;
	};
	const protocols: table[string] of protocol = {
		["Kazaa"] = [$url=/^\/\.hash=.*/, $server_header=/^x-kazaa.*/],
		["Gnutella"] = [$url=/^\/(uri-res|gnutella).*/,
			$server_header=/^x-gnutella-.*/],
		["Gnutella_"] = [$url=/^\/(uri-res|gnutella).*/,
			$server_header=/^x-(content-urn|features).*/],
		["Gnutella__"] = [$url=/^\/(uri-res|gnutella).*/,
			$server_header=/^content-type/,
			$server_header_content=/.*x-gnutella.*/],
		["BitTorrent"] = [$url=/^.*\/(scrape|announce)\?.*info_hash.*/],
		["SOAP"] = [$client_header=/^([:print:]+-)?(soapaction|methodname|messagetype).*/],
		["Squid"] = [$server_header=/^x-squid.*/],
	} &redef;
}
# Flags recording which protocol characteristics have been observed for
# a (connection, protocol) pair; stored as members of a set[count].
const url_found = 1;
const client_header_found = 2;
# Fix: this was also defined as 2, colliding with client_header_found —
# finding either header satisfied both checks in check_match(), so a
# protocol requiring both a client and a server header could fire on
# just one of them. Use a distinct value.
const server_header_found = 4;
# Key for per-connection, per-protocol match state.
type index : record {
	id: conn_id;
	pid: string;
};
# Maps to characteristics found so far.
# FIXME: An integer would suffice for the bit-field
# if we had bit-operations ...
global conns: table[index] of set[count] &read_expire = 1hrs;
# Store the updated match mask and, if every characteristic the protocol
# defines has now been seen, report the protocol to the detector.
function check_match(c: connection, pid: string, mask: set[count])
	{
	conns[[$id=c$id, $pid=pid]] = mask;
	local p = protocols[pid];
	if ( p?$url && url_found !in mask )
		return;
	if ( p?$client_header && client_header_found !in mask )
		return;
	if ( p?$server_header && server_header_found !in mask )
		return;
	# All found.
	ProtocolDetector::found_protocol(c, ANALYZER_HTTP, pid);
	}
# Match each request URI against every protocol's URL pattern and record
# hits in the per-connection mask.
event http_request(c: connection, method: string, original_URI: string,
		unescaped_URI: string, version: string)
	{
	for ( pid in protocols )
		{
		local p = protocols[pid];
		if ( ! p?$url )
			next;
		local mask: set[count];
		local idx = [$id=c$id, $pid=pid];
		if ( idx in conns )
			mask = conns[idx];
		if ( url_found in mask )
			# Already found a match.
			next;
		# FIXME: There are people putting NULs into the URLs
		# (BitTorrent), which to_lower() does not like. Not sure
		# what the right fix is, though.
		unescaped_URI = subst_string(unescaped_URI, "\x00", "");
		if ( to_lower(unescaped_URI) == p$url )
			{
			add mask[url_found];
			check_match(c, pid, mask);
			}
		}
	}
# Match request/response headers against each protocol's header patterns
# (and optional header-content patterns); also report the server software
# named in a Server: header.
event http_header(c: connection, is_orig: bool, name: string, value: string)
	{
	if ( name == /[sS][eE][rR][vV][eE][rR]/ )
		{
		# Try to extract the server software.
		local s = split1(strip(value), /[[:space:]\/]/);
		if ( s[1] == /[-a-zA-Z0-9_]+/ )
			ProtocolDetector::found_protocol(c, ANALYZER_HTTP, s[1]);
		}
	for ( pid in protocols )
		{
		local p = protocols[pid];
		local mask: set[count];
		local idx = [$id=c$id, $pid=pid];
		if ( idx in conns )
			mask = conns[idx];
		# Client-side headers only match request headers.
		if ( p?$client_header && is_orig )
			{
			if ( client_header_found in mask )
				return;
			if ( to_lower(name) == p$client_header )
				{
				if ( p?$client_header_content )
					if ( to_lower(value) !=
					     p$client_header_content )
						return;
				add mask[client_header_found];
				check_match(c, pid, mask);
				}
			}
		# Server-side headers only match response headers.
		if ( p?$server_header && ! is_orig )
			{
			if ( server_header_found in mask )
				return;
			if ( to_lower(name) == p$server_header )
				{
				if ( p?$server_header_content )
					if ( to_lower(value) !=
					     p$server_header_content )
						return;
				add mask[server_header_found];
				check_match(c, pid, mask);
				}
			}
		}
	}

View file

@ -1,258 +0,0 @@
# $Id: detect-protocols.bro,v 1.1.4.4 2006/05/31 18:07:27 sommer Exp $
#
# Finds connections with protocols on non-standard ports using the DPM
# framework.
@load site
@load conn-id
@load notice
module ProtocolDetector;
export {
	redef enum Notice += {
		ProtocolFound, # raised for each connection found
		ServerFound, # raised once per dst host/port/protocol tuple
	};
	# Table of (protocol, resp_h, resp_p) tuples known to be uninteresting
	# in the given direction. For all other protocols detected on
	# non-standard ports, we raise a ProtocolFound notice. (More specific
	# filtering can then be done via notice_filters.)
	#
	# Use 0.0.0.0 to wildcard-match any resp_h.
	type dir: enum { NONE, INCOMING, OUTGOING, BOTH };
	const valids: table[count, addr, port] of dir = {
		# A couple of ports commonly used for benign HTTP servers.
		# For now we want to see everything.
		# [ANALYZER_HTTP, 0.0.0.0, 81/tcp] = OUTGOING,
		# [ANALYZER_HTTP, 0.0.0.0, 82/tcp] = OUTGOING,
		# [ANALYZER_HTTP, 0.0.0.0, 83/tcp] = OUTGOING,
		# [ANALYZER_HTTP, 0.0.0.0, 88/tcp] = OUTGOING,
		# [ANALYZER_HTTP, 0.0.0.0, 8001/tcp] = OUTGOING,
		# [ANALYZER_HTTP, 0.0.0.0, 8090/tcp] = OUTGOING,
		# [ANALYZER_HTTP, 0.0.0.0, 8081/tcp] = OUTGOING,
		#
		# [ANALYZER_HTTP, 0.0.0.0, 6346/tcp] = BOTH, # Gnutella
		# [ANALYZER_HTTP, 0.0.0.0, 6347/tcp] = BOTH, # Gnutella
		# [ANALYZER_HTTP, 0.0.0.0, 6348/tcp] = BOTH, # Gnutella
	} &redef;
	# Set of analyzers for which we suppress ServerFound notices
	# (but not ProtocolFound). Along with avoiding clutter in the
	# log files, this also saves memory because for these we don't
	# need to remember which servers we already have reported, which
	# for some can be a lot.
	const suppress_servers: set [count] = {
		# ANALYZER_HTTP
	} &redef;
	# We consider a connection to use a protocol X if the analyzer for X
	# is still active (i) after an interval of minimum_duration, or (ii)
	# after a payload volume of minimum_volume, or (iii) at the end of the
	# connection.
	const minimum_duration = 30 secs &redef;
	const minimum_volume = 4e3 &redef; # bytes
	# How often to check the size of the connection.
	const check_interval = 5 secs;
	# Entry point for other analyzers to report that they recognized
	# a certain (sub-)protocol.
	global found_protocol: function(c: connection, analyzer: count,
					protocol: string);
	# Table keeping reported (server, port, analyzer) tuples (and their
	# reported sub-protocols).
	global servers: table[addr, port, string] of set[string]
		&read_expire = 14 days;
}
# Table that tracks currently active dynamic analyzers per connection.
global conns: table[conn_id] of set[count];
# Table of reports by other analyzers about the protocol used in a connection.
global protocols: table[conn_id] of set[string];
# Pairing of an analyzer name with any reported sub-protocols.
type protocol : record {
	a: string; # analyzer name
	sub: string; # "sub-protocols" reported by other sources
};
# Build the protocol record for analyzer a on connection c, joining all
# reported sub-protocols with "/".
function get_protocol(c: connection, a: count) : protocol
	{
	local str = "";
	if ( c$id in protocols )
		{
		for ( p in protocols[c$id] )
			str = |str| > 0 ? fmt("%s/%s", str, p) : p;
		}
	return [$a=analyzer_name(a), $sub=str];
	}
# Human-readable protocol label: "sub (via analyzer)" when sub-protocols
# were reported, otherwise just the analyzer name.
function fmt_protocol(p: protocol) : string
	{
	if ( p$sub == "" )
		return p$a;
	return fmt("%s (via %s)", p$sub, p$a);
	}
# Raise ProtocolFound (unless the tuple is whitelisted for direction d)
# and, for new servers or new sub-protocols, a ServerFound notice.
function do_notice(c: connection, a: count, d: dir)
	{
	if ( d == BOTH )
		return;
	if ( d == INCOMING && is_local_addr(c$id$resp_h) )
		return;
	if ( d == OUTGOING && ! is_local_addr(c$id$resp_h) )
		return;
	local p = get_protocol(c, a);
	local s = fmt_protocol(p);
	NOTICE([$note=ProtocolFound,
		$msg=fmt("%s %s on port %s", id_string(c$id), s, c$id$resp_p),
		$sub=s, $conn=c, $n=a]);
	# We report multiple ServerFound's per host if we find a new
	# sub-protocol.
	local known = [c$id$resp_h, c$id$resp_p, p$a] in servers;
	local newsub = F;
	if ( known )
		newsub = (p$sub != "" &&
			  p$sub !in servers[c$id$resp_h, c$id$resp_p, p$a]);
	if ( (! known || newsub) && a !in suppress_servers )
		{
		NOTICE([$note=ServerFound,
			$msg=fmt("%s: %s server on port %s%s", c$id$resp_h, s,
				c$id$resp_p, (known ? " (update)" : "")),
			$p=c$id$resp_p, $sub=s, $conn=c, $src=c$id$resp_h, $n=a]);
		if ( ! known )
			servers[c$id$resp_h, c$id$resp_p, p$a] = set();
		add servers[c$id$resp_h, c$id$resp_p, p$a][p$sub];
		}
	}
# Report every analyzer still active on the connection (honoring the
# valids whitelist) and drop the per-connection tracking state.
function report_protocols(c: connection)
	{
	# We only report the connection if both sides have transferred data.
	if ( c$resp$size == 0 || c$orig$size == 0 )
		{
		delete conns[c$id];
		delete protocols[c$id];
		return;
		}
	local analyzers = conns[c$id];
	for ( a in analyzers )
		{
		if ( [a, c$id$resp_h, c$id$resp_p] in valids )
			do_notice(c, a, valids[a, c$id$resp_h, c$id$resp_p]);
		else if ( [a, 0.0.0.0, c$id$resp_p] in valids )
			do_notice(c, a, valids[a, 0.0.0.0, c$id$resp_p]);
		else
			do_notice(c, a, NONE);
		append_addl(c, analyzer_name(a));
		}
	delete conns[c$id];
	delete protocols[c$id];
	}
# Scheduled probe: report once the connection has lived long enough or
# carried enough payload, otherwise re-check later.
event ProtocolDetector::check_connection(c: connection)
	{
	if ( c$id !in conns )
		return;
	local duration = network_time() - c$start_time;
	local size = c$resp$size + c$orig$size;
	if ( duration >= minimum_duration || size >= minimum_volume )
		report_protocols(c);
	else
		{
		local delay = min_interval(minimum_duration - duration,
					check_interval);
		schedule delay { ProtocolDetector::check_connection(c) };
		}
	}
# End of connection: report analyzers that survived to the end, or just
# clean up the sub-protocol state if none were tracked.
event connection_state_remove(c: connection)
	{
	if ( c$id !in conns )
		{
		delete protocols[c$id];
		return;
		}
	# Reports all analyzers that have remained to the end.
	report_protocols(c);
	}
# DPD confirmed an analyzer on this connection: start (or extend) the
# per-connection tracking, unless the protocol is on its standard port.
event protocol_confirmation(c: connection, atype: count, aid: count)
	{
	# Don't report anything running on a well-known port.
	if ( atype in dpd_config && c$id$resp_p in dpd_config[atype]$ports )
		return;
	if ( c$id in conns )
		{
		local analyzers = conns[c$id];
		add analyzers[atype];
		}
	else
		{
		conns[c$id] = set(atype);
		local delay = min_interval(minimum_duration, check_interval);
		schedule delay { ProtocolDetector::check_connection(c) };
		}
	}
# event connection_analyzer_disabled(c: connection, analyzer: count)
# {
# if ( c$id !in conns )
# return;
#
# delete conns[c$id][analyzer];
# }
# Annotate the connection's addl field with every tracked analyzer.
# Precondition: c$id must be present in conns.
function append_proto_addl(c: connection)
	{
	for ( a in conns[c$id] )
		append_addl(c, fmt_protocol(get_protocol(c, a)));
	}
# Exported entry point: another script reports a (sub-)protocol seen on
# this connection. Ignored when the analyzer runs on its standard port.
function found_protocol(c: connection, analyzer: count, protocol: string)
	{
	# Don't report anything running on a well-known port.
	if ( analyzer in dpd_config &&
	     c$id$resp_p in dpd_config[analyzer]$ports )
		return;
	if ( c$id !in protocols )
		protocols[c$id] = set();
	add protocols[c$id][protocol];
	}
# Second handler for the same event (Bro runs all handlers): add the
# analyzer annotations to addl before the connection is logged.
event connection_state_remove(c: connection)
	{
	if ( c$id !in conns )
		return;
	append_proto_addl(c);
	}

View file

@ -1,525 +0,0 @@
# $Id: dhcp.bro 4054 2007-08-14 21:45:58Z pclin $
@load dpd
@load weird
module DHCP;
export {
	# Set to false to disable printing to dhcp.log.
	const logging = T &redef;
}
# Type of states in DHCP client. See Figure 5 in RFC 2131.
# Each state name is prefixed with DHCP_ to avoid name conflicts.
type dhcp_state: enum {
	DHCP_INIT_REBOOT,
	DHCP_INIT,
	DHCP_SELECTING,
	DHCP_REQUESTING,
	DHCP_REBINDING,
	DHCP_BOUND,
	DHCP_RENEWING,
	DHCP_REBOOTING,
	# This state is not in Figure 5. Client has been externally configured.
	DHCP_INFORM,
};
global dhcp_log: file;
# Source port 68: client -> server; source port 67: server -> client.
global dhcp_ports: set[port] = { 67/udp, 68/udp } &redef;
redef dpd_config += { [ANALYZER_DHCP_BINPAC] = [$ports = dhcp_ports] };
# Default handling for peculiarities in DHCP analysis.
redef Weird::weird_action += {
	["DHCP_no_type_option"] = Weird::WEIRD_FILE,
	["DHCP_wrong_op_type"] = Weird::WEIRD_FILE,
	["DHCP_wrong_msg_type"] = Weird::WEIRD_FILE,
};
# Types of DHCP messages, identified from the 'options' field. See RFC 1533.
global dhcp_msgtype_name: table[count] of string = {
	[1] = "DHCP_DISCOVER",
	[2] = "DHCP_OFFER",
	[3] = "DHCP_REQUEST",
	[4] = "DHCP_DECLINE",
	[5] = "DHCP_ACK",
	[6] = "DHCP_NAK",
	[7] = "DHCP_RELEASE",
	[8] = "DHCP_INFORM",
};
# Type of DHCP client state, inferred from the messages. See RFC 2131, fig 5.
global dhcp_state_name: table[dhcp_state] of string = {
	[DHCP_INIT_REBOOT] = "INIT-REBOOT",
	[DHCP_INIT] = "INIT",
	[DHCP_SELECTING] = "SELECTING",
	[DHCP_REQUESTING] = "REQUESTING",
	[DHCP_REBINDING] = "REBINDING",
	[DHCP_BOUND] = "BOUND",
	[DHCP_RENEWING] = "RENEWING",
	[DHCP_REBOOTING] = "REBOOTING",
	[DHCP_INFORM] = "INFORM",
};
# Per-session tracking state, keyed by DHCP transaction ID.
type dhcp_session_info: record {
	state: dhcp_state; # the state of a DHCP client
	seq: count; # sequence of session in the trace
	lease: interval; # lease time of an IP address
	h_addr: string; # hardware/MAC address of the client
};
# Track the DHCP session info of each client, indexed by the transaction ID.
global dhcp_session: table[count] of dhcp_session_info
	&default = record($state = DHCP_INIT_REBOOT, $seq = 0, $lease = 0 sec,
				$h_addr = "")
	&write_expire = 5 min
	;
# We need the following table to track some DHCPINFORM messages since they
# use xid = 0 (I do not know why), starting from the second pair of INFORM
# and ACK. Since the client address is ready before DHCPINFORM, we can use
# it as the index to find its corresponding xid.
global session_xid: table[addr] of count &read_expire = 30 sec;
# Count how many DHCP sessions have been detected, for use in dhcp_session_seq.
global pkt_cnt: count = 0;
global session_cnt: count = 0;
# Record the address of client that sends a DHCPINFORM message with xid = 0.
global recent_client: addr;
global BROADCAST_ADDR = 255.255.255.255;
global NULL_ADDR = 0.0.0.0;
# Used to detect if an ACK is duplicated. They are used only in dhcp_ack().
# We put them here since Bro scripts lacks the equivalent of "static" variables.
global ack_from: addr;
global duplicated_ack: bool;
# Format a diagnostic for a DHCP message type that arrived in an
# unexpected client state.
function warning_wrong_state(msg_type: count): string
	{
	local type_name = dhcp_msgtype_name[msg_type];
	return fmt("%s not sent in a correct state.", type_name);
	}
# Log-line prefix: timestamp and session sequence number, optionally
# followed by the connection's endpoints.
function dhcp_message(c: connection, seq: count, show_conn: bool): string
	{
	local prefix = fmt("%.06f #%d", network_time(), seq);
	if ( ! show_conn )
		return prefix;

	return fmt("%s %s > %s", prefix,
		endpoint_id(c$id$orig_h, c$id$orig_p),
		endpoint_id(c$id$resp_h, c$id$resp_p));
	}
# Create and register a fresh session record for transaction xid, assigning
# it the next global sequence number.  Returns the new record.
function new_dhcp_session(xid: count, state: dhcp_state, h_addr: string)
	: dhcp_session_info
	{
	local s: dhcp_session_info;

	++session_cnt;
	s$state = state;
	s$seq = session_cnt;
	s$lease = 0 sec;
	s$h_addr = h_addr;

	dhcp_session[xid] = s;

	return s;
	}
# Open the dhcp log file at startup when logging is enabled.
event bro_init()
	{
	if ( logging )
		dhcp_log = open_log_file("dhcp");
	}
# DHCPDISCOVER: a client begins address acquisition.  Creates a session in
# state SELECTING for a new transaction; otherwise logs a duplicate.
event dhcp_discover(c: connection, msg: dhcp_msg, req_addr: addr)
	{
	local old_session = T;
	if ( msg$xid !in dhcp_session )
		{
		# First DISCOVER for this xid: client enters SELECTING.
		local session =
			new_dhcp_session(msg$xid, DHCP_SELECTING, msg$h_addr);
		old_session = F;
		}
	if ( logging )
		{
		# A repeated DISCOVER while already SELECTING is a retransmission.
		if ( old_session &&
		     dhcp_session[msg$xid]$state == DHCP_SELECTING )
			print dhcp_log, fmt("%s DISCOVER (duplicated)",
				dhcp_message(c, dhcp_session[msg$xid]$seq, F));
		else
			print dhcp_log,
				fmt("%s DISCOVER (xid = %x, client state = %s)",
				dhcp_message(c, dhcp_session[msg$xid]$seq, T),
				msg$xid, dhcp_state_name[dhcp_session[msg$xid]$state]);
		}
	}
# DHCPOFFER: a server proposes an address.  Logs standalone offers (no
# known transaction), offers arriving in an unexpected client state, and
# normal offers during SELECTING.
event dhcp_offer(c: connection, msg: dhcp_msg, mask: addr,
		router: dhcp_router_list, lease: interval, serv_addr: addr)
	{
	local standalone = msg$xid !in dhcp_session;
	# BUG FIX: this was "standalone && ...", which made the
	# "in error state" branch below unreachable -- it is only tested
	# in the non-standalone branch, where standalone is false.
	local err_state =
		! standalone && dhcp_session[msg$xid]$state != DHCP_SELECTING;
	if ( logging )
		{
		# Note that no OFFER messages are considered duplicated,
		# since they may come from multiple DHCP servers in a session.
		if ( standalone )
			print dhcp_log, fmt("%s OFFER (standalone)",
				dhcp_message(c, ++session_cnt, T));
		else if ( err_state )
			print dhcp_log, fmt("%s OFFER (in error state %s)",
				dhcp_message(c, dhcp_session[msg$xid]$seq, T),
				dhcp_state_name[dhcp_session[msg$xid]$state]);
		else
			print dhcp_log, fmt("%s OFFER (client state = %s)",
				dhcp_message(c, dhcp_session[msg$xid]$seq, T),
				dhcp_state_name[DHCP_SELECTING]);
		}
	}
# DHCPREQUEST: a client asks for (or renews) an address.  Tracks the
# client's state machine and records the xid per client address so later
# DHCPINFORM/ACK exchanges using xid = 0 can be correlated in dhcp_ack().
event dhcp_request(c: connection, msg: dhcp_msg,
		req_addr: addr, serv_addr: addr)
	{
	local log_info: string;
	if ( msg$xid in dhcp_session )
		{
		local state = dhcp_session[msg$xid]$state;
		# A rebooting client still has a zero source address, so use
		# the requested address to identify it.
		if ( state == DHCP_REBOOTING )
			recent_client = req_addr;
		else
			recent_client = c$id$orig_h;
		# BUG FIX: update session_xid *before* the logging check.
		# Previously this branch returned early when logging was
		# disabled, skipping the update and breaking the xid = 0
		# correlation used by dhcp_ack()/dhcp_inform().
		session_xid[recent_client] = msg$xid;
		if ( ! logging )
			return;
		if ( state == DHCP_RENEWING || state == DHCP_REBINDING ||
		     state == DHCP_REQUESTING || state == DHCP_REBOOTING )
			print dhcp_log, fmt("%s REQUEST (duplicated)",
				dhcp_message(c, dhcp_session[msg$xid]$seq, F));
		else
			{
			log_info = dhcp_message(c, dhcp_session[msg$xid]$seq, T);
			print dhcp_log, fmt("%s REQUEST (in error state %s)",
				log_info,
				dhcp_state_name[dhcp_session[msg$xid]$state]);
			}
		}
	else
		{
		# New transaction: infer the client's state from how the
		# REQUEST was addressed (cf. RFC 2131, section 4.3.2).
		local d_state = DHCP_REBOOTING;
		if ( c$id$resp_h != BROADCAST_ADDR )
			d_state = DHCP_RENEWING;
		else if ( msg$ciaddr != NULL_ADDR )
			d_state = DHCP_REBINDING;
		else if ( serv_addr != NULL_ADDR )
			d_state = DHCP_REQUESTING;
		local session = new_dhcp_session(msg$xid, d_state, msg$h_addr);
		if ( session$state == DHCP_REBOOTING )
			recent_client = req_addr;
		else
			recent_client = c$id$orig_h;
		session_xid[recent_client] = msg$xid;
		if ( logging )
			{
			log_info = dhcp_message(c, session$seq, T);
			if ( req_addr != NULL_ADDR )
				log_info = fmt("%s REQUEST %As",
						log_info, req_addr);
			else
				log_info = fmt("%s REQUEST", log_info);
			print dhcp_log, fmt("%s (xid = %x, client state = %s)",
				log_info, msg$xid,
				dhcp_state_name[session$state]);
			}
		}
	}
# DHCPDECLINE: the client rejects an offered address.  A decline in
# REQUESTING sends the client back to INIT; any other known state is
# treated as an error state.
event dhcp_decline(c: connection, msg: dhcp_msg)
	{
	local old_session = msg$xid in dhcp_session;
	local err_state = F;
	if ( old_session )
		{
		if ( dhcp_session[msg$xid]$state == DHCP_REQUESTING )
			dhcp_session[msg$xid]$state = DHCP_INIT;
		else
			err_state = T;
		}
	else
		# Unknown xid: start tracking it, assumed back in INIT.
		new_dhcp_session(msg$xid, DHCP_INIT, "");
	if ( ! logging )
		return;
	if ( old_session )
		{
		if ( err_state )
			print dhcp_log, fmt("%s DECLINE (in error state %s)",
				dhcp_message(c, dhcp_session[msg$xid]$seq, T),
				dhcp_state_name[dhcp_session[msg$xid]$state]);
		else
			# NOTE(review): the first DECLINE out of REQUESTING
			# (the legitimate transition handled above) is labeled
			# "duplicated" here -- verify this wording is intended.
			print dhcp_log, fmt("%s DECLINE (duplicated)",
				dhcp_message(c, dhcp_session[msg$xid]$seq, F));
		}
	else
		print dhcp_log, fmt("%s DECLINE (xid = %x)",
			dhcp_message(c, ++session_cnt, T), msg$xid);
	}
# DHCPACK: the server confirms a lease (or answers an INFORM).  Handles
# three cases: ACKs with xid = 0 (correlated to an INFORM via session_xid),
# ACKs for known transactions, and standalone ACKs.
event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr,
		router: dhcp_router_list, lease: interval, serv_addr: addr)
	{
	local log_info: string;
	if ( msg$xid == 0 )
		{ # An ACK for a DHCPINFORM message with xid = 0.
		local xid =
			c$id$orig_h in session_xid ?
				# An ACK to the client.
				session_xid[c$id$orig_h]
			:
				# Assume ACK from a relay agent to the server.
				session_xid[recent_client];
		local seq: count;
		if ( xid > 0 )
			{
			# Only the first ACK still finds the session in
			# INFORM; anything else is a retransmission.
			duplicated_ack = dhcp_session[xid]$state != DHCP_INFORM;
			dhcp_session[xid]$state = DHCP_BOUND;
			seq = dhcp_session[xid]$seq;
			}
		else
			{
			# This is a weird situation. We arbitrarily set
			# duplicated_ack to false to have more information
			# shown.
			duplicated_ack = F;
			seq = session_cnt;
			}
		if ( ! logging )
			return;
		log_info = dhcp_message(c, seq, F);
		if ( c$id$orig_h in session_xid )
			{
			if ( duplicated_ack )
				print dhcp_log, fmt("%s ACK (duplicated)",
							log_info);
			else
				print dhcp_log,
					fmt("%s ACK (client state = %s)",
						log_info,
						dhcp_state_name[DHCP_BOUND]);
			}
		else
			print dhcp_log,
				fmt("%s ACK (relay agent at = %As)",
					log_info, c$id$orig_h);
		return;
		}
	if ( msg$xid in dhcp_session )
		{
		local last_state = dhcp_session[msg$xid]$state;
		local from_reboot_state = last_state == DHCP_REBOOTING;
		# Any of these states may legitimately transition to BOUND.
		if ( last_state == DHCP_REQUESTING ||
		     last_state == DHCP_REBOOTING ||
		     last_state == DHCP_RENEWING ||
		     last_state == DHCP_REBINDING ||
		     last_state == DHCP_INFORM )
			{
			dhcp_session[msg$xid]$state = DHCP_BOUND;
			dhcp_session[msg$xid]$lease = lease;
			}
		if ( ! logging )
			return;
		if ( last_state == DHCP_BOUND )
			{
			# Already bound: either a retransmitted ACK from the
			# same server, or an ACK seen via a relay agent.
			log_info = dhcp_message(c, dhcp_session[msg$xid]$seq, F);
			if ( c$id$orig_h == ack_from )
				log_info = fmt("%s ACK (duplicated)",
						log_info);
			else
				# Not a duplicated ACK.
				log_info = fmt("%s ACK (relay agent at = %As)",
						log_info, c$id$orig_h);
			}
		else
			{
			ack_from = c$id$orig_h;
			# If in a reboot state, we had better
			# explicitly show the original address
			# and the destination address of ACK,
			# because the client initially has a
			# zero address.
			if ( from_reboot_state )
				log_info = dhcp_message(c, dhcp_session[msg$xid]$seq, T);
			else
				log_info = dhcp_message(c, dhcp_session[msg$xid]$seq, F);
			if ( last_state != DHCP_INFORM &&
			     lease > 0 sec )
				log_info = fmt("%s ACK (lease time = %s, ",
						log_info, lease);
			else
				log_info = fmt("%s ACK (", log_info);
			log_info = fmt("%sclient state = %s)",
					log_info,
					dhcp_state_name[dhcp_session[msg$xid]$state]);
			}
		print dhcp_log, log_info;
		}
	else if ( logging )
		print dhcp_log, fmt("%s ACK (standalone)",
				dhcp_message(c, ++session_cnt, T));
	}
# DHCPNAK: the server refuses the client's request; the client falls back
# to INIT.  Unknown transactions are logged as standalone.
event dhcp_nak(c: connection, msg: dhcp_msg)
	{
	if ( msg$xid !in dhcp_session )
		{
		if ( logging )
			print dhcp_log, fmt("%s NAK (standalone)",
				dhcp_message(c, ++session_cnt, T));
		return;
		}

	local prev = dhcp_session[msg$xid]$state;

	# A NAK aborts any in-flight request; the client restarts from INIT.
	if ( prev == DHCP_REQUESTING || prev == DHCP_REBOOTING ||
	     prev == DHCP_RENEWING || prev == DHCP_REBINDING )
		dhcp_session[msg$xid]$state = DHCP_INIT;

	if ( logging )
		print dhcp_log, fmt("%s NAK (client state = %s)",
			dhcp_message(c, dhcp_session[msg$xid]$seq, F),
			dhcp_state_name[dhcp_session[msg$xid]$state]);
	}
# DHCPRELEASE: the client relinquishes its address.
event dhcp_release(c: connection, msg: dhcp_msg)
	{
	local old_session = msg$xid in dhcp_session;
	if ( ! old_session )
		# We assume the client goes back to DHCP_INIT
		# because the RFC does not specify which state to go to.
		new_dhcp_session(msg$xid, DHCP_INIT, "");
	if ( ! logging )
		return;
	if ( old_session )
		{
		if ( dhcp_session[msg$xid]$state == DHCP_INIT )
			print dhcp_log, fmt("%s RELEASE (duplicated)",
				dhcp_message(c, dhcp_session[msg$xid]$seq, F));
		else
			# BUG FIX: removed the stray comma after "RELEASE"
			# so the line matches the format of every other
			# message logged by this script.
			print dhcp_log, fmt("%s RELEASE (client state = %s)",
				dhcp_message(c, dhcp_session[msg$xid]$seq, F),
				dhcp_state_name[dhcp_session[msg$xid]$state]);
		}
	else
		print dhcp_log, fmt("%s RELEASE (xid = %x, IP addr = %As)",
			dhcp_message(c, session_cnt, T), msg$xid, c$id$orig_h);
	}
# DHCPINFORM: a client with an existing address asks for configuration
# parameters.  Some clients send INFORM with xid = 0; those are correlated
# to prior sessions through the session_xid table.
event dhcp_inform(c: connection, msg: dhcp_msg)
	{
	recent_client = c$id$orig_h;
	if ( msg$xid == 0 )
		{
		# Oops! Try to associate message with transaction ID 0 with
		# a previous session.
		local xid: count;
		local seq: count;
		if ( c$id$orig_h in session_xid )
			{
			xid = session_xid[c$id$orig_h];
			dhcp_session[xid]$state = DHCP_INFORM;
			seq = dhcp_session[xid]$seq;
			}
		else
			{
			# Weird: xid = 0 and no previous INFORM-ACK dialog.
			xid = 0;
			seq = ++session_cnt;
			# Just record that a INFORM message has appeared,
			# although the xid is not useful.
			session_xid[c$id$orig_h] = 0;
			}
		if ( logging )
			print dhcp_log,
				fmt("%s INFORM (xid = %x, client state = %s)",
					dhcp_message(c, seq, T),
					xid, dhcp_state_name[DHCP_INFORM]);
		return;
		}
	if ( msg$xid in dhcp_session )
		{
		# Known transaction: this INFORM is a retransmission.
		if ( logging )
			if ( dhcp_session[msg$xid]$state == DHCP_INFORM )
				print dhcp_log, fmt("%s INFORM (duplicated)",
					dhcp_message(c, dhcp_session[msg$xid]$seq, F));
			else {
				print dhcp_log,
					fmt("%s INFORM (duplicated, client state = %s)",
					dhcp_message(c, dhcp_session[msg$xid]$seq, F),
					dhcp_state_name[dhcp_session[msg$xid]$state]);
			}
		return;
		}
	local session = new_dhcp_session(msg$xid, DHCP_INFORM, msg$h_addr);
	# Associate this transaction ID with the host so we can identify
	# subsequent pairs of INFORM/ACK if client uses xid=0.
	session_xid[c$id$orig_h] = msg$xid;
	if ( logging )
		print dhcp_log, fmt("%s INFORM (xid = %x, client state = %s)",
				dhcp_message(c, session$seq, T),
				msg$xid, dhcp_state_name[session$state]);
	}

View file

@ -1,81 +0,0 @@
# $Id: dns-info.bro 3919 2007-01-14 00:27:09Z vern $
# Types, errors, and fields for analyzing DNS data.  A helper file
# for dns.bro.
# Frequently referenced DNS query-type codes (see RFC 1035 / RFC 2671).
const PTR = 12;
const EDNS = 41;
const ANY = 255;
# Map numeric DNS query (RR) types to their mnemonic names; unknown codes
# render as "query-<n>" via the &default function.
const query_types = {
	[1] = "A", [2] = "NS", [3] = "MD", [4] = "MF",
	[5] = "CNAME", [6] = "SOA", [7] = "MB", [8] = "MG",
	[9] = "MR", [10] = "NULL", [11] = "WKS", [PTR] = "PTR",
	[13] = "HINFO", [14] = "MINFO", [15] = "MX", [16] = "TXT",
	[17] = "RP", [18] = "AFSDB", [19] = "X25", [20] = "ISDN",
	[21] = "RT", [22] = "NSAP", [23] = "NSAP-PTR", [24] = "SIG",
	[25] = "KEY", [26] = "PX" , [27] = "GPOS", [28] = "AAAA",
	[29] = "LOC", [30] = "EID", [31] = "NIMLOC", [32] = "NB",
	[33] = "SRV", [34] = "ATMA", [35] = "NAPTR", [36] = "KX",
	[37] = "CERT", [38] = "A6", [39] = "DNAME", [40] = "SINK",
	[EDNS] = "EDNS", [42] = "APL", [43] = "DS", [44] = "SINK",
	[45] = "SSHFP", [46] = "RRSIG", [47] = "NSEC", [48] = "DNSKEY",
	[49] = "DHCID", [99] = "SPF", [100] = "DINFO", [101] = "UID",
	[102] = "GID", [103] = "UNSPEC", [249] = "TKEY", [250] = "TSIG",
	[251] = "IXFR", [252] = "AXFR", [253] = "MAILB", [254] = "MAILA",
	[32768] = "TA", [32769] = "DLV",
	[ANY] = "*",
} &default = function(n: count): string { return fmt("query-%d", n); };
# Short labels for DNS response codes, used in log annotations.
# NOTE(review): "X[" for code 5 looks like a corrupted label -- the other
# entries follow an "X<abbrev>" pattern; verify against upstream dns-info.bro.
const DNS_code_types = {
	[0] = "X0",
	[1] = "Xfmt",
	[2] = "Xsrv",
	[3] = "Xnam",
	[4] = "Ximp",
	[5] = "X[",
} &default = function(n: count): string { return "?"; };
# Used for non-TSIG/EDNS types.
# Standard DNS RCODE names; 3842 is a synthetic code for the TSIG(16)
# collision with EDNS BADVERS(16) (see dns_TSIG_addl in dns.bro).
const base_error = {
	[0] = "NOERROR",	# No Error
	[1] = "FORMERR",	# Format Error
	[2] = "SERVFAIL",	# Server Failure
	[3] = "NXDOMAIN",	# Non-Existent Domain
	[4] = "NOTIMP",		# Not Implemented
	[5] = "REFUSED",	# Query Refused
	[6] = "YXDOMAIN",	# Name Exists when it should not
	[7] = "YXRRSET",	# RR Set Exists when it should not
	[8] = "NXRRSet",	# RR Set that should exist does not
	[9] = "NOTAUTH",	# Server Not Authoritative for zone
	[10] = "NOTZONE",	# Name not contained in zone
	[11] = "unassigned-11",	# available for assignment
	[12] = "unassigned-12",	# available for assignment
	[13] = "unassigned-13",	# available for assignment
	[14] = "unassigned-14",	# available for assignment
	[15] = "unassigned-15",	# available for assignment
	[16] = "BADVERS",	# for EDNS, collision w/ TSIG
	[17] = "BADKEY",	# Key not recognized
	[18] = "BADTIME",	# Signature out of time window
	[19] = "BADMODE",	# Bad TKEY Mode
	[20] = "BADNAME",	# Duplicate key name
	[21] = "BADALG",	# Algorithm not supported
	[22] = "BADTRUNC",	# draft-ietf-dnsext-tsig-sha-05.txt
	[3842] = "BADSIG",	# 16 <= number collision with EDNS(16);
				# this is a translation from TSIG(16)
} &default = function(n: count): string { return "?"; };
# This deciphers EDNS Z field values.
const edns_zfield = {
	[0] = "NOVALUE",	# regular entry
	[32768] = "DNS_SEC_OK",	# accepts DNS Sec RRs
} &default = function(n: count): string { return "?"; };
# DNS class names (RFC 1035 section 3.2.4 plus QCLASSes NONE/ANY).
# NOTE(review): "C_HESOD" for class 4 looks like a misspelling of Hesiod
# ("C_HESIOD") -- confirm before renaming, as the string appears in logs.
const dns_class = {
	[1] = "C_INTERNET",
	[2] = "C_CSNET",
	[3] = "C_CHAOS",
	[4] = "C_HESOD",
	[254] = "C_NONE",
	[255] = "C_ANY",
} &default = function(n: count): string { return "?"; };

View file

@ -1,65 +0,0 @@
# $Id: dns-lookup.bro 340 2004-09-09 06:38:27Z vern $
# Reports changes in Bro's own DNS lookup results between runs.
@load notice
redef enum Notice += {
	DNS_MappingChanged,	# some sort of change WRT previous Bro lookup
};
# Which kinds of mapping changes are interesting enough to report.
const dns_interesting_changes = {
	"unverified", "old name", "new name", "mapping",
} &redef;
# Raise a DNS_MappingChanged notice for the mapping if the change kind is
# interesting (or the answer contains 127.0.0.1).  Returns T when a notice
# was generated, F otherwise.
function dump_dns_mapping(msg: string, dm: dns_mapping): bool
	{
	local interesting =
		msg in dns_interesting_changes || 127.0.0.1 in dm$addrs;

	if ( ! interesting )
		return F;

	# Prefer the hostname that was asked for; fall back to the address.
	local req = dm$req_host == "" ? fmt("%As", dm$req_addr) : dm$req_host;
	local validity = dm$valid ? "" : "(invalid) ";

	NOTICE([$note=DNS_MappingChanged,
		$msg=fmt("DNS %s: %s/%s %s-> %As", msg, req,
			dm$hostname, validity, dm$addrs),
		$sub=msg]);

	return T;
	}
# The handlers below forward each kind of mapping-change event from Bro's
# internal resolver to dump_dns_mapping, which decides whether to report it.
event dns_mapping_valid(dm: dns_mapping)
	{
	dump_dns_mapping("valid", dm);
	}
event dns_mapping_unverified(dm: dns_mapping)
	{
	dump_dns_mapping("unverified", dm);
	}
event dns_mapping_new_name(dm: dns_mapping)
	{
	dump_dns_mapping("new name", dm);
	}
event dns_mapping_lost_name(dm: dns_mapping)
	{
	dump_dns_mapping("lost name", dm);
	}
# A name changed between lookups: report both old and new (new only if
# the old one was deemed interesting).
event dns_mapping_name_changed(old_dm: dns_mapping, new_dm: dns_mapping)
	{
	if ( dump_dns_mapping("old name", old_dm) )
		dump_dns_mapping("new name", new_dm);
	}
# The address set behind a mapping changed: report the delta as well.
event dns_mapping_altered(dm: dns_mapping,
			old_addrs: set[addr], new_addrs: set[addr])
	{
	if ( dump_dns_mapping("mapping", dm) )
		NOTICE([$note=DNS_MappingChanged,
			$msg=fmt("changed addresses: %As -> %As", old_addrs, new_addrs),
			$sub="changed addresses"]);
	}

View file

@ -1,675 +0,0 @@
# $Id: dns.bro 6724 2009-06-07 09:23:03Z vern $
@load notice
@load weird
@load udp-common
@load dns-info
module DNS;
export {
	# Lookups of hosts in here are flagged ...
	const sensitive_lookup_hosts: set[addr] &redef;
	# ... unless the lookup comes from one of these hosts.
	const okay_to_lookup_sensitive_hosts: set[addr] &redef;
	# Start considering whether we're seeing PTR scanning if we've seen
	# at least this many rejected PTR queries.
	const report_rejected_PTR_thresh = 100 &redef;
	# Generate a PTR_scan event if at any point (once we're above
	# report_rejected_PTR_thresh) we see this many more distinct
	# rejected PTR requests than distinct answered PTR requests.
	const report_rejected_PTR_factor = 2.0 &redef;
	# The following sources are allowed to do PTR scanning.
	const allow_PTR_scans: set[addr] &redef;
	# Annotations that if returned for a PTR lookup actually indicate
	# a rejected query; for example, "illegal-address.lbl.gov".
	const actually_rejected_PTR_anno: set[string] &redef;
	# Hosts allowed to do zone transfers.
	const zone_transfers_okay: set[addr] &redef;
	# Set to false to disable printing to dns.log.
	const logging = T &redef;
	redef enum Notice += {
		SensitiveDNS_Lookup,	# DNS lookup of sensitive hostname/addr
		DNS_PTR_Scan,		# A set of PTR lookups
		DNS_PTR_Scan_Summary,	# Summary of a set of PTR lookups
		ResolverInconsistency,	# DNS answer changed
		ZoneTransfer,		# a DNS zone transfer request was seen
	};
	# This is a list of domains that have a history of providing
	# more RR's in response than they are supposed to.  There is
	# some danger here in that record inconsistencies will not be
	# identified for these domains ...
	const bad_domain_resp: set[string] &redef;
	# Same idea, except that it applies to a list of host names.
	const bad_host_resp: set[string] &redef;
	# Turn resolver consistency checking on/off.
	const resolver_consist_check = F &redef;
	# Should queries be checked against 'bad' domains?
	const check_domain_list = T;
	# List of 'bad' domains.
	const hostile_domain_list: set[string] &redef;
	# Used for PTR scan detection.  Exported so their timeouts can be
	# adjusted.
	global distinct_PTR_requests:
		table[addr, string] of count &default = 0 &write_expire = 5 min;
	global distinct_rejected_PTR_requests:
		table[addr] of count &default = 0 &write_expire = 5 min;
	global distinct_answered_PTR_requests:
		table[addr] of count &default = 0 &write_expire = 5 min;
}
# Capture DNS (and NetBIOS name service) traffic in the default filter.
redef capture_filters += {
	["dns"] = "port 53",
	["netbios-ns"] = "udp port 137",
};
# DPM configuration.
global dns_ports = { 53/udp, 53/tcp, 137/udp } &redef;
redef dpd_config += { [ANALYZER_DNS] = [$ports = dns_ports] };
# The binpac-based analyzers are split by transport protocol.
global dns_udp_ports = { 53/udp, 137/udp } &redef;
global dns_tcp_ports = { 53/tcp } &redef;
redef dpd_config += { [ANALYZER_DNS_UDP_BINPAC] = [$ports = dns_udp_ports] };
redef dpd_config += { [ANALYZER_DNS_TCP_BINPAC] = [$ports = dns_tcp_ports] };
# Default handling for peculiarities in DNS analysis.  You can redef these
# again in your site-specific script if you want different behavior.
# Each entry routes the named protocol anomaly to the weird file.
redef Weird::weird_action += {
	["DNS_AAAA_neg_length"] = Weird::WEIRD_FILE,
	["DNS_Conn_count_too_large"] = Weird::WEIRD_FILE,
	["DNS_NAME_too_long"] = Weird::WEIRD_FILE,
	["DNS_RR_bad_length"] = Weird::WEIRD_FILE,
	["DNS_RR_length_mismatch"] = Weird::WEIRD_FILE,
	["DNS_RR_unknown_type"] = Weird::WEIRD_FILE,
	["DNS_label_forward_compress_offset"] = Weird::WEIRD_FILE,
	["DNS_label_len_gt_name_len"] = Weird::WEIRD_FILE,
	["DNS_label_len_gt_pkt"] = Weird::WEIRD_FILE,
	["DNS_label_too_long"] = Weird::WEIRD_FILE,
	["DNS_name_too_long"] = Weird::WEIRD_FILE,
	["DNS_truncated_RR_rdlength_lt_len"] = Weird::WEIRD_FILE,
	["DNS_truncated_ans_too_short"] = Weird::WEIRD_FILE,
	["DNS_truncated_len_lt_hdr_len"] = Weird::WEIRD_FILE,
	["DNS_truncated_quest_too_short"] = Weird::WEIRD_FILE,
};
# Per-session bookkeeping for a client/server/transaction-ID triple.
type dns_session_info: record {
	id: count;
	is_zone_transfer: bool;
	last_active: time;	# when we last saw activity
	# Indexed by query id, returns string annotation corresponding to
	# queries for which no answer seen yet.
	pending_queries: table[count] of string;
};
# Indexed by client and server.
global dns_sessions: table[addr, addr, count] of dns_session_info;
global num_dns_sessions = 0;
# Matches the textual form of an IPv4 reverse (in-addr.arpa) lookup.
const PTR_pattern = /[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\.in-addr\.arpa/;
# Keeps track of for which addresses we processed a PTR_scan event.
global did_PTR_scan_event: table[addr] of count &default = 0;
# The following definitions relate to tracking when DNS records
# change and whether they do so in a consistent fashion.
type dns_response_record: record {
	dns_name: string;	# domain name in question
	dns_type: count;	# type of query
	num_resp: count;	# number of responses
	resp_count: count;	# how many responses have been registered
	addrs: set[addr];	# addresses in response
};
global dns_history: table[string, count, count] of dns_response_record;
global did_zone_transfer_notice: table[addr] of count &default = 0;
# Sample known irregular domains.
redef bad_domain_resp += { "instacontent.net", "mirror-image.net", };
# Sample hostile domains.
redef hostile_domain_list += { "undernet.org", "afraid.org", };
# Log file handle, opened in bro_init().
global dns_log : file;
# Open the dns log file at startup when logging is enabled.
event bro_init()
	{
	if ( logging )
		dns_log = open_log_file("dns");
	}
# Scheduled by insert_name when a record's TTL expires: drop the
# corresponding entry from dns_history.
event remove_name(name: string, qtype: count, id: count)
	{
	if ( [name, qtype, id] in dns_history )
		{
		# We need to remove the dns_history record and the associated
		# dns_consistency_info records.
		# NOTE(review): clearing drr$addrs element by element before
		# deleting the table entry looks redundant unless another
		# reference to this record is held elsewhere -- confirm.
		local drr = dns_history[name, qtype, id];
		local a: addr;
		for ( a in drr$addrs )
			delete drr$addrs[a];
		delete dns_history[name, qtype, id];
		}
	else if ( logging )
		print dns_log, fmt("ERROR in history session removal: %s/%d doesn't exist", name, qtype);
	}
# Returns the second-level domain, so for example an argument of "a.b.c.d"
# returns "c.d".
function second_level_domain(name: string): string
{
local split_on_dots = split(name, /\./);
local num_dots = length(split_on_dots);
if ( num_dots <= 1 )
return name;
return fmt("%s.%s", split_on_dots[num_dots-1], split_on_dots[num_dots]);
}
# Record an A-record answer in dns_history and raise ResolverInconsistency
# when a name yields more distinct addresses than the response advertised.
function insert_name(c: connection, msg: dns_msg, ans: dns_answer, a: addr)
	{
	local drr: dns_response_record;
	if ( [ans$query, ans$qtype, msg$id] !in dns_history )
		{ # add record
		drr$dns_name = ans$query;
		drr$dns_type = ans$qtype;
		# Here we modified the expected number of addresses to allow
		# for the number of answer RR's along with the provided
		# additional RR's.
		drr$num_resp = msg$num_answers+msg$num_addl;
		drr$resp_count = 0;
		add drr$addrs[a];
		dns_history[ans$query, ans$qtype, msg$id] = drr;
		if ( ans$TTL < 0 sec )
			# Strangely enough, the spec allows this,
			# though it's hard to see why!  But because
			# of that, we don't generate a Weird, we
			# just change the TTL to 0.
			ans$TTL = 0 sec;
		# Check the TTL, but allow a smidgen of skew to avoid
		# possible race conditions.
		schedule ans$TTL + 1 sec
			{ remove_name(ans$query, ans$qtype, msg$id) };
		}
	else
		{ # extract record and do some counting
		drr = dns_history[ans$query, ans$qtype, msg$id];
		# In some broken records, the number of reported records is 0.
		# This makes the test below fail, to 'fix' set to 1 ...
		if ( drr$num_resp == 0 )
			drr$num_resp = 1;
		# Check if we have filled in the expected number of responses
		# already - it should be > current responder count to allow
		# for resolver timeouts.  Addresses are only added if they
		# are not already present.  This comes at a slight performance
		# cost.
		if ( a !in drr$addrs )
			{
			add drr$addrs[a];
			++drr$resp_count;
			dns_history[ans$query, ans$qtype, msg$id]=drr;
			}
		# Still within the advertised count: nothing suspicious.
		if ( drr$num_resp >= drr$resp_count )
			return;
		# Domains/hosts known to over-answer are exempt.
		if ( second_level_domain(ans$query) in bad_domain_resp )
			return;
		if ( ans$query in bad_host_resp )
			return;
		# Too many responses to the request, or some other
		# inconsistency has been introduced.
		NOTICE([$note=ResolverInconsistency, $conn=c,
			$msg=fmt("address inconsistency for %s, %s", ans$query, a),
			$dst=a]);
		}
	}
# Periodic expiration check for a DNS session.  If the session has been
# idle longer than dns_session_timeout (or the trace ended), flush its
# still-pending queries to the log and remove it; otherwise re-schedule.
event expire_DNS_session(orig: addr, resp: addr, trans_id: count)
	{
	if ( [orig, resp, trans_id] in dns_sessions )
		{
		local session = dns_sessions[orig, resp, trans_id];
		local last_active = session$last_active;
		if ( network_time() > last_active + dns_session_timeout ||
		     done_with_network )
			{
			# Flush out any pending requests.
			if ( logging )
				{
				for ( query in session$pending_queries )
					print dns_log, fmt("%0.6f #%d %s",
						network_time(), session$id,
						session$pending_queries[query]);
				print dns_log, fmt("%.06f #%d finish",
					network_time(), session$id);
				}
			delete dns_sessions[orig, resp, trans_id];
			}
		else
			# Still active: check again after another timeout.
			schedule dns_session_timeout {
				expire_DNS_session(orig, resp, trans_id)
			};
		}
	}
# Return the session record for this connection/transaction, creating and
# registering a new one (with expiration scheduled) if none exists yet.
function lookup_DNS_session(c: connection, trans_id: count): dns_session_info
	{
	local cid = c$id;
	local orig = cid$orig_h;
	local resp = cid$resp_h;

	if ( [orig, resp, trans_id] in dns_sessions )
		return dns_sessions[orig, resp, trans_id];

	# First time we see this triple: set up a new session.
	local session: dns_session_info;
	session$id = ++num_dns_sessions;
	session$last_active = network_time();
	session$is_zone_transfer = F;

	if ( logging )
		print dns_log, fmt("%.06f #%d %s start",
			c$start_time, session$id, id_string(cid));

	dns_sessions[orig, resp, trans_id] = session;
	schedule 15 sec { expire_DNS_session(orig, resp, trans_id) };
	append_addl(c, fmt("#%d", session$id));

	return session;
	}
# Raised when an address in sensitive_lookup_hosts appears in a PTR query
# (is_query = T) or in an answer (is_query = F).  Suppressed for sources
# in okay_to_lookup_sensitive_hosts.
event sensitive_addr_lookup(c: connection, a: addr, is_query: bool)
	{
	local orig = c$id$orig_h;
	local resp = c$id$resp_h;
	# NOTE(review): trans_id 0 is used as the session-lookup key here, so
	# the "#n" annotation only resolves for transaction 0 -- confirm.
	local holding = 0;
	if ( orig in okay_to_lookup_sensitive_hosts )
		return;
	local session_id: string;
	if ( [orig, resp, holding] in dns_sessions )
		session_id = fmt("#%d", dns_sessions[orig, resp, holding]$id);
	else
		session_id = "#?";
	local id = fmt("%s > %s (%s)", orig, resp, session_id);
	if ( is_query )
		NOTICE([$note=SensitiveDNS_Lookup, $conn=c,
			$msg=fmt("%s PTR lookup of %s", id, a),
			$sub="PTR lookup"]);
	else
		NOTICE([$note=SensitiveDNS_Lookup, $conn=c,
			$msg=fmt("%s name lookup of %s", id, a),
			$sub="name lookup"]);
	}
# Build the per-query annotation used in dns.log (e.g. "?PTR 1.2.3.4" or
# "A www.example.com"), appending answer counts for non-zone-transfer
# messages that already carry answers.  May raise sensitive_addr_lookup.
function DNS_query_annotation(c: connection, msg: dns_msg, query: string,
				qtype: count, is_zone_xfer: bool): string
	{
	local anno: string;
	if ( (qtype == PTR || qtype == ANY) && query == PTR_pattern )
		{
		# convert PTR text to more readable form.
		local a = ptr_name_to_addr(query);
		if ( a in sensitive_lookup_hosts && ! is_zone_xfer )
			event sensitive_addr_lookup(c, a, T);
		anno = fmt("?%s %As", query_types[qtype], a);
		}
	else
		anno = fmt("%s %s", query_types[qtype], query);
	if ( ! is_zone_xfer &&
	     (msg$num_answers > 0 || msg$num_auth > 0 || msg$num_addl > 0) )
		anno = fmt("%s <query addl = %d/%d/%d>", anno,
				msg$num_answers, msg$num_auth, msg$num_addl);
	return anno;
	}
# Mark the session as a zone transfer, flag UDP-based transfer attempts as
# weird, and raise a ZoneTransfer notice (once per source) for requesters
# not in zone_transfers_okay.
event dns_zone_transfer_request(c: connection, session: dns_session_info,
				msg: dns_msg, query: string)
	{
	session$is_zone_transfer = T;

	if ( ! is_tcp_port(c$id$orig_p) )
		event conn_weird("UDP_zone_transfer", c);

	local requester = c$id$orig_h;

	if ( requester in zone_transfers_okay )
		return;

	# Only notice the first transfer request from a given source.
	if ( ++did_zone_transfer_notice[requester] == 1 )
		NOTICE([$note=ZoneTransfer, $src=requester, $conn=c,
			$msg=fmt("transfer of %s requested by %s",
				query, requester)]);
	}
# Log a DNS query, dispatch zone-transfer handling for AXFR, flag lookups
# of hostile domains, and register the query as pending in its session.
event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count)
	{
	local id = c$id;
	local orig = id$orig_h;
	local resp = id$resp_h;
	local session = lookup_DNS_session(c, msg$id);
	local anno = DNS_query_annotation(c, msg, query, qtype, F);
	local report = fmt("%.06f #%d %s", network_time(), session$id, c$id$orig_h);
	local q: string;
	if ( query_types[qtype] == "AXFR" )
		{
		event dns_zone_transfer_request(c, session, msg, query);
		q = DNS_query_annotation(c, msg, query, qtype, T);
		report = fmt("%s ?%s", report, q);
		}
	else
		report = fmt("%s <query ?%s> %s Trunc:%s Recurs:%s",
				report, query_types[qtype], query, msg$TC, msg$RD);
	if ( logging )
		print dns_log, fmt("%s", report);
	# Check to see if this is a host or MX lookup for a designated
	# hostile domain.
	if ( check_domain_list &&
	     (query_types[qtype] == "A" || query_types[qtype] == "MX") &&
	     second_level_domain(query) in hostile_domain_list )
		{
		NOTICE([$note=SensitiveDNS_Lookup, $conn=c,
			$msg=fmt("%s suspicious domain lookup: %s", id, query)]);
		}
	session$pending_queries[msg$id] = anno;
	session$last_active = network_time();
	}
# Log a rejected query, resolving it against the session's pending-query
# annotation when available.
event dns_rejected(c: connection, msg: dns_msg,
		query: string, qtype: count, qclass: count)
	{
	local session = lookup_DNS_session(c, msg$id);
	local code = DNS_code_types[msg$rcode];
	local id = msg$id;
	if ( id in session$pending_queries )
		{
		if ( logging )
			print dns_log, fmt("%.06f #%d %s %s", network_time(),
						session$id,
						session$pending_queries[id],
						code);
		delete session$pending_queries[id];
		}
	else if ( logging )
		{
		# No pending query: include the query text only if the
		# session was created just now (i.e. at connection start).
		if ( c$start_time == network_time() )
			print dns_log, fmt("%.06f #%d [?%s] %s", network_time(),
						session$id, query, code);
		else
			print dns_log, fmt("%.06f #%d %s", network_time(),
						session$id, code);
		}
	}
# Scheduled a day after a PTR_scan notice: report the final tallies of
# rejected vs. answered PTR lookups for the source.
event PTR_scan_summary(src: addr)
	{
	local n_rejected = distinct_rejected_PTR_requests[src];
	local n_answered = distinct_answered_PTR_requests[src];

	NOTICE([$note=DNS_PTR_Scan_Summary, $src=src,
		$msg=fmt("%s totaled %d/%d un/successful PTR lookups",
			src, n_rejected, n_answered),
		$sub="final summary"]);
	}
# A source crossed the PTR-scan detection threshold.  Record that we
# handled it, and for non-whitelisted sources raise a notice plus a
# deferred daily summary.
event PTR_scan(src: addr)
	{
	++did_PTR_scan_event[src];

	# Stay quiet for sources that are allowed to scan.
	if ( src in allow_PTR_scans || src in okay_to_lookup_sensitive_hosts )
		return;

	NOTICE([$note=DNS_PTR_Scan, $src=src,
		$msg=fmt("%s has made %d/%d un/successful PTR lookups",
			src, distinct_rejected_PTR_requests[src],
			distinct_answered_PTR_requests[src]),
		$sub="scan detected"]);

	schedule 1 day { PTR_scan_summary(src) };
	}
# Fire a PTR_scan event for src the first time its rejected-to-answered
# PTR lookup ratio reaches report_rejected_PTR_factor.
function check_PTR_scan(src: addr)
	{
	# Already reported this source?  PTR_scan records itself in
	# did_PTR_scan_event.
	if ( src in did_PTR_scan_event )
		return;

	local rejected = distinct_rejected_PTR_requests[src];
	local answered = distinct_answered_PTR_requests[src];

	if ( rejected >= answered * report_rejected_PTR_factor )
		event PTR_scan(src);
	}
# Common answer processing for all reply events: resolve the pending query
# annotation, build and log the report line, and feed PTR statistics into
# the scan detector.
function DNS_answer(c: connection, msg: dns_msg,
			ans: dns_answer, annotation: string)
	{
	local is_answer = ans$answer_type == DNS_ANS;
	local session = lookup_DNS_session(c, msg$id);
	local report =
		fmt("%.06f #%d %s", network_time(), session$id, c$id$orig_h);
	local id = msg$id;
	local query: string;
	if ( id in session$pending_queries )
		{
		query = fmt("%s = <ans %s>", session$pending_queries[id],
				query_types[ans$qtype]);
		delete session$pending_queries[id];
		report = fmt("%s %s", report, query);
		}
	else if ( session$is_zone_transfer )
		{ # need to provide the query directly.
		query = fmt("<ans %s>", query_types[ans$qtype]);
		report = fmt("%s ?%s", report, query);
		}
	else
		{
		# No corresponding query.  This can happen if it's
		# already been deleted because we've already processed
		# an answer to it; or if the session itself was timed
		# out prior to this answer being generated.  In the
		# first case, we don't want to provide the query again;
		# in the second, we do.  We can determine that we're
		# likely in the second case if either (1) this session
		# was just now created, or (2) we're now processing the
		# sole answer to the original query.
		#
		# However, for now we punt.
		#
		# if ( c$start_time == network_time() ||
		#      (is_answer && msg$num_answers == 1) )
		#	{
		#	query = DNS_query_annotation(c, msg, ans$query, ans$qtype, F);
		#	report = fmt("%s [?%s]", report, query);
		#	}
		# else
		#	query = "";
		query = fmt("<ans %s>", query_types[ans$qtype]);
		report = fmt("%s %s", report, query);
		}
	# Append a bunch of additional annotation.
	report = fmt("%s %s RCode:%s AA=%s TR=%s %s/%s/%s/%s",
			report, annotation, base_error[msg$rcode], msg$AA, msg$TC,
			msg$num_queries, msg$num_answers, msg$num_auth, msg$num_addl );
	local src = c$id$orig_h;
	if ( msg$rcode != 0 )
		{
		# Rejected query: count distinct rejected PTR lookups.
		if ( /\?(PTR|\*.*in-addr).*/ in query )
			##### should check for private address
			{
			if ( ++distinct_PTR_requests[src, query] == 1 &&
			     ++distinct_rejected_PTR_requests[src] >=
			     report_rejected_PTR_thresh )
				check_PTR_scan(src);
			}
		report = fmt("%s %s", report, DNS_code_types[msg$rcode]);
		}
	else if ( is_answer )
		{
		if ( /\?(PTR|\*.*in-addr).*/ in query )
			{
			# Some "successful" PTR answers are effectively
			# rejections, identified by their annotation.
			if ( annotation in actually_rejected_PTR_anno )
				{
				if ( ++distinct_PTR_requests[src, query] == 1 &&
				     ++distinct_rejected_PTR_requests[src] >=
				     report_rejected_PTR_thresh )
					check_PTR_scan(src);
				}
			else
				{
				if ( ++distinct_PTR_requests[src, query] == 1 )
					++distinct_answered_PTR_requests[src];
				}
			}
		}
	if ( logging )
		print dns_log, fmt("%s TTL=%g", report, ans$TTL);
	### Note, DNS_AUTH and DNS_ADDL not processed.
	session$last_active = network_time();
	}
# A-record answer: flag sensitive addresses, log via DNS_answer, and
# optionally feed the mapping into the resolver-consistency tracker.
event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr)
	{
	if ( a in sensitive_lookup_hosts )
		event sensitive_addr_lookup(c, a, F);

	local annotation = fmt("%As", a);
	DNS_answer(c, msg, ans, annotation);

	if ( resolver_consist_check )
		insert_name(c, msg, ans, a);
	}
# The simple reply handlers below just format a per-RR-type annotation and
# hand off to DNS_answer for logging and bookkeeping.
event dns_NS_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string)
	{
	DNS_answer(c, msg, ans, fmt("%s", name));
	}
event dns_CNAME_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string)
	{
	DNS_answer(c, msg, ans, fmt("%s %s", query_types[ans$qtype], name));
	}
event dns_PTR_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string)
	{
	DNS_answer(c, msg, ans, fmt("%s", name));
	}
# SOA answers are annotated with the primary name server (mname).
event dns_SOA_reply(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa)
	{
	DNS_answer(c, msg, ans, fmt("%s", soa$mname));
	}
# MX answers are annotated as "name/preference".
event dns_MX_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string,
			preference: count)
	{
	DNS_answer(c, msg, ans, fmt("%s/%d", name, preference));
	}
event dns_EDNS(c: connection, msg: dns_msg, ans: dns_answer)
	{
	DNS_answer(c, msg, ans, "<---?--->");
	}
# From here on down we need to modify the way that data is recorded.  The
# standard resource record format is no longer universally applicable in
# that we may see modified structs or some number of value pairs that may take
# more flexability in reporting.
# EDNS OPT pseudo-RR in the additional section: log payload size, extended
# rcode, version and Z-field interpretation.
event dns_EDNS_addl(c: connection, msg: dns_msg, ans: dns_edns_additional)
	{
	local session = lookup_DNS_session(c, msg$id);
	local report =
		fmt("%.06f #%d %s", network_time(), session$id, c$id$orig_h);
	if ( ans$is_query == 1 )
		report = fmt("%s <addl_edns ?>", report);
	else
		report = fmt("%s <addl_edns> ", report);
	if ( logging )
		print dns_log, fmt("%s pldsize:%s RCode:%s VER:%s Z:%s",
					report, ans$payload_size,
					base_error[ans$extended_rcode],
					ans$version, edns_zfield[ans$z_field]);
	}
# TSIG record in the additional section: log key name, algorithm, original
# message ID and the (collision-adjusted) error code.
event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional)
	{
	local session = lookup_DNS_session(c, msg$id);
	local report =
		fmt("%.06f #%d %s", network_time(), session$id, c$id$orig_h);
	# Error handling with this is a little odd: number collision with EDNS.
	# We set the collided value to the first private space number. gross.
	local trans_error_num = (ans$rr_error == 16) ? 3842 : ans$rr_error;
	if ( ans$is_query == 1 )
		report = fmt("%s <addl_tsig ?> ", report);
	else
		report = fmt("%s <addl_tsig> ", report);
	if ( logging )
		print dns_log, fmt("%s name:%s alg:%s origID:%s RCode:%s",
					report, ans$query, ans$alg_name,
					ans$orig_id, base_error[trans_error_num]);
	}

View file

@ -1,74 +0,0 @@
# $Id: drop-adapt.bro 6940 2009-11-14 00:38:53Z robin $
#
# Adjust load level based on packet drops.
#
@load load-level
# Increase load-level if packet drops are successively 'count' times
# above 'threshold' percent.
const drop_increase_count = 5 &redef;
const drop_increase_threshold = 5.0 &redef;
# Same for decreasing load-level.
const drop_decrease_count = 15 &redef;
const drop_decrease_threshold = 0.0 &redef;
# Minimum time to wait after a load-level increase before new decrease.
const drop_decrease_wait = 20 mins &redef;
# Bookkeeping across net_stats_update events.
global drop_last_stat: net_stats;	# stats at the previous update
global drop_have_stats = F;		# T once a baseline sample exists
global drop_above = 0;			# consecutive intervals above threshold
global drop_below = 0;			# consecutive intervals below threshold
global drop_last_increase: time = 0;	# when the load level last rose
# Track packet-drop statistics and adapt the load level accordingly:
# raise the level after drop_increase_count consecutive intervals with a
# drop rate >= drop_increase_threshold percent, and lower it after
# drop_decrease_count consecutive intervals at or below
# drop_decrease_threshold percent - but never within drop_decrease_wait
# of the most recent increase.
event net_stats_update(t: time, ns: net_stats)
	{
	if ( ! drop_have_stats )
		{
		# First sample: just remember it as the baseline.
		drop_have_stats = T;
		drop_last_stat = ns;
		return;
		}

	local new_recvd = ns$pkts_recvd - drop_last_stat$pkts_recvd;
	local new_dropped =
		ns$pkts_dropped - drop_last_stat$pkts_dropped;
	drop_last_stat = ns;

	# Guard against dividing by zero when no packets were received
	# during the interval (the original code computed the percentage
	# unconditionally).
	if ( new_recvd == 0 )
		return;

	local p = new_dropped * 100.0 / new_recvd;

	# Negative percentages (e.g. counter resets) are ignored, as before.
	if ( p < 0 )
		return;

	if ( p >= drop_increase_threshold )
		{
		if ( ++drop_above >= drop_increase_count )
			{
			increase_load_level();
			drop_above = 0;
			drop_last_increase = t;
			}
		}
	else
		drop_above = 0;

	# Hold off on decreasing for a while after an increase.
	if ( t - drop_last_increase < drop_decrease_wait )
		return;

	if ( p <= drop_decrease_threshold )
		{
		if ( ++drop_below >= drop_decrease_count )
			{
			decrease_load_level();
			drop_below = 0;
			}
		}
	else
		drop_below = 0;
	}

View file

@ -1,340 +0,0 @@
# $Id:$
#
# drop.bro implements a drop/restore policy termed "catch-and-release"
# whereby the first time an address is dropped, it is restored a while after
# the last connection attempt seen. If a connection attempt is subsequently
# seen, however, then the system is blocked again, and for a longer time.
#
# This policy has significant benefits when using Bro to update router
# ACLs for which:
# - The router has a limited number of ACLs slots.
# - You care about possible reuse of IP addresses by now-benign hosts,
# so don't want blocks to last forever.
#
# Original code by Jim Mellander, LBNL.
# Updated by Brian Tierney, LBNL and by Robin Sommer, ICSI.
@load site
module Drop;
export {
redef enum Notice += {
# Connectivity with given address has been dropped.
AddressDropped,
# A request to drop connectivity has been ignored.
AddressDropIgnored,
# Connectivity with given address has been restored.
AddressRestored,
AddressAlreadyDropped, # host is already dropped
# Previously dropped host connects again.
AddressSeenAgain,
# Previous offenders re-dropped or re-restored.
RepeatAddressDropped,
RepeatAddressRestored,
};
# True if we have the capability to drop hosts at all.
const can_drop_connectivity = F &redef;
# True if we never want to drop local addresses.
const dont_drop_locals = T &redef;
# True if we should use the catch-and-release scheme. If not then
# we simply drop addresses via the drop_connectivity_script and
# never restore them (they must be restored out-of-band).
const use_catch_release = F &redef;
# Catch-and-release parameters.
# Interval to wait for release following inactivity after
# first offense.
global drop_time = 5 min &redef;
# For repeat offenders: if the total time a host has already been
# dropped reaches persistent_offender_time, we drop the host for
# long_drop_time. Setting persistent_offender_time to zero disables
# this functionality.
const persistent_offender_time = 2 hr &redef;
global long_drop_time = 12 hr &redef;
# Scripts to perform the actual dropping/restore. They get the
# IP address as their first argument.
const drop_connectivity_script = "drop-connectivity" &redef;
const restore_connectivity_script = "restore-connectivity" &redef;
const root_servers = {
a.root-servers.net, b.root-servers.net, c.root-servers.net,
d.root-servers.net, e.root-servers.net, f.root-servers.net,
g.root-servers.net, h.root-servers.net, i.root-servers.net,
j.root-servers.net, k.root-servers.net, l.root-servers.net,
m.root-servers.net,
} &redef;
const gtld_servers = {
a.gtld-servers.net, b.gtld-servers.net, c.gtld-servers.net,
d.gtld-servers.net, e.gtld-servers.net, f.gtld-servers.net,
g.gtld-servers.net, h.gtld-servers.net, i.gtld-servers.net,
j.gtld-servers.net, k.gtld-servers.net, l.gtld-servers.net,
m.gtld-servers.net,
} &redef;
const never_shut_down = {
root_servers, gtld_servers,
} &redef;
const never_drop_nets: set[subnet] &redef;
# Drop the connectivity for the address. "msg" gives a reason.
# It returns a copy of the NOTICE generated for the drop, which
# gives more information about the kind of dropping performed.
# If the notice type is NoticeNone, the drop was not successful
# (e.g., because this Bro instance is not configured to do drops.)
global drop_address: function(a: addr, msg: string) : notice_info;
# The following events are used to communicate information about the
# drops, in particular for C&R in the cluster setting.
# Address has been dropped.
global address_dropped: event(a: addr);
# Raised when an IP is restored.
global address_restored: event(a: addr);
# Raised when an that was dropped in the past is no
# longer monitored specifically for new connections.
global address_cleared: event(a: addr);
const debugging = F &redef;
global debug_log: function(msg: string);
}
type drop_rec: record {
tot_drop_count: count &default=0;
tot_restore_count: count &default=0;
actual_restore_count: count &default=0;
tot_drop_time: interval &default=0secs;
last_timeout: interval &default=0secs;
};
global clear_host: function(t: table[addr] of drop_rec, a: addr): interval;
global drop_info: table[addr] of drop_rec
&read_expire = 1 days &expire_func=clear_host &persistent;
global last_notice: notice_info;
# Wrapper around NOTICE() that also remembers the most recent notice in
# last_notice, so drop_address() can return it to its caller.
function do_notice(n: notice_info)
	{
	last_notice = n;
	NOTICE(n);
	}
# True if we must never drop connectivity for this address: dropping is
# disabled altogether, the address is a critical name server
# (never_shut_down), it lies in never_drop_nets, or it is local and
# dont_drop_locals is set.
function dont_drop(a: addr) : bool
	{
	return ! can_drop_connectivity || a in never_shut_down ||
		a in never_drop_nets || (dont_drop_locals && is_local_addr(a));
	}
# True if the address is currently dropped, i.e., it has accumulated
# more drops than restores.
function is_dropped(a: addr) : bool
	{
	if ( a !in drop_info )
		return F;

	local di = drop_info[a];

	# Sanity check: restores should never outnumber drops.
	if ( di$tot_drop_count < di$tot_restore_count )
		{ # This shouldn't happen.
		# FIXME: We need an assert().
		print "run-time error: more restores than drops!";
		return F;
		}

	return di$tot_drop_count > di$tot_restore_count;
	}
global debug_log_file: file;
# Write a time-stamped message to the drop-debug log; a no-op unless
# debugging is enabled.
function debug_log(msg: string)
	{
	if ( ! debugging )
		return;

	print debug_log_file,
		fmt("%.6f [%s] %s", network_time(), peer_description, msg);
	}
event bro_init()
{
if ( debugging )
{
debug_log_file =
open_log_file(fmt("drop-debug.%s", peer_description));
set_buf(debug_log_file, F);
}
}
# Unconditionally drop connectivity for address a (no catch-and-release
# timer).  Invokes the external drop script, raises
# AddressDropped/RepeatAddressDropped (or AddressAlreadyDropped if the
# host is already blocked), and updates the per-address drop counters.
# "msg" gives a human-readable reason included in the notice.
function do_direct_drop(a: addr, msg: string)
	{
	if ( msg != "" )
		msg = fmt(" (%s)", msg);

	if ( a !in drop_info )
		{
		# First time we see this address: start a fresh record.
		local tmp: drop_rec;
		drop_info[a] = tmp;
		}

	# NOTE(review): the code below increments di$tot_drop_count and
	# expects the change to be visible in drop_info[a], i.e. it assumes
	# record values in tables alias the table entry - confirm.
	local di = drop_info[a];

	if ( is_dropped(a) )
		# Already dropped. Nothing to do.
		do_notice([$note=Drop::AddressAlreadyDropped, $src=a,
			$msg=fmt("%s%s", a, msg)]);

	else
		{
		# Invoke the external script that installs the block.
		system(fmt("%s %s", Drop::drop_connectivity_script, a));
		debug_log(fmt("sending drop for %s", a));
		event Drop::address_dropped(a);

		if ( di$tot_drop_count == 0 )
			do_notice([$note=Drop::AddressDropped, $src=a,
				$msg=fmt("%s%s", a, msg)]);
		else
			{
			local s = fmt("(%d times)", di$tot_drop_count + 1);
			do_notice([$note=Drop::RepeatAddressDropped,
				$src=a, $n=di$tot_drop_count+1,
				$msg=fmt("%s%s %s", a, msg, s), $sub=s]);
			}
		}

	++di$tot_drop_count;

	debug_log(fmt("dropped %s: tot_drop_count=%d tot_restore_count=%d",
			a, di$tot_drop_count, di$tot_restore_count));
	}
# Restore a previously dropped address.
global do_restore: function(a: addr, force: bool);
event restore_dropped_address(a: addr)
{
do_restore(a, F);
}
# Drop the address and schedule an automatic restore
# (catch-and-release).  Repeat offenders whose accumulated drop time has
# reached persistent_offender_time get the longer long_drop_time.
function do_catch_release_drop(a: addr, msg: string)
	{
	do_direct_drop(a, msg);

	local di = drop_info[a];

	local t = (persistent_offender_time != 0 sec &&
		   di$tot_drop_time >= persistent_offender_time) ?
			long_drop_time : drop_time;

	# NOTE(review): this adds the *scheduled* timeout to tot_drop_time
	# up front rather than the time actually spent dropped - confirm
	# that is the intended accounting.
	di$tot_drop_time += t;
	di$last_timeout = t;

	schedule t { restore_dropped_address(a) };
	}
# Restore connectivity for a previously dropped address.  With "force"
# set, restore even if the drop/restore counters are not yet balanced
# (used when the state entry expires).  Raises AddressRestored or
# RepeatAddressRestored when connectivity is actually restored.
function do_restore(a: addr, force: bool)
	{
	if ( a !in drop_info )
		return;

	local di = drop_info[a];

	++drop_info[a]$tot_restore_count;

	debug_log(fmt("restored %s: tot_drop_count=%d tot_restore_count=%d force=%s", a, drop_info[a]$tot_drop_count, drop_info[a]$tot_restore_count, force));

	# Only restore once all outstanding drops are accounted for:
	# the host may have been re-dropped while a restore was pending.
	if ( di$tot_drop_count == di$tot_restore_count || force )
		{
		++di$actual_restore_count;

		# Invoke the external script that removes the block.
		system(fmt("%s %s", Drop::restore_connectivity_script, a));
		debug_log(fmt("sending restored for %s", a));
		event Drop::address_restored(a);

		local t = di$last_timeout;

		if ( di$actual_restore_count == 1 )
			{
			local s1 = fmt("(timeout %.1f)", t);
			do_notice([$note=Drop::AddressRestored, $src=a,
				$msg=fmt("%s %s", a, s1), $sub=s1]);
			}
		else
			{
			local s2 = fmt("(%d times, timeout %.1f)",
					di$actual_restore_count, t);
			do_notice([$note=Drop::RepeatAddressRestored, $src=a,
				$n=di$tot_restore_count,
				$msg=fmt("%s %s", a, s2), $sub=s2]);
			}
		}
	}
# Expiration function for the drop_info table: when an entry times out,
# restore the host if it is still dropped, announce the clearing, and
# let the entry expire (returning 0 secs grants no extension).
function clear_host(t: table[addr] of drop_rec, a: addr): interval
	{
	if ( is_dropped(a) )
		# Restore address.
		do_restore(a, T);

	debug_log(fmt("sending cleared for %s", a));
	event Drop::address_cleared(a);

	return 0 secs;
	}
# Returns true if drop was successful (or IP was already dropped).
# Public entry point: drop connectivity for address a, honoring the
# never-drop rules and the catch-and-release setting.  Returns the
# notice generated for the drop; its $note field is NoticeNone only if
# something went wrong, since some notice should always be raised.
function drop_address(a: addr, msg: string) : notice_info
	{
	debug_log(fmt("drop_address(%s, %s)", a, msg));

	# Sentinel; overwritten by do_notice() along the way.
	last_notice = [$note=NoticeNone];

	if ( dont_drop(a) )
		do_notice([$note=AddressDropIgnored, $src=a,
			$msg=fmt("ignoring request to drop %s (%s)", a, msg)]);

	else if ( use_catch_release )
		do_catch_release_drop(a, msg);

	else
		do_direct_drop(a, msg);

	if ( last_notice$note == NoticeNone )
		print "run-time error: drop_address did not raise a NOTICE";

	return last_notice;
	}
# With catch-and-release in effect, raise a notice when a previously
# dropped (and since restored) host initiates a new connection.
event new_connection(c: connection)
	{
	if ( ! can_drop_connectivity )
		return;

	# Only relevant when using the catch-and-release scheme.
	if ( ! use_catch_release )
		return;

	local a = c$id$orig_h;

	if ( a !in drop_info )
		# Never dropped.
		return;

	# (The original code also fetched drop_info[a] into an unused
	# local here; that dead lookup has been removed.)

	if ( is_dropped(a) )
		# Still dropped.
		return;

	NOTICE([$note=AddressSeenAgain, $src=a,
		$msg=fmt("%s seen again after release", a)]);
	}

View file

@ -1,53 +0,0 @@
# $Id: dyn-disable.bro,v 1.1.4.3 2006/05/31 01:52:02 sommer Exp $
#
# When this script is loaded, analyzers that raise protocol_violation events
# are disabled for the affected connection.
# Note that this a first-shot solution. Eventually, we should make the
# disable-decision more fine-grained/sophisticated.
@load conn
@load notice
module DynDisable;
export {
redef enum Notice += {
ProtocolViolation
};
# Ignore violations which go this many bytes into the connection.
const max_volume = 10 * 1024 &redef;
}
global conns: table[conn_id] of set[count];
# Disable the analyzer that reported a protocol violation for this
# connection, unless the violation occurred only after substantial
# traffic (more than max_volume bytes) or we already disabled this
# analyzer instance for the connection.
event protocol_violation(c: connection, atype: count, aid: count,
			reason: string)
	{
	# Already handled this analyzer for this connection?
	if ( c$id in conns && aid in conns[c$id] )
		return;

	# Violations deep into a connection are more likely false
	# positives; leave the analyzer alone in that case.
	local size = c$orig$size + c$resp$size;
	if ( max_volume > 0 && size > max_volume )
		return;

	# Disable the analyzer that raised the last core-generated event.
	disable_analyzer(c$id, aid);

	NOTICE([$note=ProtocolViolation, $conn=c,
		$msg=fmt("%s analyzer %s disabled due to protocol violation",
			id_string(c$id), analyzer_name(atype)),
		$sub=reason, $n=atype]);

	# Remember which analyzers we disabled for this connection.
	if ( c$id !in conns )
		conns[c$id] = set();

	add conns[c$id][aid];
	}
# Clean up per-connection analyzer state when the connection goes away.
event connection_state_remove(c: connection)
	{
	# The table is indexed by the conn_id itself; the original
	# "delete conns[$id=c$id]" built a one-field record that did not
	# match the table's index type.
	delete conns[c$id];
	}

View file

@ -1,18 +0,0 @@
# $Id: file-flush.bro 786 2004-11-24 08:25:16Z vern $
# Causes all files to be flushed every file_flush_interval seconds.
# Useful if you want to poke through the log files in real time,
# particularly if network traffic is light.
global file_flush_interval = 10 sec &redef;
# Flush all open log files, then re-schedule ourselves so flushing
# recurs every file_flush_interval.
event file_flush_event()
	{
	flush_all();
	schedule file_flush_interval { file_flush_event() };
	}
event bro_init()
{
schedule file_flush_interval { file_flush_event() };
}

View file

@ -1,69 +0,0 @@
# $Id: finger.bro 4758 2007-08-10 06:49:23Z vern $
module Finger;
export {
const hot_names = {
"root", "lp", "uucp", "nuucp", "demos", "operator", "sync",
"r00t", "tutor", "tour", "admin", "system", "guest", "visitor",
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
} &redef;
const max_finger_request_len = 80 &redef;
}
redef capture_filters += { ["finger"] = "port finger" };
# DPM configuration.
global finger_ports = { 79/tcp } &redef;
redef dpd_config += { [ANALYZER_FINGER] = [$ports = finger_ports] };
function public_user(user: string): bool
{
return T;
}
function authorized_client(host: addr): bool
{
return T;
}
# Inspect a Finger request: flag overlong requests, requests that name a
# remote host, and requests for sensitive usernames, and record an
# abbreviated form of the request in the connection's addl field.
event finger_request(c: connection, full: bool, username: string, hostname: string)
	{
	local id = c$id;

	local request: string;
	if ( hostname != "" )
		# Finger-forwarding style "user@host" request.
		request = cat(username, "@", hostname);
	else
		request = username;

	if ( byte_len(request) > max_finger_request_len )
		{
		# Truncate for logging and count the oversized request as
		# hot (possible overflow/abuse attempt).
		request = fmt("%s...", sub_bytes(request, 1, max_finger_request_len));
		++c$hot;
		}

	if ( hostname != "" )
		# Requests naming another host are treated as hot.
		++c$hot;

	if ( username in hot_names )
		++c$hot;

	local req = request == "" ? "ALL" : fmt("\"%s\"", request);

	if ( full )
		# /W ("whois") verbose query.
		req = fmt("%s (/W)", req);

	if ( c$addl != "" )
		# This is an additional request.
		req = fmt("(%s)", req);

	append_addl_marker(c, req, " *");
	}
# True if the connection's responder port is the standard Finger port.
function is_finger_conn(c: connection): bool
	{
	return c$id$resp_p == finger;
	}

View file

@ -1,195 +0,0 @@
# $Id: firewall.bro 4758 2007-08-10 06:49:23Z vern $
#
# Firewall-like rules.
@load notice
@load conn
@load ftp
module Firewall;
export {
type action: enum { ALLOW, DENY };
type cmp: enum { EQ, NE };
type rule: record {
label: string &default = "<no-label>";
orig: subnet &default = 0.0.0.0/0;
orig_set: set[addr] &optional;
orig_cmp: cmp &default = EQ;
orig_p: port &default = 0/tcp;
orig_p_cmp: cmp &default = EQ;
resp: subnet &default = 0.0.0.0/0;
resp_set: set[addr] &optional;
resp_cmp: cmp &default = EQ;
resp_p: port &default = 0/tcp;
resp_p_cmp: cmp &default = EQ;
prot: transport_proto &default = unknown_transport;
prot_cmp: cmp &default = EQ;
state: string &default = "";
state_cmp: cmp &default = EQ;
is_ftp: bool &default = F;
action: action &default = ALLOW;
};
redef enum Notice += {
DenyRuleMatched
};
global begin: function(c: connection);
global match_rule: function(c: connection, r: rule);
}
const log_file = open_log_file("firewall") &redef;
global stop_matching = F;
# Test whether connection c matches rule r.  Each configured criterion
# is checked in turn - originator/responder address (subnet and/or
# address set), ports, connection state, transport protocol, and the
# FTP-data requirement - honoring the rule's EQ/NE comparison modes.
# Returns T only if every criterion is satisfied.
function do_match(c: connection, r: rule): bool
	{
	# Originator address: EQ requires membership in the set and/or
	# subnet; NE requires absence from both.
	if ( r$orig_cmp == EQ )
		{
		if ( r?$orig_set )
			{
			if ( c$id$orig_h !in r$orig_set && c$id$orig_h !in r$orig )
				return F;
			}
		else
			{
			if ( c$id$orig_h !in r$orig )
				return F;
			}
		}
	else
		{
		if ( r?$orig_set )
			{
			if ( c$id$orig_h in r$orig_set || c$id$orig_h in r$orig )
				return F;
			}
		else
			{
			if ( c$id$orig_h in r$orig )
				return F;
			}
		}

	# Responder address, same scheme as above.
	if ( r$resp_cmp == EQ )
		{
		if ( r?$resp_set )
			{
			if ( c$id$resp_h !in r$resp_set && c$id$resp_h !in r$resp )
				return F;
			}
		else
			{
			if ( c$id$resp_h !in r$resp )
				return F;
			}
		}
	else
		{
		if ( r?$resp_set )
			{
			if ( c$id$resp_h in r$resp_set || c$id$resp_h in r$resp )
				return F;
			}
		else
			{
			if ( c$id$resp_h in r$resp )
				return F;
			}
		}

	# Originator port; the default 0/tcp means "any".
	if ( r$orig_p != 0/tcp )
		{
		if ( r$orig_p_cmp == EQ )
			{
			if ( c$id$orig_p != r$orig_p )
				return F;
			}
		else
			if ( c$id$orig_p == r$orig_p )
				return F;
		}

	# Responder port; the default 0/tcp means "any".
	if ( r$resp_p != 0/tcp )
		{
		if ( r$resp_p_cmp == EQ )
			{
			if ( c$id$resp_p != r$resp_p )
				return F;
			}
		else
			if ( c$id$resp_p == r$resp_p )
				return F;
		}

	# Connection state, if the rule specifies one.
	if ( r$state != "" )
		{
		local state = conn_state(c, get_port_transport_proto(c$id$orig_p));
		if ( r$state_cmp == EQ )
			{
			if ( state != r$state )
				return F;
			}
		else
			if ( state == r$state )
				return F;
		}

	# Transport protocol, if the rule specifies one.
	if ( r$prot != unknown_transport )
		{
		local proto = get_port_transport_proto(c$id$orig_p);
		if ( r$prot_cmp == EQ )
			{
			if ( proto != r$prot )
				return F;
			}
		else
			if ( proto == r$prot )
				return F;
		}

	# Rule may additionally require an FTP data connection.
	if ( r$is_ftp && ! FTP::is_ftp_data_conn(c) )
		return F;

	return T;
	}
function report_violation(c: connection, r:rule)
{
local trans = get_port_transport_proto(c$id$orig_p);
local state = conn_state(c, trans);
NOTICE([$note=DenyRuleMatched,
$msg=fmt("%s %s",
id_string(c$id), trans), $conn=c, $sub=r$label]);
append_addl(c, fmt("<%s>", r$label));
record_connection(log_file, c);
}
function begin(c: connection)
{
stop_matching = F;
}
# Apply a single rule to the connection.  Rules are evaluated in order
# (first match wins, via the stop_matching flag set by begin()); only
# DENY rules produce a violation report.
function match_rule(c: connection, r: rule)
	{
	if ( stop_matching )
		return;

	if ( do_match(c, r) )
		{
		stop_matching = T;

		if ( r$action == DENY )
			report_violation(c, r);
		}
	}
event bro_init()
{
set_buf(log_file, F);
}

View file

@ -1,18 +0,0 @@
# $Id: flag-irc.bro 4758 2007-08-10 06:49:23Z vern $
#
# include this module to flag various forms of IRC access.
@load ftp
redef FTP::hot_files +=
/.*eggdrop.*/
| /.*eggsun.*/
;
redef Hot::flag_successful_inbound_service: table[port] of string += {
[[6666/tcp, 6667/tcp]] = "inbound IRC",
};
redef Hot::hot_dsts: table[addr] of string += {
[bitchx.com] = "IRC source sites",
};

View file

@ -1,11 +0,0 @@
# $Id: flag-warez.bro 416 2004-09-17 03:52:28Z vern $
#
# include this module to flag various forms of Warez access.
@load hot-ids
@load ftp
redef FTP::hot_files += /.*[wW][aA][rR][eE][zZ].*/ ;
redef always_hot_ids += { "warez", "hanzwarez", "zeraw", };
redef hot_ids += { "warez", "hanzwarez", "zeraw", };

View file

@ -1,6 +0,0 @@
# Capture TCP fragments, but not UDP (or ICMP), since those are a lot more
# common due to high-volume, fragmenting protocols such as NFS :-(.
redef capture_filters += { ["frag"] = "(ip[6:2] & 0x3fff != 0) and tcp" };
redef frag_timeout = 5 min;

File diff suppressed because it is too large Load diff

View file

@ -1,61 +0,0 @@
# $Id: gnutella.bro 4017 2007-02-28 07:11:54Z vern $
redef capture_filters += { ["gnutella"] = "port 6346 or port 8436" };
global gnutella_ports = { 6346/tcp, 8436/tcp } &redef;
redef dpd_config += { [ANALYZER_GNUTELLA] = [$ports = gnutella_ports] };
# Print a Gnutella text/handshake message with its direction and headers.
event gnutella_text_msg(c: connection, orig: bool, headers: string)
	{
	local src = orig ? c$id$orig_h : c$id$resp_h;
	local dst = orig ? c$id$resp_h : c$id$orig_h;
	print fmt("gnu txt %s -> %s %s", src, dst, headers);
	}
event gnutella_binary_msg(c: connection, orig: bool, msg_type: count,
ttl: count, hops: count, msg_len: count,
payload: string, payload_len: count,
trunc: bool, complete: bool)
{
local s = "";
if ( orig )
s = fmt("gnu bin %s -> %s", c$id$orig_h, c$id$resp_h);
else
s = fmt("gnu bin %s -> %s", c$id$resp_h, c$id$orig_h);
print fmt("%s %d %d %d %d %d %d %d %s",
s, msg_type, ttl, hops, msg_len,
trunc, complete, payload_len, payload);
}
# Print the direction of a partially captured Gnutella binary message.
event gnutella_partial_binary_msg(c: connection, orig: bool,
				msg: string, len: count)
	{
	local src = orig ? c$id$orig_h : c$id$resp_h;
	local dst = orig ? c$id$resp_h : c$id$orig_h;
	print fmt("gnu pbin %s -> %s", src, dst);
	}
event gnutella_establish(c: connection)
{
print fmt("gnu est %s <-> %s", c$id$orig_h, c$id$resp_h);
}
event gnutella_not_establish(c: connection)
{
print fmt("gnu !est %s <-> %s", c$id$orig_h, c$id$resp_h);
}
event gnutella_http_notify(c: connection)
{
print fmt("gnu http %s/%s <-> %s/%s", c$id$orig_h, c$id$orig_p,
c$id$resp_h, c$id$resp_p);
}

View file

@ -1,144 +0,0 @@
# $Id: hand-over.bro 617 2004-11-02 00:54:31Z scottc $
#
# Hand-over between two instances of Bro.
@load remote
# The host from which we want to take over the state has to be
# added to remote_peers_{clear,ssl}, setting hand_over to T.
#
# The host which we want to allow to perform a hand-over with us
# has to be added to remote_peers with a port of 0/tcp and
# hand_over = T.
function is_it_us(host: addr, p: port): bool
{
@ifdef ( listen_if_clear )
if ( is_local_interface(host) && p == listen_port_clear )
return T;
@endif
@ifdef ( listen_if_ssl )
if ( is_local_interface(host) && p == listen_port_ssl )
return T;
@endif
return F;
}
# True if peer p is configured in Remote::pending_peers as a peer we
# perform state hand-over with (its hand_over flag is set).
function is_handover_peer(p: event_peer): bool
	{
	local peer: Remote::Destination;

	if ( p$id in Remote::pending_peers )
		peer = Remote::pending_peers[p$id];
	else
		return F;

	return peer$hand_over;
	}
function handover_start_processing()
{
uninstall_src_net_filter(0.0.0.0/0);
}
event bro_init()
{
# Disable packet processing.
install_src_net_filter(0.0.0.0/0, 0, 100);
# Reporter::message("waiting for hand-over - packet processing disabled.");
}
event remote_connection_error(p: event_peer, reason: string)
{
if ( is_remote_event() || ! ( p$id in Remote::connected_peers) )
return;
# Seems that the other side in not running.
# Reporter::error("can't connect for hand-over - starting processing ...");
handover_start_processing();
}
# On a new peering, set up the hand-over protocol: if we initiated the
# connection, request the remote side's hand-over events and kick off a
# hand-over request; if the peer connected to us, merely register for
# its hand-over events.
event remote_connection_established(p: event_peer)
	{
	if ( is_remote_event() )
		return;

	# If [p$id] is defined in Remote::connected_peers and p != 0, we have connected
	# to the host.
	if ( p$p != 0/tcp &&
	     ([p$id] in Remote::connected_peers ) )
		{
		if ( ! is_handover_peer(p) )
			return;

		# Reporter::message(fmt("requesting hand-over from %s:%d", p$host, p$p));
		request_remote_events(p, /handover_.*|finished_send_state/);

		# Give the remote side some time to register its handlers.
		# NOTE(review): handover_request is declared with a single
		# event_peer parameter but is scheduled here with (host, port)
		# - confirm this matches the intended event signature.
		schedule 3 secs { handover_request(p$host, p$p) };
		return;
		}

	# If the other side connected to us, we will allow the hand-over
	# if the remote host is defined as a hand-over host in remote_peers.
	if ( is_handover_peer(p) )
		{
		# Reporter::message(fmt("allowing hand-over from %s:%d", p$host, p$p));
		request_remote_events(p, /handover_.*|finished_send_state/);
		}
	}
# Transfer our full state to peer p; if a serialization is already in
# progress, retry in a few seconds.
event handover_send_state(p: event_peer)
	{
	if ( is_remote_event() )
		return;

	# There may be a serialization in progress in which case
	# we will have to try again.
	if ( ! send_state(p) )
		{
		# Reporter::message("can't send state; serialization in progress");
		# NOTE(review): rescheduled with (host, port) although this
		# event is declared to take an event_peer - confirm intended.
		schedule 5 secs { handover_send_state(p$host, p$p) };
		}
	}
event handover_request(p: event_peer)
{
# Make sure the event is for us.
if ( ! (is_remote_event() && is_it_us(p$host, p$p)) )
return;
# Send state to other side.
schedule 1 sec { handover_send_state(p) };
}
event finished_send_state(p: event_peer)
{
# We will get this event from the remote side.
# Make sure it's indeed for us.
if ( ! is_remote_event() )
return;
if ( ! is_handover_peer(p) )
return;
#Reporter::message(fmt("full state received from %s:%d - starting processing ...",
# p$host, p$p));
event handover_got_state(p);
# Start processing.
handover_start_processing();
}
event handover_got_state(p: event_peer)
{
# Make sure the event is for us.
if ( ! (is_remote_event() && is_it_us(p$host, p$p)) )
return;
# Reporter::message(fmt("%s:%d received our state - terminating", p$host, p$p));
terminate();
}

View file

@ -1,26 +0,0 @@
# $Id: heavy-analysis.bro 2771 2006-04-18 23:53:09Z vern $
#
# Loading this files enables somewhat more accurate, yet also significantly
# more expensive, analysis (in terms of memory as well as CPU time).
#
# This script only sets core-level options. Script-level timeouts are
# adjusted in heavy.*.bro, loaded via Bro's prefix mechanism. To make this
# work, the prefix has to be set *before* reading other scripts, either by
# loading this script first of all, or by manually putting a @prefix
# at the start of Bro's configuration.
@prefixes += heavy
redef tcp_SYN_timeout = 120 secs;
redef tcp_session_timer = 30 secs;
redef tcp_connection_linger = 30 secs;
redef tcp_attempt_delay = 300 secs;
redef tcp_close_delay = 15 secs;
redef tcp_reset_delay = 15 secs;
redef tcp_partial_close_delay = 10 secs;
redef max_timer_expires = 32;
redef tcp_inactivity_timeout = 2 hrs;
redef udp_inactivity_timeout = 1 hrs;
redef icmp_inactivity_timeout = 1 hrs;

View file

@ -1,4 +0,0 @@
# $Id: heavy.irc.bro 4723 2007-08-07 18:14:35Z vern $
redef active_users &persistent &read_expire = 1 days;
redef active_channels &persistent &read_expire = 1 days;

View file

@ -1,6 +0,0 @@
# $Id: heavy.scan.bro 4758 2007-08-10 06:49:23Z vern $
redef distinct_peers &create_expire = 10 hrs;
redef distinct_ports &create_expire = 10 hrs;
redef distinct_low_ports &create_expire = 10 hrs;
redef possible_scan_sources &create_expire = 10 hrs;

View file

@ -1,3 +0,0 @@
# $Id: heavy.software.bro 2771 2006-04-18 23:53:09Z vern $
redef only_report_local = F;

View file

@ -1,8 +0,0 @@
# $Id: heavy.trw.bro 4723 2007-08-07 18:14:35Z vern $
redef TRW::scan_sources &write_expire = 1 day;
redef TRW::benign_sources &write_expire = 1 day;
redef TRW::failed_locals &write_expire = 12 hrs;
redef TRW::successful_locals &write_expire = 12 hrs;
redef TRW::lambda &write_expire = 12 hrs;
redef TRW::num_scanned_locals &write_expire = 12 hrs;

View file

@ -1,29 +0,0 @@
# @(#) $Id: hot-ids.bro 785 2004-11-24 05:56:06Z rwinslow $ (LBL)
# If these ids are seen, the corresponding connection is terminated.
const forbidden_ids = {
"uucp", "daemon", "rewt", "nuucp",
"EZsetup", "OutOfBox", "4Dgifts",
"ezsetup", "outofbox", "4dgifts", "sgiweb",
"r00t", "ruut", "bomb", "backdoor",
"bionic", "warhead", "check_mate", "checkmate", "check_made",
"themage", "darkmage", "y0uar3ownd", "netfrack", "netphrack",
} &redef;
const forbidden_ids_if_no_password = { "lp" } &redef;
const forbidden_id_patterns = /(y[o0]u)(r|ar[e3])([o0]wn.*)/ &redef;
const always_hot_ids = {
"sync", "tutor", "tour",
"retro", "milk", "moof", "own", "gdm", "anacnd",
"lp", "demos", forbidden_ids,
} &redef;
# The ones here that aren't in always_hot_ids are only hot upon
# success.
const hot_ids = {
"root", "system", "smtp", "sysadm", "diag", "sysdiag", "sundiag",
"operator", "sys", "toor", "issadmin", "msql", "sysop", "sysoper",
"wank", always_hot_ids,
} &redef;

View file

@ -1,160 +0,0 @@
# $Id: hot.bro 7057 2010-07-19 23:22:19Z vern $
@load site
@load port-name
@load notice
@load terminate-connection
module Hot;
export {
# True if it should be considered a spoofing attack if a connection has
# the same local net for source and destination.
const same_local_net_is_spoof = F &redef;
const allow_spoof_services = {
110/tcp, # pop-3
139/tcp, # netbios-ssn
} &redef;
# Indexed by source address and destination address.
const allow_pairs: set[addr, addr] &redef;
const hot_srcs: table[addr] of string = {
# [ph33r.the.eleet.com] = "kidz",
} &redef;
const hot_dsts: table[addr] of string = {
[206.101.197.226] = "ILOVEYOU worm destination",
} &redef;
const allow_services = {
ssh, http, gopher, ident, smtp, 20/tcp,
53/udp, # DNS queries
123/udp, # NTP
} &redef;
const allow_services_to: set[addr, port] &redef;
const allow_services_from: set[addr, port] &redef;
const allow_service_pairs: set[addr, addr, port] &redef;
const flag_successful_service: table[port] of string = {
[[31337/tcp]] = "popular backdoors",
} &redef;
const flag_successful_inbound_service: table[port] of string = {
[1524/tcp] = "popular backdoor, but with false hits outbound",
} &redef;
const terminate_successful_inbound_service: table[port] of string &redef;
const flag_rejected_service: table[port] of string &redef;
# Different values to hand to check_hot() at different stages in
# a connection's lifetime.
const CONN_ATTEMPTED = 1;
const CONN_ESTABLISHED = 2;
const APPL_ESTABLISHED = 3;
const CONN_FINISHED = 4;
const CONN_REJECTED = 5;
const CONN_TIMEOUT = 6;
const CONN_REUSED = 7;
global check_hot: function(c: connection, state: count): bool;
global check_spoof: function(c: connection): bool;
}
# An internal function used by check_hot.
# An internal function used by check_hot.
# If address a appears in table t, mark the connection hot and append
# the table's annotation to the connection's addl field.
function do_hot_check(c: connection, a: addr, t: table[addr] of string)
	{
	if ( a in t )
		{
		++c$hot;
		local hot_msg = fmt("<%s>", t[a]);
		append_addl(c, hot_msg);
		}
	}
# Check for address spoofing: a connection whose source and destination
# are both local (and whose service is not in allow_spoof_services).
# Identical endpoint address and port additionally raises a Land_attack
# weird.  Returns T if the connection is (now) considered hot.
function check_spoof(c: connection): bool
	{
	local orig = c$id$orig_h;
	local resp = c$id$resp_h;
	local service = c$id$resp_p;

	if ( is_local_addr(orig) && is_local_addr(resp) &&
	     service !in allow_spoof_services )
		{
		if ( c$id$orig_p == service && orig == resp )
			event conn_weird("Land_attack", c, "");

		if ( same_local_net_is_spoof )
			++c$hot;
		}

	return c$hot != 0;
	}
# Central hotness check, invoked at the various stages of a connection's
# lifetime identified by the CONN_*/APPL_* constants.  Increments c$hot
# as appropriate and returns T if the connection is considered hot.
function check_hot(c: connection, state: count): bool
	{
	local id = c$id;
	local service = id$resp_p;

	# Whitelisted services and FTP data channels are never hot.
	if ( service in allow_services || "ftp-data" in c$service )
		return F;

	if ( state == CONN_ATTEMPTED )
		check_spoof(c);

	else if ( state == CONN_REJECTED )
		{
		check_spoof(c);
		if ( service in flag_rejected_service )
			++c$hot;
		}

	else if ( state == CONN_ESTABLISHED )
		{
		check_spoof(c);

		local inbound = is_local_addr(id$resp_h);

		if ( (service in flag_successful_service ||
		      (inbound &&
		       service in flag_successful_inbound_service)) &&
		     ([id$resp_h, id$resp_p] !in allow_services_to ||
		      [id$orig_h, id$resp_p] !in allow_services_from) )
			{
			if ( inbound &&
			     service in terminate_successful_inbound_service )
				TerminateConnection::terminate_connection(c);

			++c$hot;
			if ( service in flag_successful_service )
				append_addl(c, flag_successful_service[service]);
			else
				append_addl(c, flag_successful_inbound_service[service]);
			}
		}

	else if ( state == APPL_ESTABLISHED ||
		  ((state == CONN_FINISHED || state == CONN_TIMEOUT ||
		    state == CONN_REUSED) &&
		   service != telnet && c$orig$size > 0 && c$resp$size > 0) )
		{
		# Connection established and has a non-trivial size.
		local orig = c$id$orig_h;
		local resp = c$id$resp_h;

		if ( [resp, service] in allow_services_to ||
		     [orig, service] in allow_services_from ||
		     [orig, resp, service] in allow_service_pairs ||
		     [orig, resp] in allow_pairs )
			return F;

		# hot_srcs is indexed by *source* address (see its
		# declaration); the original code passed resp here, so hot
		# sources were never matched.
		do_hot_check(c, orig, hot_srcs);
		do_hot_check(c, resp, hot_dsts);
		}

	return c$hot != 0;
	}

View file

@ -1,54 +0,0 @@
# $Id: http-abstract.bro 47 2004-06-11 07:26:32Z vern $
@load http
@load http-entity
module HTTP;
export {
const abstract_max_length = 512 &redef;
}
redef http_entity_data_delivery_size = 4096;
redef include_HTTP_abstract = T;
function skip_abstract(c: connection, is_orig: bool, msg: http_message)
{
msg$skip_abstract = T;
if ( ! process_HTTP_data )
skip_http_entity_data(c, is_orig);
}
event http_content_type(c: connection, is_orig: bool, ty: string, subty: string)
{
local s = lookup_http_request_stream(c);
local msg = get_http_message(s, is_orig);
if ( msg$entity_level == 1 && ty == "TEXT" )
# Do not skip the body in this case.
return;
skip_abstract(c, is_orig, msg);
}
# Capture the beginning of an HTTP entity body as an "abstract" of at
# most abstract_max_length bytes, then arrange for the remainder of the
# entity to be skipped.
event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
	{
	local s = lookup_http_request_stream(c);
	local msg = get_http_message(s, is_orig);

	if ( msg$skip_abstract )
		return;

	# Keep at most abstract_max_length bytes of the first chunk.
	local len = byte_len(data);
	if ( len > abstract_max_length )
		msg$abstract = sub_bytes(data, 1, abstract_max_length);
	else
		msg$abstract = data;

	# One chunk is enough for the abstract; skip the rest.
	skip_abstract(c, is_orig, msg);

	# print http_log, fmt("%.6f %s %s %d bytes: \"%s\"",
	#	network_time(), s$id,
	#	is_orig ? "=>" : "<=", byte_len(msg$abstract),
	#	msg$abstract);
	}

View file

@ -1,209 +0,0 @@
# $Id:$
# Anonymize values in Server: headers.
#
# TODO:
#
# - Zedo and IBM web servers can have Apache mods -- the parsing should
# be extended to support them
#
@load anon
@load http-anon-utils
# ---------------------------------------------------------------------
# Apache (and friends)
# - abandon all hope ye who enter here .....
# ---------------------------------------------------------------------
const apache_server =
/apache(-ish)?(\/([0-9]+\.)*[0-9]+)? *(\(?(red hat( linux)?|cobalt|suse\/linux|linux\/suse|darwin|gentoo\/linux|debian gnu\/linux|win32|fedora|freebsd|red-hat\/linux|unix)\)? *)*/;
const apache_mod_pat =
/mod_fastcgi\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /openssl\/([0-9]+\.)*[0-9a-z]{1,4}(-beta[0-9]{0,2})?/
| /dav\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /php-cgi\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /ben-ssl\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /embperl\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_ruby\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /nexadesic\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /postgresql\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_tsunami\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_auth_svn\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_auth_mda\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /rus\/pl(([0-9]+\.)*[0-9]{1,4})/
| /authmysql\/(([0-9]+\.)*[0-9]{1,4})/
| /mod_auth_pgsql\/(([0-9]+\.)*[0-9]{1,4})/
| /mod_ssl\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /php\/(([0-9]+\.)*[0-9a-z]{1,4})(-[0-9]+)?/
| /mod_perl\/(([0-9]+\.)*[0-9a-z]{1,4})(\_[0-9]+)?/
| /mod_macro\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_auth_pam\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_oas\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_cap\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /powweb\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_gzip\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /resin\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_jk\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /python\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /perl\/(v)?(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_python\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_log_bytes\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_auth_passthrough\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_bwlimited\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_throttle\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_webapp\/(([0-9]+\.)*[0-9a-z]{1,4})(-dev)?/
| /frontpage\/(([0-9]+\.)*[0-9a-z]{1,5})/
| /mod_pubcookie\/[0-9a-z]{2}\/[0-9]+\.[0-9]+\-[0-9]+/
| /(-)?coyote\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /svn\/(([0-9]+\.)*[0-9a-z]{1,4})/
;
# Various Apache variants (e.g., stronghold).
const apache_misc =
/stronghold\/(([0-9]+\.)*[0-9]+) apache(\/([0-9]+\.)*[0-9]+)? (c2neteu\/[0-9])? *(\(?(red hat( linux)?|cobalt|suse\/linux|linux\/suse|darwin|gentoo\/linux|debian gnu\/linux|win32|fedora|freebsd|red-hat\/linux|unix)\)? *)*/;
const apache_basic = /apache?(\/([0-9]+\.)*[0-9]+)?/;
const apache_platforms =
/(\(?(red hat( linux)?|cobalt|suse\/linux|linux\/suse|darwin|gentoo\/linux|debian gnu\/linux|win32|fedora|freebsd|red-hat\/linux|unix)\)? *)*/;
# ibm_http_server/1.3.26.2, apache/1.3.26 (unix).
const IBM_server =
/ibm_http_server(\/[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)?( *apache\/[0-9]+\.[0-9]+\.[0-9]+ \(unix\))?/;
# ---------------------------------------------------------------------
# Servers values for which we don't retain all values.
# ---------------------------------------------------------------------
const zope_server =
/zope\/\(zope ([0-9]+\.)*[0-9]+-[a-z0-9]{1,2}\, python ([0-9]+\.)*[0-9]+\, linux[0-9]\)/;
const thttp_server = /thttpd\/[0-9]+\.[0-9]+(beta[0-9]+)?/;
const weblogic_server = /weblogic server [0-9]+\.[0-9]+/;
const zedo_server = /zedo 3g(\/([0-9]+\.)*[0-9]+)?/;
const jetty_server = /jetty\/[0-9]+\.[0-9]+/;
# ---------------------------------------------------------------------
# Misc Servers
# ---------------------------------------------------------------------
# Server products (other than Apache/IBM/Zope/etc., which are handled
# separately) whose Server header values are considered safe to pass
# through unmodified by filter_in_http_server().
#
# Fix: the netscape-enterprise alternative used "[0-9a-z]{1,2}+", i.e. a
# '+' quantifier immediately following a bounded repetition. That is
# undefined/invalid in POSIX extended regular expressions and is rejected
# by flex-style pattern compilers; "{1,2}" alone expresses the intent.
const misc_server =
/dclk creative/
| /gws\/[0-9]+\.[0-9]+/
| /nfe\/[0-9]+\.[0-9]+/
| /gfe\/[0-9]+\.[0-9]+/
| /dclk-adsvr/
| /rsi/
| /swcd\/([0-9]+\.)*[0-9]+/
| /microsoft-iis\/[0-9]{1,2}\.[0-9]{1,2}/
| /cafe\/[0-9]+\.[0-9]+/
| /artblast\/([0-9]+\.)*[0-9]+/
| /aolserver\/([0-9]+\.)*[0-9]+/
| /resin\/([0-9]+\.)*s?[0-9]+/
| /netscape-enterprise\/([0-9]+\.)*[0-9a-z]{1,2} *(aol)?/
| /mapquest listener/
| /miixpc\/[0-9]+\.[0-9]+/
| /sun-one-web-server\/[0-9]+\.[0-9]+/
| /appledotmacserver/
| /cj\/[0-9]+\.[0-9]+/
| /jigsaw\/([0-9]+\.)*[0-9]+/
| /boa\/[0-9]+\.[0-9]+(\.[0-9]+(rc[0-9]+)?)?/
| /tux\/[0-9]+\.[0-9]+ *\(linux\)/
| /igfe/
| /trafficmarketplace-jforce\/([0-9]+\.)*[0-9]+/
| /lighttpd/
| /hitbox gateway ([0-9]+\.)*[0-9]+ [a-z][0-9]/
| /jbird\/[0-9]+\.[0-9a-z]{1,2}/
| /perlbal/
| /big-ip/
| /konichiwa\/[0-9]+\.[0-9]+/
| /footprint [0-9]+\.[0-9]+\/fpmc/
| /iii [0-9]+/
| /clickability web server\/([0-9]+\.)*[0-9]+ *\(unix\)/
| /accipiter-directserver\/([0-9]+\.)*[0-9]+ \(nt; pentium\)/
| /ibm-proxy-wte\/([0-9]+\.)*[0-9]+/
| /netscape-commerce\/[0-9]+\.[0-9]+/
| /nde/
;
# Anonymize an Apache "Server" header value: keep the recognized Apache
# product token, then append only those trailing tokens that match the
# module whitelist (apache_mod_pat); unknown module tokens are dropped
# and logged to http_anon_log for later review.
function do_apache_server(server: string): string
{
# split_all() interleaves unmatched/matched pieces: [2] is the first
# match of apache_server, [3] the text following it (modules etc.).
local apache_parts = split_all(server, apache_server);
if ( apache_parts[3] == "" )
return apache_parts[2];
local apache_return_string = apache_parts[2];
# Trailing module list is space-separated.
local mod_parts = split(apache_parts[3], / /);
# NOTE(review): table iteration order is unspecified, so whitelisted
# module tokens may be re-assembled in a different order than they
# appeared in the original header -- presumably acceptable here.
for ( part in mod_parts )
{
# "string == pattern" is an exact (whole-string) match in Bro.
if ( mod_parts[part] == apache_mod_pat )
{
apache_return_string =
string_cat(apache_return_string,
" ");
apache_return_string =
string_cat(apache_return_string,
mod_parts[part]);
}
else
print http_anon_log, fmt("** unknown Apache mod: %s:%s", mod_parts[part], server);
}
return apache_return_string;
}
# True when the given server pattern occurs anywhere in the Server
# header value.
function check_server(server: string, server_pat: pattern): bool
{
if ( server_pat in server )
return T;
return F;
}
# Extract just the recognized portion of the Server header: the first
# match of server_pat (element [2] of the split_all() interleaving).
function do_server(server: string, server_pat: pattern): string
{
local pieces = split_all(server, server_pat);
return pieces[2];
}
# Filter an HTTP "Server" header value for anonymization: known server
# products are reduced to their recognized form; anything else is
# logged and replaced by an anonymized placeholder.
function filter_in_http_server(server: string): string
{
# Vanilla Apache is a hard one and a special case. Let's get the
# nastiness over first.
if ( apache_server in server )
return do_apache_server(server);
# Cascade of known products; first match wins and only the matched
# portion is retained.
if ( check_server(server, apache_misc) )
return do_server(server, apache_misc);
if ( check_server(server, IBM_server) )
return do_server(server, IBM_server);
if ( check_server(server, zedo_server) )
return do_server(server, zedo_server);
if ( check_server(server, zope_server) )
return do_server(server, zope_server);
if ( check_server(server, jetty_server) )
return do_server(server, jetty_server);
if ( check_server(server, thttp_server) )
return do_server(server, thttp_server);
if ( check_server(server, weblogic_server) )
return do_server(server, weblogic_server);
# Grab bag.
if ( misc_server in server )
return server;
# Best guess - unknown Apache variant of some sort.
if ( apache_basic in server )
{
print http_anon_log,
fmt("** unknown Apache variant: %s", server);
return fmt("(bro: unknown) %s %s",
split_all(server, apache_basic)[2],
split_all(server, apache_platforms)[2]);
}
# Completely unknown: anonymize the whole value.
print http_anon_log, fmt("** unknown server: %s", server);
return fmt("(bro: unknown) %s", anonymize_arg("server", server));
}

View file

@ -1,111 +0,0 @@
# $Id:$
# Filter-in known "USER-AGENT:" values.
@load anon
@load http-anon-utils
# ---------------------------------------------------------------------
# Mozilla (and friends)
# ---------------------------------------------------------------------
const mozilla_full_pat =
/mozilla\/[0-9]\.[0-9] \(( *|;|iebar| freebsd i[0-9]{1,4}|fr|-|windows|windows 98|sunos sun4u|compatible|msie [0-9]\.[0-9]|windows nt [0-9]\.[0-9]|google-tr-1|sv1|\.net clr ([0-9]\.)*[0-9]+|x11|en|ppc mac os x|macintosh|u|linux i[0-9]{1,4}|en-us|rv\:([0-9]+\.)*[0-9]+|aol [0-9]\.[0-9]|gnotify ([0-9]+\.)*[0-9]+)*\) *(gecko\/[0-9]+)? *(firefox\/([0-9]+.)*[0-9]+)?/;
const mozilla_head_pat = /mozilla\/[0-9]\.[0-9]/;
const misc_user_pat =
/spiderman/
| /w3m\/([0-9]+\.)*[0-9]+/
| /java([0-9]+\.)*[0-9]+(_[0-9]+)?/
| /java\/([0-9]+\.)*[0-9]+(_[0-9]+)?/
| /freecorder/
| /industry update control/
| /microsoft-cryptoapi\/([0-9]+\.)*[0-9]+/
| /ruriko\/([0-9]+\.)*[0-9]+/
| /crawler[0-9]\.[0-9]/
| /w3search/
| /symantec liveupdate/
| /davkit\/[0-9]\.[0-9]/
| /windows-media-player\/([0-9]+\.)*[0-9]+/
| /winamp\/([0-9]+\.)*[0-9]+/
| /headdump/
;
const misc_cmplx_user_pat =
/lynx\/([0-9]+\.)*[0-9]+.*/
| /wget\/([0-9]+\.)*[0-9]+.*/
| /yahooseeker\/([0-9]+\.)*[0-9]+.*/
| /rma\/([0-9]+\.)*[0-9]+.*/
| /aim\/[0-9]+.*/
| /ichiro\/([0-9]+\.)*[0-9]+.*/
| /unchaos.*/
| /irlbot\/[0-9]\.[0-9]+.*/
| /msnbot\/([0-9]+\.)*[0-9]+.*/
| /opera\/([0-9]+\.)*[0-9]+.*/
| /netnewswire\/([0-9]+\.)*[0-9]+.*/
| /nsplayer\/([0-9]+\.)*[0-9]+.*/
| /aipbot\/([0-9]+\.)*[0-9]+.*/
| /mac os x; webservicescore\.framework.*/
| /fast-webcrawler\/([0-9]+\.)*[0-9]+.*/
| /skype.*/
| /googlebot\/([0-9]+\.)*[0-9]+.*/
;
const misc_cmplx_user_start =
/lynx\/([0-9]+\.)*[0-9]+/
| /wget\/([0-9]+\.)*[0-9]+/
| /yahooseeker\/([0-9]+\.)*[0-9]+/
| /rma\/([0-9]+\.)*[0-9]+/
| /aim\/[0-9]+/
| /ichiro\/([0-9]+\.)*[0-9]+/
| /unchaos/
| /irlbot\/[0-9]\.[0-9]+/
| /opera\/([0-9]+\.)*[0-9]+/
| /msnbot\/([0-9]+\.)*[0-9]+/
| /netnewswire\/([0-9]+\.)*[0-9]+/
| /nsplayer\/([0-9]+\.)*[0-9]+/
| /aipbot\/([0-9]+\.)*[0-9]+/
| /mac os x; webservicescore\.framework/
| /fast-webcrawler\/([0-9]+\.)*[0-9]+/
| /skype/
| /googlebot\/([0-9]+\.)*[0-9]+/
;
# Filter an HTTP "User-Agent" header value for anonymization: known
# agents are reduced to their recognized form; unknown ones are logged
# and replaced by an anonymized placeholder.
function filter_in_http_useragent(user: string): string
{
# Check for an exact match for Mozilla.
if ( mozilla_full_pat in user )
return split_all(user, mozilla_full_pat)[2];
# Look for popular Mozilla-compatible crawlers.
if ( mozilla_head_pat in user )
{
local crawler = "(bro: unknown)";
if ( /.*yahoo\! slurp/ in user )
crawler = "(yahoo! slurp)";
else if ( /.*ask jeeves/ in user )
crawler = "(ask jeeves)";
else
print http_anon_log,
fmt("*** unknown Mozilla user-agent %s\n", user);
# Keep only the "mozilla/x.y" prefix plus the crawler tag.
return fmt("%s %s", split_all(user, mozilla_head_pat)[2],
crawler);
}
# Some simple, common user names.
if ( misc_user_pat in user )
return user;
# Require some info removal.
if ( misc_cmplx_user_pat in user )
return split_all(user, misc_cmplx_user_pat)[2];
# Completely unknown agent: anonymize the whole value.
print http_anon_log,fmt("*** unknown user agent %s\n", user);
return fmt("(bro: unknown) %s", anonymize_arg("user-agent", user));
}

View file

@ -1,164 +0,0 @@
# $Id:$
@load anon
# Shared log stream for all HTTP anonymization diagnostics.
global http_anon_log = open_log_file("http-anon") &redef;
# Leading "proto://" prefix of a URI; known_URI_proto_pat is the subset
# of protocols we recognize (others get flagged and anonymized).
const URI_proto_pat = /^ *([a-zA-Z]+)\:\/\// ;
const known_URI_proto_pat = /^ *(http|https|ftp|ssh)\:\/\// ;
# Host (domain) component at the start of the remaining URI.
const host_pat = / *^([\-0-9a-zA-Z]+\.)+([\_\-0-9a-zA-Z])*/ ;
# NOTE(review): this requires a literal '.' right after the port digits,
# so a typical ":8080/" never matches -- looks like a typo; verify
# intent before reusing.
const port_pat = /^ *(\:[0-9]+\.)/ ;
const query_pat = /\?/ ;
# Anonymize a full HTTP URI: lower-case it, split it into protocol,
# host, (port) and path/query components, anonymize each component
# separately, and re-assemble the result.
function anonymize_http_URI(URI: string): string
{
URI = to_lower(URI);
# Strip off protocol.
local proto = "";
if ( URI_proto_pat in URI )
{
local proto_part = split(URI, /\:\/\//);
# Check if we know the protocol. If not, flag it so we
# can update our protocol database.
if ( known_URI_proto_pat !in URI )
{
print http_anon_log,
fmt("*** protocol %s unknown ", proto_part[1]);
proto_part[1] =
string_cat(" (bro: unknown) ",
anonymize_arg("proto", proto_part[1]));
}
proto = string_cat(proto_part[1],"://");
URI = proto_part[2];
}
# Strip off domain.
local host = "";
if ( host_pat in URI )
{
# Interleaved pieces: [2] is the host match, [3]+ the rest.
local base_parts =
split_all(URI, / *^([\-\_0-9a-z]+\.)+[\-\_0-9a-z]*/);
if ( |base_parts| < 2 )
{
print http_anon_log,
fmt (" XXXXXXXXXXXXXXXXXXXXXX BASE %s", URI);
return " XXXX processing error XXXX";
}
if ( |base_parts| == 2 )
URI = "";
else if ( |base_parts| == 3)
URI = base_parts[3];
else if ( |base_parts| > 3)
{
# Re-join everything except the host piece ([2]).
# The loop body indexes by the manual counter i (not
# the unordered iteration variable), so pieces are
# concatenated in index order.
local patch_me = "";
# NOTE(review): 'hack' is assigned but never used.
local hack = base_parts[2];
local i = 1;
for ( part in base_parts )
{
if ( i != 2 )
patch_me = string_cat(patch_me,
base_parts[i]);
i += 1;
}
URI = patch_me;
}
# NOTE(review): 'host' is still "" at this point, so this
# compares the empty string against simple_filename --
# probably base_parts[2] was intended; verify against the
# definition of simple_filename in anon.
if ( host == simple_filename )
host = anonymize_path(host);
else
host = anonymize_host(base_parts[2]);
}
# Strip off port (if it exists).
local pport = "";
if ( port_pat in URI )
{
# Port handling was never implemented; loudly flag it.
print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ";
print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ";
print "XXXXX anon.bro doing nothing with port XXXXXXXXXXX ";
print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ";
print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ";
}
# Handle query (if exists).
local tail = "";
if ( URI == "/" )
{
# -- pass
}
else if ( query_pat in URI )
{
# Anonymize path and query string separately, keeping the '?'.
local query_part = split(URI, /\?/);
tail = fmt("%s?%s",
anonymize_path(query_part[1]),
anonymize_path(query_part[2]));
}
else
tail = anonymize_path(URI);
tail = string_cat("/", tail);
return fmt("%s%s%s%s", proto, host, pport, tail);
}
const a_href_pat = /.*\< *a *href.*\>.*/ ;
#/.*\< *a *href *= *\"[[:print:]]+\" *\>.*/;
# Doesn't get everything ... but works for most.
const a_href_split =
/\< *a *href *= *(\\)?(\"|\')?([0-9a-z\/._!\[\]():*;~&|$\\=+\-?%@])+(\\)?(\"|\')?/ ;
# Elegant ... yeah ... really .. :-/
const file_split =
/(\"|\')([0-9a-z\/._!\[\]():*;~&|$\\=+\-?%@])+(\"|\')/ ;
const file_strip_split = /([0-9a-z\/._!\[\]():*;~&|$\\=+\-?%@])+/ ;
# Scan a document abstract for <a href=...> links and return a
# space-separated string of the anonymized URIs found in it.
function http_doc_link_list(abstract: string): string
{
abstract = to_lower(abstract);
if ( abstract == "" )
return abstract;
local concat_key = "";
# Interleaved pieces; even indices are href matches.
local href_parts = split_all(abstract, a_href_split);
for ( part in href_parts )
{
# Keep only pieces that exactly match the href pattern.
if ( href_parts[part] == a_href_split )
{
# Pull the quoted target out of the href attribute...
local file_parts =
split_all(href_parts[part], file_split);
for ( a_part in file_parts )
{
if ( file_parts[a_part] == file_split )
{
# ... strip the quotes and anonymize
# the resulting URI.
local file_strip_parts =
split_all(file_parts[a_part],
file_strip_split);
concat_key = fmt("%s %s", concat_key,
anonymize_http_URI(file_strip_parts[2]));
}
}
}
}
return concat_key;
}

View file

@ -1,60 +0,0 @@
# $Id: http-body.bro 5230 2008-01-14 01:38:18Z vern $
# Counts length of data.
#
# If log_HTTP_data = T, it also outputs an abstract of data.
@load http
module HTTP;
redef process_HTTP_data = T;
redef log_HTTP_data = T;
export {
# If the following is > 0, then when logging contents, they will be
# truncated beyond this many bytes.
global content_truncation_limit = 40 &redef;
}
# Accumulate entity-body length per message and, when log_HTTP_data is
# set, log a (possibly truncated) abstract of each data chunk.
event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
{
local s = lookup_http_request_stream(c);
local msg = get_http_message(s, is_orig);
local len = byte_len(data);
# Note: accumulation and logging use the analyzer-reported 'length';
# the truncation decision uses the delivered byte count 'len'.
msg$data_length = msg$data_length + length;
if ( log_HTTP_data )
{
local abstract: string;
if ( content_truncation_limit > 0 &&
len > content_truncation_limit )
abstract = cat(sub_bytes(data, 1, content_truncation_limit), "...");
else
abstract = data;
print http_log, fmt("%.6f %s %s %d bytes: \"%s\"",
network_time(), s$id,
is_orig ? "=>" : "<=", length,
abstract);
}
}
# Consistency check (debugging only): compare the accumulated delivered
# data length against the analyzer's reported body length plus content
# gaps. The check body is intentionally empty -- see the comment inside.
event http_message_done(c: connection, is_orig: bool, stat: http_message_stat)
{
local s = lookup_http_request_stream(c);
local msg = get_http_message(s, is_orig);
# This is for debugging purpose only
if ( msg$data_length > 0 &&
stat$body_length != msg$data_length + stat$content_gap_length)
{
# This can happen for multipart messages with a
# 'content-length' header, which is not required for multipart
# messages.
# Log::warning(fmt("length mismatch: %s %d %d %d",
# id_string(c$id), stat$body_length, msg$data_length,
# stat$content_gap_length));
}
}

View file

@ -1,45 +0,0 @@
@load http
module HTTP;
export {
redef enum Notice += {
PasswordFullFetch, # they got back the whole thing
PasswordShadowFetch, # they got back a shadowed version
};
# Pattern to search for in replies indicating that a full password
# file was returned.
const full_fetch =
/[[:alnum:]]+\:[[:alnum:]]+\:[[:digit:]]+\:[[:digit:]]+\:/
&redef;
# Same, but indicating a shadow password file was returned.
const shadow_fetch =
/[[:alnum:]]+\:\*\:[[:digit:]]+\:[[:digit:]]+\:/
&redef;
}
# For replies to requests previously flagged as password-file fetches,
# inspect the response body for full or shadowed password file content
# and raise the corresponding notice.
event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
{
local s = lookup_http_request_stream(c);
# Match this reply data to the oldest pending request.
local n = s$first_pending_request;
if ( n !in s$requests )
return;
local req = s$requests[n];
local passwd_request = req$passwd_req;
if ( ! passwd_request )
return;
if ( full_fetch in data )
NOTICE([$note=PasswordFullFetch,
$conn=c, $method=req$method, $URL=req$URI,
$msg=fmt("%s %s: %s %s", id_string(c$id), c$addl,
req$method, req$URI)]);
else if ( shadow_fetch in data )
NOTICE([$note=PasswordShadowFetch,
$conn=c, $method=req$method, $URL=req$URI,
$msg=fmt("%s %s: %s %s", id_string(c$id), c$addl,
req$method, req$URI)]);
}

View file

@ -1,20 +0,0 @@
# $Id: http-entity.bro 6 2004-04-30 00:31:26Z jason $
# Counts entity_level.
module HTTP;
# A new MIME entity begins: increase the nesting depth recorded on the
# message (request or reply) it belongs to.
event http_begin_entity(c: connection, is_orig: bool)
{
local stream = lookup_http_request_stream(c);
local message = get_http_message(stream, is_orig);
message$entity_level = message$entity_level + 1;
}
# A MIME entity ends: decrease the nesting depth recorded on the
# message, guarding against underflow.
event http_end_entity(c: connection, is_orig: bool)
{
local stream = lookup_http_request_stream(c);
local message = get_http_message(stream, is_orig);
if ( message$entity_level == 0 )
return;
message$entity_level = message$entity_level - 1;
}

View file

@ -1,12 +0,0 @@
# $Id: http-event.bro 6 2004-04-30 00:31:26Z jason $
@load http
module HTTP;
# Log every generic HTTP analyzer event (protocol oddities etc.) with
# its type and detail text.
event http_event(c: connection, event_type: string, detail: string)
{
print http_log, fmt("%.6f %s HTTP event: [%s] \"%s\"",
network_time(), id_string(c$id),
event_type, detail);
}

View file

@ -1,41 +0,0 @@
# $Id:$
# Extracts the items from HTTP traffic, one per file.
# Files are named:
#
# <prefix>.<n>.<orig-addr>_<orig-port>.<resp-addr>_<resp-port>.<is-orig>
#
# where <prefix> is a redef'able prefix (default: "http-item"), <n> is
# a number uniquely identifying the item, the next four are describe
# the connection tuple, and <is-orig> is "orig" if the item was transferred
# from the originator to the responder, "resp" otherwise.
@load http-reply
module HTTP_extract_items;
global prefix = "http-item" &redef;
global item_file: table[conn_id] of file;
global nitems = 0;
# Write each HTTP item's data to its own file, creating the file (and
# its unique name) on the first chunk seen for a connection.
event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
{
local id = c$id;
if ( id !in item_file )
{
# Create a new file for this one.
# Name format: <prefix>.<n>.<orig-addr>_<orig-port>.<resp-addr>_<resp-port>.<is-orig>
local fname = fmt("%s.%d.%s_%d.%s_%d.%s",
prefix, ++nitems,
id$orig_h, id$orig_p,
id$resp_h, id$resp_p,
is_orig ? "orig" : "resp");
item_file[id] = open(fname);
}
write_file(item_file[id], data);
}
# Message finished: drop the per-connection file handle so the next
# item on this connection gets a fresh file. (The file object is
# released once no references remain -- presumably closing it; verify
# against Bro's file semantics.)
event http_message_done(c: connection, is_orig: bool, stat: http_message_stat)
{
delete item_file[c$id];
}

View file

@ -1,36 +0,0 @@
# $Id: http-header.bro 7073 2010-09-13 00:45:02Z vern $
# Prints out detailed HTTP headers.
@load http
module HTTP;
export {
# The following lets you specify headers that you don't want
# to print out.
global skip_header: set[string] &redef;
# If you add anything to the following table, *only* the headers
# included will be recorded.
global include_header: set[string] &redef;
# For example:
# redef skip_header += { "COOKIE", "SET-COOKIE" };
# will refrain from printing cookies.
}
# Print each HTTP header, honoring the skip_header blacklist and --
# when non-empty -- the include_header whitelist.
event http_header(c: connection, is_orig: bool, name: string, value: string)
{
if ( name in skip_header )
return;
# A non-empty include_header set acts as an exclusive whitelist.
if ( |include_header| > 0 && name !in include_header )
return;
local s = lookup_http_request_stream(c);
print http_log, fmt("%.6f %s %s %s: %s",
network_time(), s$id,
is_orig ? ">" : "<", name, value);
}

View file

@ -1,115 +0,0 @@
# $Id:$
#
# Analyze HTTP entities for sensitive types (e.g., executables).
#
# Contributed by Seth Hall.
@load http-reply
module HTTP;
const http_identified_log = open_log_file("http-id");
export {
# Base the libmagic analysis on this many bytes. Currently,
# we will in fact use fewer (basically, just what's in the
# first data packet).
const magic_content_limit = 1024 &redef;
# These MIME types are logged and generate a Notice. The patterns
# need to match the entire description as returned by libMagic.
# For example, for plain text it can return
# "text/plain charset=us-ascii", so you might want to use
# /text\/plain.*/.
const watched_mime_types =
/application\/x-dosexec/ # Windows and DOS executables
| /application\/x-executable/ # *NIX executable binary
&redef;
const watched_descriptions = /PHP script text/ &redef;
# URLs included here are not logged and notices are not generated.
# Take care when defining patterns to not be overly broad.
const ignored_urls =
/^http:\/\/www\.download\.windowsupdate\.com\// &redef;
redef enum Notice += {
# Generated when we see a MIME type we flagged for watching.
HTTP_WatchedMIMEType,
# Generated when the file extension doesn't match
# the file contents.
HTTP_IncorrectFileType,
};
# Create patterns that *should* be in the URLs for specific MIME types.
# Notices are generated if the pattern doesn't match.
const mime_types_extensions = {
["application/x-dosexec"] = /\.([eE][xX][eE]|[dD][lL][lL])/,
} &redef;
}
# Run libmagic-style content identification over the first chunk of
# each server response body; raise notices for watched MIME types and
# for files whose content type disagrees with their URL extension.
event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
{
if ( is_orig )
# For now we only inspect server responses.
return;
local s = lookup_http_request_stream(c);
local msg = get_http_message(s, is_orig);
@ifndef ( content_truncation_limit )
# This is only done if http-body.bro is not loaded.
msg$data_length = msg$data_length + length;
@endif
# For the time being, we'll just use the data from the first packet.
# Don't continue until we have enough data.
# if ( msg$data_length < magic_content_limit )
# return;
# Right now, only try this for the first chunk of data
if ( msg$data_length > length )
return;
local abstract = sub_bytes(data, 1, magic_content_limit);
# identify_data(): T -> MIME type string, F -> textual description.
local magic_mime = identify_data(abstract, T);
local magic_descr = identify_data(abstract, F);
# "string == pattern" is an exact match; "pattern in string" a search.
if ( (magic_mime == watched_mime_types ||
watched_descriptions in magic_descr) &&
s$first_pending_request in s$requests )
{
local r = s$requests[s$first_pending_request];
# Fall back to the responder address if no Host header was seen.
local host = (s$next_request$host=="") ?
fmt("%s", c$id$resp_h) : s$next_request$host;
event file_transferred(c, abstract, magic_descr, magic_mime);
local url = fmt("http://%s%s", host, r$URI);
if ( ignored_urls in url )
return;
local file_type = "";
if ( magic_mime == watched_mime_types )
file_type = magic_mime;
else
file_type = magic_descr;
local message = fmt("%s %s %s %s",
id_string(c$id), file_type, r$method, url);
NOTICE([$note=HTTP_WatchedMIMEType, $msg=message, $conn=c,
$method=r$method, $URL=url]);
print http_identified_log, fmt("%.06f %s %s",
network_time(), s$id, message);
# Flag content whose URL lacks the extension expected for its type.
if ( (magic_mime in mime_types_extensions &&
mime_types_extensions[magic_mime] !in url) ||
(magic_descr in mime_types_extensions &&
mime_types_extensions[magic_descr] !in url) )
NOTICE([$note=HTTP_IncorrectFileType, $msg=message,
$conn=c, $method=r$method, $URL=url]);
}
}

View file

@ -1,117 +0,0 @@
# $Id: http-reply.bro 2694 2006-04-02 22:50:00Z vern $
@load http-request
module HTTP;
redef capture_filters += {
["http-reply"] = "tcp src port 80 or tcp src port 8080 or tcp src port 8000"
};
redef process_HTTP_replies = T;
# A reply status line arrived: reset the stream's in-progress reply
# message and record its status code and reason phrase.
event http_reply(c: connection, version: string, code: count, reason: string)
{
local s = lookup_http_request_stream(c);
local msg = s$next_reply;
init_http_message(msg);
msg$initiated = T;
msg$code = code;
msg$reason = reason;
}
# A request message completed: mark the stream's in-progress request as
# no longer active.
function http_request_done(c: connection, stat: http_message_stat)
{
local stream = lookup_http_request_stream(c);
stream$next_request$initiated = F;
}
# A reply message completed: pair it with the oldest pending request,
# emit the combined request/reply log line, raise HTTP_SensitiveURI if
# the request was flagged, and retire the pending request.
function http_reply_done(c: connection, stat: http_message_stat)
{
local s = lookup_http_request_stream(c);
local req_msg = s$next_request;
local msg = s$next_reply;
local req: string;
local have_request = F;
local log_it: bool;
if ( s$num_pending_requests == 0 )
{
# Weird - reply w/o request - perhaps due to cold start?
req = "<unknown request>";
log_it = F;
}
else
{
local r = s$requests[s$first_pending_request];
have_request = T;
# Remove pending request.
delete s$requests[s$first_pending_request];
--s$num_pending_requests;
++s$first_pending_request;
req = fmt("%s %s", r$method, r$URI);
log_it = r$log_it;
}
local req_rep =
fmt("%s (%d \"%s\" [%d%s]%s)",
req, msg$code, string_escape(msg$reason, "\""),
stat$body_length,
stat$interrupted ? " (interrupted)" : "",
have_request ? fmt(" %s", req_msg$host) : "");
# The following is a more verbose form:
# local req_rep =
# fmt("%s (%d \"%s\" [\"%s\", %d%s%s])",
# req, msg$code, msg$reason,
# msg$content_length, stat$body_length,
# stat$interrupted ? " (interrupted)" : "",
# stat$content_gap_length > 0 ?
# fmt(" (gap = %d bytes)", stat$content_gap_length) : "");
# 'r' is function-scoped in Bro; log_it can only be T when the
# else-branch above assigned it, so this use is safe.
if ( log_it )
NOTICE([$note=HTTP_SensitiveURI, $conn=c,
$method = r$method, $URL = r$URI,
$n = msg$code,
$msg = fmt("%s %s: %s",
id_string(c$id), c$addl, req_rep)]);
print http_log, fmt("%.6f %s %s", network_time(), s$id, req_rep);
msg$initiated = F;
}
# Route message completion to the request- or reply-side handler.
event http_message_done(c: connection, is_orig: bool, stat: http_message_stat)
{
if ( ! is_orig )
{
http_reply_done(c, stat);
return;
}
http_request_done(c, stat);
}
@load http-entity
# Record Content-Length and (for requests) the Host header value on the
# in-progress message, but only for top-level headers.
event http_header(c: connection, is_orig: bool, name: string, value: string)
{
# Only rewrite top-level headers.
local s = lookup_http_request_stream(c);
local msg = get_http_message(s, is_orig);
if ( msg$entity_level == 1 )
{
if ( name == "CONTENT-LENGTH" )
msg$content_length = value;
else if ( is_orig && name == "HOST" )
{ # suppress leading blank
if ( /^ / in value )
msg$host = sub_bytes(value, 2, -1);
else
msg$host = value;
}
}
}

View file

@ -1,104 +0,0 @@
# $Id: http-request.bro 6726 2009-06-07 22:09:55Z vern $
# Analysis of HTTP requests.
@load http
module HTTP;
export {
const sensitive_URIs =
/etc\/(passwd|shadow|netconfig)/
| /IFS[ \t]*=/
| /nph-test-cgi\?/
| /(%0a|\.\.)\/(bin|etc|usr|tmp)/
| /\/Admin_files\/order\.log/
| /\/carbo\.dll/
| /\/cgi-bin\/(phf|php\.cgi|test-cgi)/
| /\/cgi-dos\/args\.bat/
| /\/cgi-win\/uploader\.exe/
| /\/search97\.vts/
| /tk\.tgz/
| /ownz/ # somewhat prone to false positives
| /viewtopic\.php.*%.*\(.*\(/ # PHP attack, 26Nov04
# a bunch of possible rootkits
| /sshd\.(tar|tgz).*/
| /[aA][dD][oO][rR][eE][bB][sS][dD].*/
# | /[tT][aA][gG][gG][eE][dD].*/ # prone to FPs
| /shv4\.(tar|tgz).*/
| /lrk\.(tar|tgz).*/
| /lyceum\.(tar|tgz).*/
| /maxty\.(tar|tgz).*/
| /rootII\.(tar|tgz).*/
| /invader\.(tar|tgz).*/
&redef;
# Used to look for attempted password file fetches.
const passwd_URI = /passwd/ &redef;
# URIs that match sensitive_URIs but can be generated by worms,
# and hence should not be flagged (because they're so common).
const worm_URIs =
/.*\/c\+dir/
| /.*cool.dll.*/
| /.*Admin.dll.*Admin.dll.*/
&redef;
# URIs that should not be considered sensitive if accessed by
# a local client.
const skip_remote_sensitive_URIs =
/\/cgi-bin\/(phf|php\.cgi|test-cgi)/
&redef;
const sensitive_post_URIs = /wwwroot|WWWROOT/ &redef;
}
redef capture_filters += {
["http-request"] = "tcp dst port 80 or tcp dst port 8080 or tcp dst port 8000"
};
# Handle an HTTP request line: decide whether the URI is sensitive
# (worm URIs and locally-originated accesses to certain URIs are
# exempted), then either queue the request for reply matching (when
# reply processing is on) or log/notice it immediately.
event http_request(c: connection, method: string, original_URI: string,
unescaped_URI: string, version: string)
{
local log_it = F;
local URI = unescaped_URI;
# "URI != worm_URIs" is an exact-match test against the worm
# pattern: sensitive URIs that look like worm traffic are too
# common to flag.
if ( (sensitive_URIs in URI && URI != worm_URIs) ||
(method == "POST" && sensitive_post_URIs in URI) )
{
if ( is_local_addr(c$id$orig_h) &&
skip_remote_sensitive_URIs in URI )
; # don't flag it after all
else
log_it = T;
}
local s = lookup_http_request_stream(c);
if ( process_HTTP_replies )
{
# To process HTTP replies, we need to record the corresponding
# requests.
local n = s$first_pending_request + s$num_pending_requests;
s$requests[n] = [$method=method, $URI=URI, $log_it=log_it,
$passwd_req=passwd_URI in URI];
++s$num_pending_requests;
# if process_HTTP_messages
local msg = s$next_request;
init_http_message(msg);
msg$initiated = T;
}
else
{
# No reply matching: report the request right away.
if ( log_it )
NOTICE([$note=HTTP_SensitiveURI, $conn=c,
$method = method, $URL = URI,
$msg=fmt("%s %s: %s %s",
id_string(c$id), c$addl, method, URI)]);
print http_log,
fmt("%.6f %s %s %s", network_time(), s$id, method, URI);
}
}

View file

@ -1,203 +0,0 @@
# $Id: http.bro 6726 2009-06-07 22:09:55Z vern $
@load notice
@load site
@load conn-id
module HTTP;
export {
redef enum Notice += {
HTTP_SensitiveURI, # sensitive URI in GET/POST/HEAD
};
}
# DPM configuration.
global http_ports = {
80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3138/tcp,
8000/tcp, 8080/tcp, 8888/tcp,
};
redef dpd_config += { [ANALYZER_HTTP] = [$ports = http_ports] };
redef dpd_config += { [ANALYZER_HTTP_BINPAC] = [$ports = http_ports] };
# HTTP processing options.
export {
const process_HTTP_replies = F &redef;
const process_HTTP_data = F &redef;
const include_HTTP_abstract = F &redef;
const log_HTTP_data = F &redef;
}
type http_pending_request: record {
method: string;
URI: string;
log_it: bool;
# Whether we determined it's an attempted passwd file fetch.
passwd_req: bool;
};
# Eventually we will combine http_pending_request and http_message.
type http_message: record {
initiated: bool;
code: count; # for HTTP reply message
reason: string; # for HTTP reply message
entity_level: count; # depth of enclosing MIME entities
data_length: count; # actual length of data delivered
content_length: string; # length specified in CONTENT-LENGTH header
header_slot: count; # rewrite slot at the end of headers
abstract: string; # data abstract
skip_abstract: bool; # to skip abstract for certain content types
host: string; # host indicated in Host header
};
type http_pending_request_stream: record {
# Number of first pending request.
first_pending_request: count &default = 0;
# Total number of pending requests.
num_pending_requests: count &default = 0;
# Indexed from [first_pending_request ..
# (first_pending_request + num_pending_requests - 1)]
requests: table[count] of http_pending_request;
next_request: http_message; # the on-going request
next_reply: http_message; # the on-going reply
# len_next_reply: count; # 0 means unspecified
# len_next_request: count;
id: string; # repeated from http_session_info, for convenience
};
type http_session_info: record {
id: string;
request_stream: http_pending_request_stream;
};
const http_log = open_log_file("http") &redef;
export {
global http_sessions: table[conn_id] of http_session_info;
}
global http_session_id = 0;
# Reset every field of an http_message record to its "no message yet"
# state. Records are passed by reference, so this mutates the caller's
# record in place.
function init_http_message(msg: http_message)
{
msg$initiated = F;
msg$code = 0;
msg$reason = "";
msg$entity_level = 0;
msg$data_length = 0;
msg$content_length = "";
msg$header_slot = 0;
msg$abstract = "";
msg$skip_abstract = F;
msg$host = "";
}
# Allocate and return a freshly reset http_message record.
function new_http_message(): http_message
{
local fresh: http_message;
init_http_message(fresh);
return fresh;
}
# Create and register a new per-connection HTTP session: assign it a
# unique printable id, initialize its pending-request stream, store it
# in http_sessions, and log the session start.
function new_http_session(c: connection): http_session_info
{
local session = c$id;
local new_id = ++http_session_id;
local info: http_session_info;
# Session ids look like "%<prefixed-id>" in the log.
info$id = fmt("%%%s", prefixed_id(new_id));
local rs: http_pending_request_stream;
rs$first_pending_request = 1;
rs$num_pending_requests = 0;
rs$id = info$id;
rs$next_request = new_http_message();
rs$next_reply = new_http_message();
rs$requests = table();
info$request_stream = rs;
http_sessions[session] = info;
print http_log, fmt("%.6f %s start %s:%d > %s:%d", network_time(),
info$id, c$id$orig_h,
c$id$orig_p, c$id$resp_h, c$id$resp_p);
return info;
}
# Fetch the session record for this connection, creating one if none
# exists yet, and tag the connection's addl field with the session id.
function lookup_http_session(c: connection): http_session_info
{
local cid = c$id;
local info: http_session_info;
if ( cid in http_sessions )
info = http_sessions[cid];
else
info = new_http_session(c);
append_addl(c, info$id);
return info;
}
# Convenience accessor: the pending-request stream of this
# connection's session (created on demand).
function lookup_http_request_stream(c: connection): http_pending_request_stream
{
return lookup_http_session(c)$request_stream;
}
# Select the in-progress message for one direction: the request for
# originator traffic, the reply otherwise.
function get_http_message(s: http_pending_request_stream, is_orig: bool): http_message
{
if ( is_orig )
return s$next_request;
return s$next_reply;
}
# Flush a session's still-pending requests at teardown: notice any
# flagged ones and log each as having received no reply.
function finish_stream(session: conn_id, id: string,
rs: http_pending_request_stream)
{
### We really want to do this in sequential order, not table order.
for ( i in rs$requests )
{
local req = rs$requests[i];
if ( req$log_it )
NOTICE([$note=HTTP_SensitiveURI,
$src=session$orig_h, $dst=session$resp_h,
$URL=req$URI,
$method=req$method,
$msg=fmt("%s:%d -> %s:%d %s: <no reply>",
session$orig_h, session$orig_p,
session$resp_h, session$resp_p, id)]);
local msg = fmt("%s %s <no reply>", req$method, req$URI);
print http_log, fmt("%.6f %s %s", network_time(), rs$id, msg);
}
}
# Connection teardown: flush any unanswered requests for this session
# and drop its state.
event connection_state_remove(c: connection)
{
local cid = c$id;
if ( cid !in http_sessions )
return;
local info = http_sessions[cid];
finish_stream(cid, info$id, info$request_stream);
delete http_sessions[cid];
}
# event http_stats(c: connection, stats: http_stats_rec)
# {
# if ( stats$num_requests == 0 && stats$num_replies == 0 )
# return;
#
# c$addl = fmt("%s (%d v%.1f v%.1f)", c$addl, stats$num_requests, stats$request_version, stats$reply_version);
# }

View file

@ -1,306 +0,0 @@
# $Id: icmp.bro 6883 2009-08-19 21:08:09Z vern $
@load hot
@load weird
@load conn
@load scan
global icmp_file = open_log_file("icmp");
redef capture_filters += { ["icmp"] = "icmp" };
module ICMP;
export {
redef enum Notice += {
ICMPAsymPayload, # payload in echo req-resp not the same
ICMPConnectionPair, # too many ICMPs between hosts
ICMPAddressScan,
# The following isn't presently sufficiently useful due
# to cold start and packet drops.
# ICMPUnpairedEchoReply, # no EchoRequest seen for EchoReply
};
# Whether to log detailed information icmp.log.
const log_details = T &redef;
# ICMP scan detection.
const detect_scans = T &redef;
const scan_threshold = 25 &redef;
# Analysis of connection pairs.
const detect_conn_pairs = F &redef; # switch for connection pair
const detect_payload_asym = F &redef; # switch for echo payload
const conn_pair_threshold = 200 &redef;
}
global conn_pair:table[addr] of set[addr] &create_expire = 1 day;
global conn_pair_thresh_reached: table[addr] of bool &default=F;
type flow_id: record {
orig_h: addr;
resp_h: addr;
id: count;
};
type flow_info: record {
start_time: time;
last_time: time;
orig_bytes: count;
resp_bytes: count;
payload: string;
};
const names: table[count] of string = {
[0] = "echo_reply",
[3] = "unreach",
[4] = "quench",
[5] = "redirect",
[8] = "echo_req",
[9] = "router_adv",
[10] = "router_sol",
[11] = "time_xcd",
[12] = "param_prob",
[13] = "tstamp_req",
[14] = "tstamp_reply",
[15] = "info_req",
[16] = "info_reply",
[17] = "mask_req",
[18] = "mask_reply",
} &default = function(n: count): string { return fmt("icmp-%d", n); };
# Map IP protocol number to the protocol's name.
const IP_proto_name: table[count] of string = {
[1] = "ICMP",
[2] = "IGMP",
[6] = "TCP",
[17] = "UDP",
[41] = "IPV6",
} &default = function(n: count): string { return fmt("%s", n); }
&redef;
# Print a report for the given ICMP flow.
# Print a report for the given ICMP flow.
# Writes a conn.log-style summary line for one ICMP echo flow:
# start time, duration, endpoints, byte counts, a TCP-like state code
# (SF/SH/SHR/OTH derived from which sides sent data) and an "L" flag
# when the local side initiated.
function generate_flow_summary(flow: flow_id, fi: flow_info)
{
local local_init = is_local_addr(flow$orig_h);
# NOTE(review): local_addr/remote_addr are computed but unused; the
# summary line prints flow$orig_h/flow$resp_h directly.
local local_addr = local_init ? flow$orig_h : flow$resp_h;
local remote_addr = local_init ? flow$resp_h : flow$orig_h;
local flags = local_init ? "L" : "";
local state: string;
if ( fi$orig_bytes > 0 )
{
if ( fi$resp_bytes > 0 )
state = "SF";
else
state = "SH";
}
else if ( fi$resp_bytes > 0 )
state = "SHR";
else
state = "OTH";
print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s",
fi$start_time, fi$last_time - fi$start_time,
flow$orig_h, flow$resp_h, "icmp_echo",
fi$orig_bytes, fi$resp_bytes, state, flags);
}
# Called when a flow is expired in order to generate a report for it.
# Installed as &expire_func on the flows table; returning 0 secs
# grants no lifetime extension, so the entry is dropped after logging.
function flush_flow(ft: table[flow_id] of flow_info, fi: flow_id): interval
	{
	local info = ft[fi];
	generate_flow_summary(fi, info);
	return 0 sec;
	}
# Table to track each active flow.  Entries not read for 45 seconds
# are expired through flush_flow, which logs a final summary.
global flows: table[flow_id] of flow_info
&read_expire = 45 sec
&expire_func = flush_flow;
# Log any other transmitted ICMP message as a zero-duration,
# half-open ("SH") entry in the ICMP log file.
event icmp_sent(c: connection, icmp: icmp_conn)
{
print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s",
network_time(), 0.0, icmp$orig_h, icmp$resp_h,
names[icmp$itype], icmp$itype, icmp$icode, "icmp",
icmp$len, "0", "SH");
}
# Scheduled 30s after each flow update (see update_flow).  Logs and
# removes the flow only if last_time is unchanged, i.e. no further
# activity arrived since this event was scheduled.
event flow_summary(flow: flow_id, last_time: time)
{
if ( flow !in flows )
return;
local fi = flows[flow];
if ( fi$last_time == last_time )
{
generate_flow_summary(flow, fi);
delete flows[flow];
}
}
# Record activity for one ICMP echo flow: create the flow entry on
# first sight, bump the per-direction byte counter, and (re)schedule
# a summary check 30 seconds out.
function update_flow(icmp: icmp_conn, id: count, is_orig: bool, payload: string)
{
# Normalize the key so orig_h is always the echo requester.
local fid: flow_id;
fid$orig_h = is_orig ? icmp$orig_h : icmp$resp_h;
fid$resp_h = is_orig ? icmp$resp_h : icmp$orig_h;
fid$id = id;
if ( fid !in flows )
{
local info: flow_info;
info$start_time = network_time();
info$orig_bytes = info$resp_bytes = 0;
info$payload = payload; # checked in icmp_echo_reply
flows[fid] = info;
}
# NOTE(review): assumes fi aliases the table entry (record
# assignment by reference) so the updates below persist — confirm.
local fi = flows[fid];
fi$last_time = network_time();
if ( is_orig )
fi$orig_bytes = fi$orig_bytes + byte_len(payload);
else
fi$resp_bytes = fi$resp_bytes + byte_len(payload);
schedule +30sec { flow_summary(fid, fi$last_time) };
}
# Handle an ICMP echo request: update flow state, then run the two
# optional detectors (ping scan, connection-pair threshold).
event icmp_echo_request(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string)
{
update_flow(icmp, id, T, payload);
local orig = icmp$orig_h;
local resp = icmp$resp_h;
# Simple ping scan detector.
if ( detect_scans &&
(orig !in Scan::distinct_peers ||
resp !in Scan::distinct_peers[orig]) )
{
if ( orig !in Scan::distinct_peers )
{
local empty_peer_set: set[addr] &mergeable;
Scan::distinct_peers[orig] = empty_peer_set;
}
if ( resp !in Scan::distinct_peers[orig] )
add Scan::distinct_peers[orig][resp];
# Raise a single ICMPAddressScan notice once the source has
# probed scan_threshold distinct hosts, unless whitelisted.
if ( ! Scan::shut_down_thresh_reached[orig] &&
orig !in Scan::skip_scan_sources &&
orig !in Scan::skip_scan_nets &&
|Scan::distinct_peers[orig]| >= scan_threshold )
{
NOTICE([$note=ICMPAddressScan, $src=orig,
$n=scan_threshold,
$msg=fmt("%s has icmp echo scanned %s hosts",
orig, scan_threshold)]);
Scan::shut_down_thresh_reached[orig] = T;
}
}
# Track distinct responders per originator and notice once the
# count crosses conn_pair_threshold (at most once per source).
if ( detect_conn_pairs )
{
if ( orig !in conn_pair )
{
local empty_peer_set2: set[addr] &mergeable;
conn_pair[orig] = empty_peer_set2;
}
if ( resp !in conn_pair[orig] )
add conn_pair[orig][resp];
if ( ! conn_pair_thresh_reached[orig] &&
|conn_pair[orig]| >= conn_pair_threshold )
{
NOTICE([$note=ICMPConnectionPair,
$msg=fmt("ICMP connection threshold exceeded : %s -> %s",
orig, resp)]);
conn_pair_thresh_reached[orig] = T;
}
}
}
# Handle an ICMP echo reply: optionally verify that the reply payload
# matches the request payload recorded for the flow, then update the
# flow's counters/timestamps.
# Fix: corrected the typo "inconsistancy" -> "inconsistency" in the
# ICMPAsymPayload notice message.
event icmp_echo_reply(c: connection, icmp: icmp_conn, id: count,
		seq: count, payload: string)
	{
	# Reconstruct the flow key from the requester's point of view,
	# since this is the reply direction.
	local fid: flow_id;
	fid$orig_h = icmp$resp_h;
	fid$resp_h = icmp$orig_h;
	fid$id = id;

	if ( fid !in flows )
		{
		# Reply without a matching request.  The notice stays
		# disabled, as in the original policy.
		# NOTICE([$note=ICMPUnpairedEchoReply,
		#	$msg=fmt("ICMP echo reply w/o request: %s -> %s",
		#		icmp$orig_h, icmp$resp_h)]);
		}
	else if ( detect_payload_asym )
		{
		local fi = flows[fid];
		local pl = fi$payload;

		if ( pl != payload )
			NOTICE([$note=ICMPAsymPayload,
				$msg=fmt("ICMP payload inconsistency: %s(%s) -> %s(%s)",
					icmp$orig_h, byte_len(fi$payload),
					icmp$resp_h, byte_len(payload))]);
		}

	update_flow(icmp, id, F, payload);
	}
# Handle ICMP unreachable messages.  If the embedded context matches
# an active connection, classify it (currently unused); if detailed
# logging is on, emit both the ICMP info and the encapsulated
# connection's details on one line.
event icmp_unreachable(c: connection, icmp: icmp_conn, code: count,
context: icmp_context)
{
if ( active_connection(context$id) )
{
# This section allows Bro to act on ICMP-unreachable packets
# that happen in the context of an active connection. It is
# not currently used.
local c2 = connection_record(context$id)
local os = c2$orig$state;
local rs = c2$resp$state;
local is_attempt =
is_tcp_port(c2$id$orig_p) ?
(os == TCP_SYN_SENT && rs == TCP_INACTIVE) :
(os == UDP_ACTIVE && rs == UDP_INACTIVE);
# Insert action here.
}
if ( log_details )
{
# ICMP unreachable packets are the only ones currently
# logged. Due to the connection data contained *within*
# them, each log line will contain two connections' worth
# of data. The initial ICMP connection info is the same
# as logged for connections.
# NOTE(review): context$len is printed twice below —
# possibly intentional, verify against log consumers.
print icmp_file, fmt("%.6f %.6f %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s",
network_time(), 0.0, icmp$orig_h, icmp$resp_h,
names[icmp$itype], icmp$itype, icmp$icode, "icmp",
icmp$len, "0", "EncapPkt:",
# This is the encapsulated packet:
context$id$orig_h, context$id$orig_p,
context$id$resp_h, context$id$resp_p,
context$len, IP_proto_name[context$proto],
context$len, context$bad_hdr_len,
context$bad_checksum);
}
}

View file

@ -1,68 +0,0 @@
# $Id: ident.bro 5948 2008-07-11 22:29:49Z vern $
# Ident (RFC 1413) analysis: tags connections with looked-up
# usernames and flags sensitive IDs.
@load notice
@load hot-ids
module Ident;
export {
redef enum Notice += {
IdentSensitiveID, # sensitive username in Ident lookup
};
# Usernames considered sensitive when seen in Ident replies.
const hot_ident_ids = { always_hot_ids, } &redef;
# Usernames exempted from the sensitive-ID check.
const hot_ident_exceptions = { "uucp", "nuucp", "daemon", } &redef;
}
redef capture_filters += { ["ident"] = "tcp port 113" };
# Ports at which to activate the Ident analyzer.
global ident_ports = { 113/tcp } &redef;
redef dpd_config += { [ANALYZER_IDENT] = [$ports = ident_ports] };
# Outstanding Ident queries, keyed by the Ident connection's 4-tuple
# plus the queried (local, remote) port pair.
global pending_ident_requests: set[addr, port, addr, port, port, port];
# Remember an outstanding Ident query so that the eventual reply or
# error can be paired with it in add_ident_tag.
event ident_request(c: connection, lport: port, rport: port)
	{
	add pending_ident_requests[c$id$orig_h, c$id$orig_p,
				c$id$resp_h, c$id$resp_p, lport, rport];
	}
# Attach an Ident-derived tag to the connection the lookup was about.
# If no matching request is pending, the tag is prefixed "orphan-".
# Returns the tagged connection: the subject connection when still
# active, else the Ident connection itself.
function add_ident_tag(c: connection, lport: port, rport: port, tag: string)
: connection
{
local id = c$id;
if ( [id$orig_h, id$orig_p, id$resp_h, id$resp_p, lport, rport] in
pending_ident_requests )
delete pending_ident_requests[id$orig_h, id$orig_p, id$resp_h, id$resp_p, lport, rport];
else
tag = fmt("orphan-%s", tag);
# The subject connection runs in the reverse direction of the
# Ident connection itself.
local c_orig_id = [$orig_h = id$resp_h, $orig_p = rport,
$resp_h = id$orig_h, $resp_p = lport];
local c_orig = active_connection(c_orig_id) ?
connection_record(c_orig_id) : c;
append_addl(c_orig, tag);
return c_orig;
}
# Tag the subject connection with the returned user ID and raise a
# notice when the ID is in the sensitive list (and not excepted).
# Fix: the original placed the record-field assignment "$user=..."
# inside the fmt() argument list; it belongs in the NOTICE record.
event ident_reply(c: connection, lport: port, rport: port,
		user_id: string, system: string)
	{
	local c_orig = add_ident_tag(c, lport, rport, fmt("ident/%s", user_id));

	if ( user_id in hot_ident_ids && user_id !in hot_ident_exceptions )
		{
		++c_orig$hot;
		NOTICE([$note=IdentSensitiveID, $conn=c, $user=c_orig$addl,
			$msg=fmt("%s hot ident: %s",
				c_orig$addl, id_string(c_orig$id))]);
		}
	}
# Tag the subject connection with the Ident error response line.
event ident_error(c: connection, lport: port, rport: port, line: string)
{
add_ident_tag(c, lport, rport, fmt("iderr/%s", line));
}

View file

@ -1,31 +0,0 @@
# $Id: inactivity.bro 7073 2010-09-13 00:45:02Z vern $
# Adjusts per-connection inactivity timeouts based on service port.
@load port-name
const inactivity_timeouts: table[port] of interval = {
# For interactive services, allow longer periods of inactivity.
[[telnet, rlogin, ssh, ftp]] = 1 hrs,
} &redef;
# Pick an inactivity timeout for the connection from its service port
# (adapted from hot.bro) and apply it via set_inactivity_timeout.
function determine_inactivity_timeout(c: connection)
	{
	local service = c$id$resp_p;

	# Half-established connection: fall back to the originator's
	# port when it, and not the responder's, is well-known.
	if ( c$orig$state == TCP_INACTIVE &&
	     service !in port_names && c$id$orig_p in port_names )
		service = c$id$orig_p;

	if ( service in inactivity_timeouts )
		set_inactivity_timeout(c$id, inactivity_timeouts[service]);
	}
# Apply the service-based inactivity timeout as soon as the
# connection is established.
event connection_established(c: connection)
{
determine_inactivity_timeout(c);
}

View file

@ -1,318 +0,0 @@
# $Id: interconn.bro 3997 2007-02-23 00:31:19Z vern $
#
# interconn - generic detection of interactive connections.
@load port-name
@load demux
# The following must be defined for the event engine to generate
# interconn events.
redef interconn_min_interarrival = 0.01 sec;
redef interconn_max_interarrival = 2.0 sec;
redef interconn_max_keystroke_pkt_size = 20;
redef interconn_default_pkt_size = 512;
redef interconn_stat_period = 15.0 sec;
redef interconn_stat_backoff = 1.5;
const interconn_min_num_pkts = 10 &redef; # min num of pkts sent
const interconn_min_duration = 2.0 sec &redef; # min duration for the connection
const interconn_ssh_len_disabled = T &redef;
const interconn_min_ssh_pkts_ratio = 0.6 &redef;
const interconn_min_bytes = 10 &redef;
const interconn_min_7bit_ascii_ratio = 0.75 &redef;
const interconn_min_num_lines = 2 &redef;
const interconn_min_normal_line_ratio = 0.5 &redef;
# alpha: portion of interarrival times within range
# [interconn_min_interarrival, interconn_max_interarrival]
#
# alpha should be >= interconn_min_alpha
#
# gamma: num_keystrokes_two_in_row / num_pkts
# gamma indicates the portion of keystrokes in the overall traffic
#
# gamma should be >= interconn_min_gamma
const interconn_min_alpha = 0.2 &redef; # minimum required alpha
const interconn_min_gamma = 0.2 &redef; # minimum required gamma
const interconn_standard_ports = { telnet, rlogin, ftp, ssh, smtp, 143/tcp, 110/tcp } &redef;
const interconn_ignore_standard_ports = F &redef;
const interconn_demux_disabled = T &redef;
# Classification constants for conn_info$dir and conn_info$interactive.
const INTERCONN_UNKNOWN = 0; # direction/interactivity is unknown
const INTERCONN_FORWARD = 1; # forward: a conn's orig is true originator
const INTERCONN_BACKWARD = 2; # backward: a conn's resp is true originator
const INTERCONN_INTERACTIVE = 1; # a conn is interactive
const INTERCONN_STANDARD_PORT = 2; # conn involves a standard port to ignore
# Per-connection classification state.
type conn_info : record {
interactive: count; # interactivity: unknown/interactive/standard_port
dir: count; # direction: unknown/forward/backward
};
global interconn_conns: table [conn_id] of conn_info; # table for all connections
# Table for resp_endp's of those established (non-partial) conn's.
# If a partial conn connects to one of such resp's, we can infer
# its direction.
global interconn_resps: table [addr, port] of count &default = 0;
global interconn_log = open_log_file("interconn") &redef;
# Counter used to assign unique tags to detected interactive conns.
global num_interconns = 0;
# Render "start orig.port > resp.port" for log and diagnostic lines.
function interconn_conn_string(c: connection): string
	{
	local id = c$id;
	return fmt("%.6f %s.%d > %s.%d", c$start_time,
		id$orig_h, id$orig_p, id$resp_h, id$resp_p);
	}
# Report an internal inconsistency for a connection to stdout.
function interconn_weird(c: connection, s: string)
	{
	local conn_desc = interconn_conn_string(c);
	print fmt("%s interconn_weird: %s %s", network_time(), conn_desc, s);
	}
# Determine (and cache) the true direction of a connection.  For
# partial connections, infer the direction by checking whether either
# endpoint is already known as a responder endpoint.
function get_direction(c: connection): count
{
local id = c$id;
if ( interconn_conns[id]$dir != INTERCONN_UNKNOWN )
return interconn_conns[id]$dir;
# The connection is not established yet, but one endpoint
# is a known resp_endp
if ( [id$resp_h, id$resp_p] in interconn_resps )
{
interconn_conns[id]$dir = INTERCONN_FORWARD;
++interconn_resps[id$resp_h, id$resp_p];
return INTERCONN_FORWARD;
}
else if ( [id$orig_h, id$orig_p] in interconn_resps )
{
interconn_conns[id]$dir = INTERCONN_BACKWARD;
++interconn_resps[id$orig_h, id$orig_p];
return INTERCONN_BACKWARD;
}
return INTERCONN_UNKNOWN;
}
# gamma: keystroke pairs per packet; 0 when the endpoint has too few
# packets for a meaningful ratio.
function comp_gamma(s: interconn_endp_stats): double
	{
	if ( s$num_pkts < interconn_min_num_pkts )
		return 0.0;

	return (1.0 * s$num_keystrokes_two_in_row) / s$num_pkts;
	}
# alpha: fraction of keystroke pairs whose interarrival time fell in
# the configured "normal" range; 0 when no pairs were observed.
function comp_alpha(s: interconn_endp_stats) : double
	{
	if ( s$num_keystrokes_two_in_row == 0 )
		return 0.0;

	return 1.0 * s$num_normal_interarrivals / s$num_keystrokes_two_in_row;
	}
# Deliberate no-op; kept as a hook point (see comment below).
function skip_further_interconn_processing(c: connection)
{
# This used to call skip_further_processing()
# (if active_connection(c$id) returned T). But that's
# clearly wrong *if* we're also doing additional analysis
# on the connection. So do nothing.
}
# Write one line to the interconn log and either stop further
# processing or hand the connection to the demuxer for capture.
function log_interconn(c: connection, tag: string)
	{
	print interconn_log, fmt("%s %s", interconn_conn_string(c), tag);

	if ( interconn_demux_disabled )
		skip_further_interconn_processing(c);
	else
		demux_conn(c$id, tag, "orig", "resp");
	}
# An endpoint counts as interactive when all three criteria hold:
# enough packets, gamma >= interconn_min_gamma, and
# alpha >= interconn_min_alpha.
function is_interactive_endp(s: interconn_endp_stats): bool
	{
	return s$num_pkts >= interconn_min_num_pkts &&
		comp_gamma(s) >= interconn_min_gamma &&
		comp_alpha(s) >= interconn_min_alpha;
	}
# An established connection is by definition forward-directed; record
# that and count its responder endpoint in interconn_resps so partial
# connections to the same endpoint can infer their direction.
event connection_established(c: connection)
{
local id = c$id;
local dir = interconn_conns[id]$dir;
if ( dir == INTERCONN_FORWARD )
return;
if ( dir == INTERCONN_BACKWARD )
{
interconn_weird(c, "inconsistent direction");
return;
}
interconn_conns[id]$dir = INTERCONN_FORWARD;
++interconn_resps[id$resp_h, id$resp_p];
}
# Initialize classification state for every new connection; standard
# ports can optionally be short-circuited out of the analysis.
event new_connection(c: connection)
{
local id = c$id;
local info: conn_info;
info$dir = INTERCONN_UNKNOWN;
if ( interconn_ignore_standard_ports &&
(id$orig_p in interconn_standard_ports ||
id$resp_p in interconn_standard_ports) )
{
info$interactive = INTERCONN_STANDARD_PORT;
skip_further_interconn_processing(c);
}
else
info$interactive = INTERCONN_UNKNOWN;
interconn_conns[id] = info;
}
# Tear down per-connection state and release the reference count on
# the responder endpoint recorded for direction inference.
event interconn_remove_conn(c: connection)
{
local id = c$id;
if ( id !in interconn_conns )
# This can happen for weird connections such as those
# with an initial SYN+FIN packet.
return;
local dir = interconn_conns[id]$dir;
delete interconn_conns[id];
delete demuxed_conn[c$id];
if ( dir == INTERCONN_FORWARD )
{
if ( --interconn_resps[id$resp_h, id$resp_p] == 0 )
delete interconn_resps[id$resp_h, id$resp_p];
}
else if ( dir == INTERCONN_BACKWARD )
{
if ( --interconn_resps[id$orig_h, id$orig_p] == 0 )
delete interconn_resps[id$orig_h, id$orig_p];
}
}
# Periodic per-connection statistics: classify the connection as
# interactive if the direction-appropriate endpoint(s) pass the
# keystroke heuristics, then either the SSH packet-size heuristic or
# the 7-bit-ASCII/line heuristics confirm, and finally log it.
event interconn_stats(c: connection,
os: interconn_endp_stats, rs: interconn_endp_stats)
{
local id = c$id;
if ( id !in interconn_conns )
return;
if ( interconn_conns[id]$interactive != INTERCONN_UNKNOWN )
return; # already classified
if ( c$duration < interconn_min_duration )
# forget about excessively short connections
return;
local dir = get_direction(c);
# Criteria:
#
# if ( dir == FORWARD )
# (os) is interactive
# else if ( dir == BACKWARD )
# (rs) is interactive
# else
# either (os) or (rs) is interactive
if ( dir == INTERCONN_FORWARD )
{
if ( ! is_interactive_endp(os) )
return;
}
else if ( dir == INTERCONN_BACKWARD )
{
if ( ! is_interactive_endp(rs) )
return;
}
else
{
if ( ! is_interactive_endp(os) && ! is_interactive_endp(rs) )
return;
}
local tag: string;
# SSH heuristic (only for partial conns, when enabled): classify by
# the fraction of packets with SSH-typical sizes.
if ( ! interconn_ssh_len_disabled && (os$is_partial || rs$is_partial) )
{
local num_pkts = os$num_pkts + rs$num_pkts;
local num_8k0_pkts = os$num_8k0_pkts + rs$num_8k0_pkts;
local num_8k4_pkts = os$num_8k4_pkts + rs$num_8k4_pkts;
if ( num_8k0_pkts > num_pkts * interconn_min_ssh_pkts_ratio )
{
# c now considered as interactive.
interconn_conns[id]$interactive = INTERCONN_INTERACTIVE;
tag = fmt("interconn.%d.ssh2", ++num_interconns);
}
else if ( num_8k4_pkts > num_pkts * interconn_min_ssh_pkts_ratio )
{
# c now considered as interactive.
interconn_conns[id]$interactive = INTERCONN_INTERACTIVE;
tag = fmt("interconn.%d.ssh1", ++num_interconns);
}
}
# Criteria 4: num_7bit_ascii / num_bytes is big enough; AND
# enough number of normal lines
if ( interconn_conns[id]$interactive != INTERCONN_INTERACTIVE )
{
local num_bytes = os$num_bytes + rs$num_bytes;
local num_7bit_ascii = os$num_7bit_ascii + rs$num_7bit_ascii;
if ( num_bytes < interconn_min_bytes ||
num_7bit_ascii < num_bytes * interconn_min_7bit_ascii_ratio )
return;
local num_lines = os$num_lines + rs$num_lines;
local num_normal_lines = os$num_normal_lines +
rs$num_normal_lines;
if ( num_lines < interconn_min_num_lines ||
num_normal_lines < num_lines * interconn_min_normal_line_ratio )
return;
# c now considered as interactive.
interconn_conns[id]$interactive = INTERCONN_INTERACTIVE;
tag = fmt("interconn.%d", ++num_interconns);
}
log_interconn(c, tag);
}

View file

@ -1,79 +0,0 @@
# $Id: irc-bot-syslog.bro,v 1.1.4.2 2006/05/31 00:16:21 sommer Exp $
#
# Passes current bot-state to syslog.
#
# - When a new server/client is found, we syslog it immediately.
# - Every IrcBot::summary_interval we dump the current set.
@load irc-bot
module IrcBotSyslog;
export {
# Prefix for all messages for easy grepping.
const prefix = "irc-bots" &redef;
}
# For debugging, everything which goes to syslog also goes here.
global syslog_file = open_log_file("irc-bots.syslog");
# Format an absolute time as YYYY-MM-DD-HH-MM-SS for syslog lines.
function fmt_time(t: time) : string
{
return strftime("%Y-%m-%d-%H-%M-%S", t);
}
# Emit one syslog line for a known bot server; "new" distinguishes
# first-time discovery from the periodic state dump.
function log_server(ip: addr, new: bool)
{
local s = IrcBot::servers[ip];
local ports = IrcBot::portset_to_str(s$p);
local msg = fmt("%s ip=%s new=%d local=%d server=1 first_seen=%s last_seen=%s ports=%s",
prefix, ip, new, is_local_addr(ip),
fmt_time(s$first_seen), fmt_time(s$last_seen), ports);
syslog(msg);
print syslog_file, fmt("%.6f %s", network_time(), msg);
}
# Emit one syslog line for a known bot client; "new" distinguishes
# first-time discovery from the periodic state dump.
function log_client(ip: addr, new: bool)
{
local c = IrcBot::clients[ip];
local servers = IrcBot::addrset_to_str(c$servers);
local msg = fmt("%s ip=%s new=%d local=%d server=0 first_seen=%s last_seen=%s user=%s nick=%s realname=%s servers=%s",
prefix, ip, new, is_local_addr(ip),
fmt_time(c$first_seen), fmt_time(c$last_seen),
c$user, c$nick, c$realname, servers);
syslog(msg);
print syslog_file, fmt("%.6f %s", network_time(), msg);
}
# Periodic dump (scheduled by irc-bot) of all confirmed bot hosts.
event print_bot_state()
{
for ( s in IrcBot::confirmed_bot_servers )
log_server(s, F);
for ( c in IrcBot::confirmed_bot_clients )
log_client(c, F);
}
# Make the debug log unbuffered so lines appear immediately.
event bro_init()
{
set_buf(syslog_file, F);
}
# Hook the IrcBot discovery notices: syslog each newly found host
# immediately.  The predicate always returns F so the notice action
# still falls through to NOTICE_FILE.
redef notice_policy += {
[$pred(a: notice_info) =
{
if ( a$note == IrcBot::IrcBotServerFound )
log_server(a$src, T);
if ( a$note == IrcBot::IrcBotClientFound )
log_client(a$src, T);
return F;
},
$result = NOTICE_FILE,
$priority = 1]
};

View file

@ -1,566 +0,0 @@
# $Id:$
# IRC bot detection: tracks IRC clients/servers, flags suspicious
# nicks/commands, and confirms bot pairs when both ends look bad.
@load conn
@load notice
@load weird
module IrcBot;
export {
global detailed_log = open_log_file("irc.detailed") &redef;
global bot_log = open_log_file("irc-bots") &redef;
global summary_interval = 1 min &redef;
global detailed_logging = T &redef;
global content_dir = "irc-bots" &redef;
# Nick patterns typical for bot software.
global bot_nicks =
/^\[([^\]]+\|)+[0-9]{2,}]/ # [DEU|XP|L|00]
| /^\[[^ ]+\]([^ ]+\|)+([0-9a-zA-Z-]+)/ # [0]CHN|3436036 [DEU][1]3G-QE
| /^DCOM[0-9]+$/ # DCOM7845
| /^\{[A-Z]+\}-[0-9]+/ # {XP}-5021040
| /^\[[0-9]+-[A-Z0-9]+\][a-z]+/ # [0058-X2]wpbnlgwf
| /^\[[a-zA-Z0-9]\]-[a-zA-Z0-9]+$/ # [SD]-743056826
| /^[a-z]+[A-Z]+-[0-9]{5,}$/
| /^[A-Z]{3}-[0-9]{4}/ # ITD-1119
;
# Channel-topic command patterns typical for bot controllers.
global bot_cmds =
/(^| *)[.?#!][^ ]{0,5}(scan|ndcass|download|cvar\.|execute|update|dcom|asc|scanall) /
| /(^| +\]\[ +)\* (ipscan|wormride)/
| /(^| *)asn1/
;
# Boilerplate server messages not worth logging.
global skip_msgs =
/.*AUTH .*/
| /.*\*\*\* Your host is .*/
| /.*\*\*\* If you are having problems connecting .*/
;
redef enum Notice += {
IrcBotServerFound,
IrcBotClientFound,
};
type channel: record {
name: string;
passwords: set[string];
topic: string &default="";
topic_history: vector of string;
};
type bot_client: record {
host: addr;
p: port;
nick: string &default="";
user: string &default="";
realname: string &default="";
channels: table[string] of channel;
servers: set[addr] &optional;
first_seen: time;
last_seen: time;
};
type bot_server: record {
host: addr;
p: set[port];
clients: table[addr] of bot_client;
global_users: string &default="";
passwords: set[string];
channels: table[string] of channel;
first_seen: time;
last_seen: time;
};
type bot_conn: record {
client: bot_client;
server: bot_server;
conn: connection;
fd: file;
ircx: bool &default=F;
};
# We keep three sets of clients/servers:
# (1) tables containing all IRC clients/servers
# (2) sets containing potential bot hosts
# (3) sets containing confirmend bot hosts
#
# Hosts are confirmed when a connection is established between
# potential bot hosts.
#
# FIXME: (1) should really be moved into the general IRC script.
global expire_server:
function(t: table[addr] of bot_server, idx: addr): interval;
global expire_client:
function(t: table[addr] of bot_client, idx: addr): interval;
global servers: table[addr] of bot_server &write_expire=24 hrs
&expire_func=expire_server &persistent;
global clients: table[addr] of bot_client &write_expire=24 hrs
&expire_func=expire_client &persistent;
global potential_bot_clients: set[addr] &persistent;
global potential_bot_servers: set[addr] &persistent;
global confirmed_bot_clients: set[addr] &persistent;
global confirmed_bot_servers: set[addr] &persistent;
# All IRC connections.
global conns: table[conn_id] of bot_conn &persistent;
# Connections between confirmed hosts.
global bot_conns: set[conn_id] &persistent;
# Helper functions for readable output.
global strset_to_str: function(s: set[string]) : string;
global portset_to_str: function(s: set[port]) : string;
global addrset_to_str: function(s: set[addr]) : string;
}
# Render a set of strings as comma-separated quoted values, or
# "<none>" for an empty set.
function strset_to_str(s: set[string]) : string
	{
	if ( |s| == 0 )
		return "<none>";

	local parts = "";
	for ( elem in s )
		{
		if ( parts != "" )
			parts = cat(parts, ",");
		parts = cat(parts, fmt("\"%s\"", elem));
		}

	return parts;
	}
# Render a set of ports as comma-separated values, or "<none>" for
# an empty set.
function portset_to_str(s: set[port]) : string
	{
	if ( |s| == 0 )
		return "<none>";

	local parts = "";
	for ( p in s )
		{
		if ( parts != "" )
			parts = cat(parts, ",");
		parts = cat(parts, fmt("%d", p));
		}

	return parts;
	}
# Render a set of addresses as comma-separated values, or "<none>"
# for an empty set.
function addrset_to_str(s: set[addr]) : string
	{
	if ( |s| == 0 )
		return "<none>";

	local parts = "";
	for ( a in s )
		{
		if ( parts != "" )
			parts = cat(parts, ",");
		parts = cat(parts, fmt("%s", a));
		}

	return parts;
	}
# Format an absolute time as YY-MM-DD-HH-MM-SS for the summary log.
function fmt_time(t: time) : string
{
return strftime("%y-%m-%d-%H-%M-%S", t);
}
# Periodically rewrite the full bot-state summary log (servers with
# their channels, then clients), then reschedule itself.
event print_bot_state()
{
local bot_summary_log = open_log_file("irc-bots.summary");
disable_print_hook(bot_summary_log);
print bot_summary_log, "---------------------------";
print bot_summary_log, strftime("%y-%m-%d-%H-%M-%S", network_time());
print bot_summary_log, "---------------------------";
print bot_summary_log;
print bot_summary_log, "Known servers";
for ( h in confirmed_bot_servers )
{
local s = servers[h];
print bot_summary_log,
fmt(" %s %s - clients: %d ports %s password(s) %s last-seen %s first-seen %s global-users %s",
(is_local_addr(s$host) ? "L" : "R"),
s$host, length(s$clients), portset_to_str(s$p),
strset_to_str(s$passwords),
fmt_time(s$last_seen), fmt_time(s$first_seen),
s$global_users);
for ( name in s$channels )
{
local ch = s$channels[name];
print bot_summary_log,
fmt(" channel %s: topic \"%s\", password(s) %s",
ch$name, ch$topic,
strset_to_str(ch$passwords));
}
}
print bot_summary_log, "\nKnown clients";
for ( h in confirmed_bot_clients )
{
local c = clients[h];
print bot_summary_log,
fmt(" %s %s - server(s) %s user %s nick %s realname %s last-seen %s first-seen %s",
(is_local_addr(h) ? "L" : "R"), h,
addrset_to_str(c$servers),
c$user, c$nick, c$realname,
fmt_time(c$last_seen), fmt_time(c$first_seen));
}
close(bot_summary_log);
# Re-arm the periodic dump (0 secs disables it).
if ( summary_interval != 0 secs )
schedule summary_interval { print_bot_state() };
}
# Kick off the periodic state dump (0 secs disables it).
event bro_init()
{
if ( summary_interval != 0 secs )
schedule summary_interval { print_bot_state() };
}
# Unconditionally write one bot-log line for this connection.
function do_log_force(c: connection, msg: string)
	{
	print bot_log, fmt("%.6f %s:%d > %s:%d %s %s",
		network_time(), c$id$orig_h, c$id$orig_p,
		c$id$resp_h, c$id$resp_p, c$addl, msg);
	}
# Log only for connections already confirmed as bot traffic.
function do_log(c: connection, msg: string)
	{
	if ( c$id in bot_conns )
		do_log_force(c, msg);
	}
# Log an IRC message unless it matches the boilerplate skip_msgs
# patterns.
function log_msg(c: connection, cmd: string, prefix: string, msg: string)
{
if ( skip_msgs in msg )
return;
do_log(c, fmt("MSG command=%s prefix=%s msg=\"%s\"", cmd, prefix, msg));
}
# Refresh last_seen on both endpoints of an existing IRC connection
# and re-store them so &write_expire is reset.
function update_timestamps(c: connection) : bot_conn
{
local conn = conns[c$id];
conn$client$last_seen = network_time();
conn$server$last_seen = network_time();
# To prevent the set of entries from premature expiration,
# we need to make a write access (can't use read_expire as we
# iterate over the entries on a regular basis).
clients[c$id$orig_h] = conn$client;
servers[c$id$resp_h] = conn$server;
return conn;
}
# Return the server record for this connection's responder, creating
# an empty one on first sight.
function add_server(c: connection) : bot_server
{
local s_h = c$id$resp_h;
if ( s_h in servers )
return servers[s_h];
local empty_table1: table[addr] of bot_client;
local empty_table2: table[string] of channel;
local empty_set: set[string];
local empty_set2: set[port];
local server = [$host=s_h, $p=empty_set2, $clients=empty_table1,
$channels=empty_table2, $passwords=empty_set,
$first_seen=network_time(), $last_seen=network_time()];
servers[s_h] = server;
return server;
}
# Return the client record for this connection's originator, creating
# an empty one on first sight.
function add_client(c: connection) : bot_client
{
local c_h = c$id$orig_h;
if ( c_h in clients )
return clients[c_h];
local empty_table: table[string] of channel;
local empty_set: set[addr];
local client = [$host=c_h, $p=c$id$resp_p, $servers=empty_set,
$channels=empty_table, $first_seen=network_time(),
$last_seen=network_time()];
clients[c_h] = client;
return client;
}
# Promote a connection to confirmed bot status once both its client
# and server are individually suspected, raising at most one notice
# per newly confirmed host.
# Fix: the original put the record-field assignment "$p=..." inside
# the fmt() argument lists; the port value is passed positionally.
function check_bot_conn(c: connection)
	{
	if ( c$id in bot_conns )
		return;

	local client = c$id$orig_h;
	local server = c$id$resp_h;

	if ( client !in potential_bot_clients || server !in potential_bot_servers )
		return;

	# New confirmed bot_conn.
	add bot_conns[c$id];

	if ( server !in confirmed_bot_servers )
		{
		NOTICE([$note=IrcBotServerFound, $src=server, $p=c$id$resp_p, $conn=c,
			$msg=fmt("ircbot server found: %s:%d", server, c$id$resp_p)]);
		add confirmed_bot_servers[server];
		}

	if ( client !in confirmed_bot_clients )
		{
		NOTICE([$note=IrcBotClientFound, $src=client, $p=c$id$orig_p, $conn=c,
			$msg=fmt("ircbot client found: %s:%d", client, c$id$orig_p)]);
		add confirmed_bot_clients[client];
		}
	}
# Look up (or lazily build) the bot_conn state for this connection,
# cross-linking client and server records and refreshing timestamps.
function get_conn(c: connection) : bot_conn
{
local conn: bot_conn;
if ( c$id in conns )
{
check_bot_conn(c);
return update_timestamps(c);
}
local c_h = c$id$orig_h;
local s_h = c$id$resp_h;
local client : bot_client;
local server : bot_server;
if ( c_h in clients )
client = clients[c_h];
else
client = add_client(c);
if ( s_h in servers )
server = servers[s_h];
else
server = add_server(c);
# Cross-link the endpoints.
server$clients[c_h] = client;
add server$p[c$id$resp_p];
add client$servers[s_h];
conn$server = server;
conn$client = client;
conn$conn = c;
conns[c$id] = conn;
update_timestamps(c);
return conn;
}
# &expire_func for the servers table: unlink the server from all of
# its clients and drop it from the suspicion sets.
function expire_server(t: table[addr] of bot_server, idx: addr): interval
{
local server = t[idx];
for ( c in server$clients )
{
local client = server$clients[c];
delete client$servers[idx];
}
delete potential_bot_servers[idx];
delete confirmed_bot_servers[idx];
return 0secs;
}
# &expire_func for the clients table: unlink the client from all of
# its servers and drop it from the suspicion sets.
function expire_client(t: table[addr] of bot_client, idx: addr): interval
{
local client = t[idx];
for ( s in client$servers )
if ( s in servers )
delete servers[s]$clients[idx];
delete potential_bot_clients[idx];
delete confirmed_bot_clients[idx];
return 0secs;
}
# Drop all per-connection bot state for c.  Callers guard on
# "c$id in conns" before calling.
# Fix: removed the unused local "conn" (the looked-up record was
# never referenced).
function remove_connection(c: connection)
	{
	delete conns[c$id];
	delete bot_conns[c$id];
	}
# Clean up per-connection bot state when the connection goes away.
event connection_state_remove(c: connection)
	{
	if ( c$id in conns )
		remove_connection(c);
	}
# Make both bot log files unbuffered so lines appear immediately.
event bro_init()
{
set_buf(detailed_log, F);
set_buf(bot_log, F);
}
# Log client-side IRC lines and record IRCX capability negotiation.
event irc_client(c: connection, prefix: string, data: string)
{
if ( detailed_logging )
print detailed_log, fmt("%.6f %s > (%s) %s", network_time(), id_string(c$id), prefix, data);
local conn = get_conn(c);
if ( data == /^ *[iI][rR][cC][xX] *$/ )
conn$ircx = T;
}
# Log server-side IRC lines.  get_conn() is invoked for its side
# effects (state creation and timestamp refresh).
event irc_server(c: connection, prefix: string, data: string)
{
if ( detailed_logging )
print detailed_log, fmt("%.6f %s < (%s) %s", network_time(), id_string(c$id), prefix, data);
local conn = get_conn(c);
}
# Record the USER registration details on the client and log them.
event irc_user_message(c: connection, user: string, host: string, server: string, real_name: string)
{
local conn = get_conn(c);
conn$client$user = user;
conn$client$realname = real_name;
do_log(c, fmt("USER user=%s host=%s server=%s real_name=%s", user, host, server, real_name));
}
# Look up (or lazily create) the per-server record for a channel.
function get_channel(conn: bot_conn, channel: string) : channel
	{
	if ( channel in conn$server$channels )
		return conn$server$channels[channel];

	local no_passwords: set[string];
	local no_history: vector of string;
	local ch = [$name=channel, $passwords=no_passwords,
		$topic_history=no_history];
	conn$server$channels[ch$name] = ch;
	return ch;
	}
# Record every joined channel (and any channel password) on both the
# server and the client, and log the join.
event irc_join_message(c: connection, info_list: irc_join_list)
{
local conn = get_conn(c);
for ( i in info_list )
{
local ch = get_channel(conn, i$channel);
if ( i$password != "" )
add ch$passwords[i$password];
conn$client$channels[ch$name] = ch;
do_log(c, fmt("JOIN channel=%s password=%s", i$channel, i$password));
}
}
# URLs harvested from bot channel topics (see irc_channel_topic).
global urls: set[string] &read_expire = 7 days &persistent;
# Flag any HTTP request for a URL previously seen in a bot topic.
event http_request(c: connection, method: string, original_URI: string,
unescaped_URI: string, version: string)
{
if ( original_URI in urls )
do_log_force(c, fmt("Request for URL %s", original_URI));
}
# Track channel topics: a topic matching bot_cmds marks the server as
# a potential bot server; for confirmed bot connections, the topic is
# logged and any embedded URLs are harvested into "urls".
event irc_channel_topic(c: connection, channel: string, topic: string)
{
if ( bot_cmds in topic )
{
do_log_force(c, fmt("Matching TOPIC %s", topic));
add potential_bot_servers[c$id$resp_h];
}
local conn = get_conn(c);
local ch = get_channel(conn, channel);
# NOTE(review): appends the *previous* topic to the history via
# index |topic_history| — confirm this vector indexing is correct
# for this Bro version.
ch$topic_history[|ch$topic_history|] = ch$topic;
ch$topic = topic;
if ( c$id in bot_conns )
{
do_log(c, fmt("TOPIC channel=%s topic=\"%s\"", channel, topic));
local s = split(topic, / /);
for ( i in s )
{
local w = s[i];
if ( w == /[a-zA-Z]+:\/\/.*/ )
{
add urls[w];
do_log(c, fmt("URL channel=%s url=\"%s\"",
channel, w));
}
}
}
}
# Track nick changes: a nick matching bot_nicks marks the client as a
# potential bot client; the new nick is recorded and logged.
event irc_nick_message(c: connection, who: string, newnick: string)
{
if ( bot_nicks in newnick )
{
do_log_force(c, fmt("Matching NICK %s", newnick));
add potential_bot_clients[c$id$orig_h];
}
local conn = get_conn(c);
conn$client$nick = newnick;
do_log(c, fmt("NICK who=%s nick=%s", who, newnick));
}
# Record server passwords offered by clients and log them.
event irc_password_message(c: connection, password: string)
{
local conn = get_conn(c);
add conn$server$passwords[password];
do_log(c, fmt("PASS password=%s", password));
}
# Log PRIVMSGs (subject to the skip_msgs filter in log_msg).
event irc_privmsg_message(c: connection, source: string, target: string,
message: string)
{
log_msg(c, "privmsg", source, fmt("->%s %s", target, message));
}
# Log NOTICEs (subject to the skip_msgs filter in log_msg).
event irc_notice_message(c: connection, source: string, target: string,
message: string)
{
log_msg(c, "notice", source, fmt("->%s %s", target, message));
}
# Store the server's raw "global users" status line and log it.
event irc_global_users(c: connection, prefix: string, msg: string)
{
local conn = get_conn(c);
# Better would be to parse the message to extract the counts.
conn$server$global_users = msg;
log_msg(c, "globalusers", prefix, msg);
}

View file

@ -1,689 +0,0 @@
# $Id: irc.bro 4758 2007-08-10 06:49:23Z vern $
# Generic IRC protocol analysis: session logging plus hot-word
# notices.
@load conn-id
@load notice
@load weird
@load signatures
module IRC;
export {
const log_file = open_log_file("irc") &redef;
# Per-user state, indexed by nick.
type irc_user: record {
u_nick: string; # nick name
u_real: string; # real name
u_host: string; # client host
u_channels: set[string]; # channels the user is member of
u_is_operator: bool; # user is server operator
u_conn: connection; # connection handle
};
# Per-channel state, indexed by channel name.
type irc_channel: record {
c_name: string; # channel name
c_users: set[string]; # users in channel
c_ops: set[string]; # channel operators
c_type: string; # channel type
c_modes: string; # channel modes
c_topic: string; # channel topic
};
global expired_user:
function(t: table[string] of irc_user, idx: string): interval;
global expired_channel:
function(t: table[string] of irc_channel, idx: string): interval;
# Commands to ignore in irc_request/irc_message.
const ignore_in_other_msgs = { "PING", "PONG", "ISON" } &redef;
# Return codes to ignore in irc_response
const ignore_in_other_responses: set[count] = {
303 # RPL_ISON
} &redef;
# Active users, indexed by nick.
global active_users: table[string] of irc_user &read_expire = 6 hrs
&expire_func = expired_user &redef;
# Active channels, indexed by channel name.
global active_channels: table[string] of irc_channel
&read_expire = 6 hrs
&expire_func = expired_channel &redef;
# Strings that generate a notice if found in session dialog.
const hot_words =
/.*etc\/shadow.*/
| /.*etc\/ldap.secret.*/
| /.*phatbot.*/
| /.*botnet.*/
&redef;
redef enum Notice += {
IRC_HotWord,
};
}
# IRC ports. This could be widened to 6660-6669, say.
redef capture_filters += { ["irc-6666"] = "port 6666" };
redef capture_filters += { ["irc-6667"] = "port 6667" };
# DPM configuration.
global irc_ports = { 6666/tcp, 6667/tcp } &redef;
redef dpd_config += { [ANALYZER_IRC] = [$ports = irc_ports] };
# Downgrade these analyzer weirds to file-only logging.
redef Weird::weird_action += {
["irc_invalid_dcc_message_format"] = Weird::WEIRD_FILE,
["irc_invalid_invite_message_format"] = Weird::WEIRD_FILE,
["irc_invalid_kick_message_format"] = Weird::WEIRD_FILE,
["irc_invalid_line"] = Weird::WEIRD_FILE,
["irc_invalid_mode_message_format"] = Weird::WEIRD_FILE,
["irc_invalid_names_line"] = Weird::WEIRD_FILE,
["irc_invalid_njoin_line"] = Weird::WEIRD_FILE,
["irc_invalid_notice_message_format"] = Weird::WEIRD_FILE,
["irc_invalid_oper_message_format"] = Weird::WEIRD_FILE,
["irc_invalid_privmsg_message_format"] = Weird::WEIRD_FILE,
["irc_invalid_reply_number"] = Weird::WEIRD_FILE,
["irc_invalid_squery_message_format"] = Weird::WEIRD_FILE,
["irc_invalid_who_line"] = Weird::WEIRD_FILE,
["irc_invalid_who_message_format"] = Weird::WEIRD_FILE,
["irc_invalid_whois_channel_line"] = Weird::WEIRD_FILE,
["irc_invalid_whois_message_format"] = Weird::WEIRD_FILE,
["irc_invalid_whois_operator_line"] = Weird::WEIRD_FILE,
["irc_invalid_whois_user_line"] = Weird::WEIRD_FILE,
["irc_line_size_exceeded"] = Weird::WEIRD_FILE,
["irc_line_too_short"] = Weird::WEIRD_FILE,
["irc_partial_request"] = Weird::WEIRD_FILE,
["irc_too_many_invalid"] = Weird::WEIRD_FILE,
};
# # IRC servers to identify server-to-server connections.
# redef irc_servers = {
# # German IRCnet servers
# irc.leo.org,
# irc.fu-berlin.de,
# irc.uni-erlangen.de,
# irc.belwue.de,
# irc.freenet.de,
# irc.tu-ilmenau.de,
# irc.rz.uni-karlsruhe.de,
# };
# Per-connection log-line numbering.
global conn_list: table[conn_id] of count;
global conn_ID = 0;
# Defined elsewhere; registers a connection in conn_list.
global check_connection: function(c: connection);
# Raises an IRC_HotWord notice if s matches one of the hot_words
# patterns (effectively a substring test, since the patterns embed .*
# on both sides).  context is included verbatim in the notice message.
function irc_check_hot(c: connection, s: string, context: string)
{
if ( s == hot_words )
NOTICE([$note=IRC_HotWord, $conn=c,
$msg=fmt("IRC hot word in: %s", context)]);
}
# Writes a timestamped, connection-numbered line to the IRC log.
# Assumes check_connection() has already registered c$id in conn_list;
# otherwise the table lookup would fail.
function log_activity(c: connection, msg: string)
{
print log_file, fmt("%.6f #%s %s",
network_time(), conn_list[c$id], msg);
}
# Drop per-connection tracking state when Bro removes the connection.
event connection_state_remove(c: connection)
{
delete conn_list[c$id];
}
# Client-to-server command: hot-word-check both the command and its
# arguments, then log it unless the command is in the ignore set.
event irc_request(c: connection, prefix: string,
command: string, arguments: string)
{
check_connection(c);
local context = fmt("%s %s", command, arguments);
irc_check_hot(c, command, context);
irc_check_hot(c, arguments, context);
if ( command !in ignore_in_other_msgs )
log_activity(c, fmt("other request%s%s: %s",
prefix == "" ? "" : " ",
prefix, context));
}
# Numeric server reply: hot-word-check the parameters and log the reply
# unless its numeric code is in the ignore set.
event irc_reply(c: connection, prefix: string, code: count, params: string)
{
check_connection(c);
local context = fmt("%s %s", code, params);
irc_check_hot(c, params, context);
if ( code !in ignore_in_other_responses )
log_activity(c, fmt("other response from %s: %s",
prefix, context));
}
# Generic server-to-client message: sanity-check that this really is
# IRC, hot-word-check command and payload, and log it unless filtered.
event irc_message(c: connection, prefix: string,
command: string, message: string)
{
check_connection(c);
# Sanity checks whether this is indeed IRC.
#
# If we happen to parse an HTTP connection, the server "commands" will
# end with ":".
if ( command == /.*:$/ )
{
# Tell the DPD framework this analyzer mis-fired on the connection.
local aid = current_analyzer();
event protocol_violation(c, ANALYZER_IRC, aid, "broken server response");
return;
}
local context = fmt("%s %s", command, message);
irc_check_hot(c, command, context);
irc_check_hot(c, message, context);
if ( command !in ignore_in_other_msgs )
log_activity(c, fmt("other server message from %s: %s",
prefix, context));
}
# USER registration: log it and create or refresh the tracked user
# record keyed by username.  Note u_host is left empty here; WHO/WHOIS
# handlers fill it in later.
event irc_user_message(c: connection, user: string, host: string,
server: string, real_name: string)
{
check_connection(c);
log_activity(c, fmt("new user, user='%s', host='%s', server='%s', real = '%s'",
user, host, server, real_name));
if ( user in active_users )
active_users[user]$u_conn = c;
else
{
local u: irc_user;
u$u_nick = user;
u$u_real = real_name;
u$u_conn = c;
u$u_host = "";
u$u_is_operator = F;
active_users[user] = u;
}
}
# QUIT: log the departure and scrub the nick from all tracked state.
event irc_quit_message(c: connection, nick: string, message: string)
	{
	check_connection(c);
	log_activity(c, fmt("user '%s' leaving%s", nick,
			message == "" ? "" : fmt(", \"%s\"", message)));

	# Remove the user from the global table and from every channel's
	# membership *and* operator sets.  Fix: the original deleted only
	# c_users, leaving stale c_ops entries behind (compare
	# irc_part_message and expired_user, which clear both).
	if ( nick in active_users )
		{
		delete active_users[nick];
		for ( my_channel in active_channels )
			{
			delete active_channels[my_channel]$c_users[nick];
			delete active_channels[my_channel]$c_ops[nick];
			}
		}
	}
# Common handler for PRIVMSG/NOTICE/SQUERY: hot-word-check the payload
# (the message doubles as its own notice context) and log it with the
# given message-type label.
function check_message(c: connection, source: string, target: string,
msg: string, msg_type: string)
{
check_connection(c);
irc_check_hot(c, msg, msg)
log_activity(c, fmt("%s%s to '%s': %s", msg_type,
source != "" ? fmt(" from '%s'", source) : "",
target, msg));
}
# PRIVMSG: delegate to the common message handler.
event irc_privmsg_message(c: connection, source: string, target: string,
message: string)
{
check_message(c, source, target, message, "message");
}
# NOTICE: delegate to the common message handler.
event irc_notice_message(c: connection, source: string, target: string,
message: string)
{
check_message(c, source, target, message, "notice");
}
# SQUERY (service query): delegate to the common message handler.
event irc_squery_message(c: connection, source: string, target: string,
message: string)
{
check_message(c, source, target, message, "squery");
}
# JOIN: log each join and record the user<->channel relationship in
# both the per-user and per-channel tables.
event irc_join_message(c: connection, info_list: irc_join_list)
	{
	check_connection(c);
	for ( l in info_list )
		{
		# Fix: the password clause previously lacked a leading space,
		# producing "... joined '#chan'with password ...".
		log_activity(c, fmt("user '%s' joined '%s'%s",
				l$nick, l$channel,
				l$password != "" ?
					fmt(" with password '%s'",
						l$password) : ""));

		# Without a nick there is no state to update.
		if ( l$nick == "" )
			next;

		# Record the channel for the user, creating the user if new.
		if ( l$nick in active_users )
			add (active_users[l$nick]$u_channels)[l$channel];
		else
			{
			local user: irc_user;
			user$u_nick = l$nick;
			user$u_real = "";
			user$u_conn = c;
			user$u_host = "";
			user$u_is_operator = F;
			add user$u_channels[l$channel];
			active_users[l$nick] = user;
			}

		# Add channel to lists, creating the channel if new.
		if ( l$channel in active_channels )
			add (active_channels[l$channel]$c_users)[l$nick];
		else
			{
			local my_c: irc_channel;
			my_c$c_name = l$channel;
			add my_c$c_users[l$nick];
			my_c$c_type = my_c$c_modes = "";
			active_channels[l$channel] = my_c;
			}
		}
	}
# PART: log the channels being left and scrub the nick from every
# channel's membership and operator sets (and the user's channel set).
event irc_part_message(c: connection, nick: string,
chans: string_set, message: string)
{
check_connection(c);
# Build a comma-separated list of the channels for the log line.
local channel_str = "";
for ( ch in chans )
channel_str = channel_str == "" ?
ch : fmt("%s, %s", channel_str, ch);
log_activity(c, fmt("%s channel '%s'%s",
nick == "" ? "leaving" :
fmt("user '%s' leaving", nick),
channel_str,
message == "" ?
"" : fmt("with message '%s'", message)));
# Remove user from channel.
if ( nick == "" )
return;
# Note: this iterates over *all* channels, not just chans.
for ( ch in active_channels )
{
delete (active_channels[ch]$c_users)[nick];
delete (active_channels[ch]$c_ops)[nick];
if ( nick in active_users )
delete (active_users[nick]$u_channels)[ch];
}
}
# NICK change: log the old nick (when known) and the new one.
# NOTE(review): active_users is not re-keyed here, so tracked state
# stays under the old nick - confirm whether that is intended.
event irc_nick_message(c: connection, who: string, newnick: string)
{
check_connection(c);
log_activity(c, fmt("%s nick name to '%s'",
who == "" ? "changing" :
fmt("user '%s' changing", who),
newnick));
}
# The server rejected a nick change; just log it.
event irc_invalid_nick(c: connection)
{
check_connection(c);
log_activity(c, "changing nick name failed");
}
# Network-wide statistics reported by the server (LUSERS-style reply).
event irc_network_info(c: connection, users: count, services: count,
servers: count)
{
check_connection(c);
log_activity(c, fmt("network includes %d users, %d services, %d servers",
users, services, servers));
}
# Per-server statistics reported by the server.
event irc_server_info(c: connection, users: count, services: count,
servers: count)
{
check_connection(c);
log_activity(c, fmt("server includes %d users, %d services, %d peers",
users, services, servers));
}
# Channel-count statistic reported by the server.
event irc_channel_info(c: connection, chans: count)
{
check_connection(c);
log_activity(c, fmt("network includes %d channels", chans));
}
# One line of a WHO reply: log it and fold the information into the
# user/channel tracking tables.
# NOTE(review): when the nick is already known, only u_conn is
# refreshed; the channel membership update happens only in the
# previously-unknown branch - confirm whether that is intended.
event irc_who_line(c: connection, target_nick: string, channel: string,
user: string, host: string, server: string,
nick: string, params: string, hops: count,
real_name: string)
{
check_connection(c);
log_activity(c, fmt("channel '%s' includes '%s' on %s connected to %s with nick '%s', real name '%s', params %s",
channel, user, host, server,
nick, real_name, params));
# Without both a nick and a channel there is nothing to track.
if ( nick == "" || channel == "" )
return;
if ( nick in active_users )
active_users[nick]$u_conn = c;
else
{
# Unknown nick: create a full user record from the WHO data.
local myuser: irc_user;
myuser$u_nick = nick;
myuser$u_real = real_name;
myuser$u_conn = c;
myuser$u_host = host;
myuser$u_is_operator = F;
add myuser$u_channels[channel];
active_users[nick] = myuser;
if ( channel in active_channels )
add (active_channels[channel]$c_users)[nick];
else
{
local my_c: irc_channel;
my_c$c_name = channel;
add my_c$c_users[nick];
my_c$c_type = "";
my_c$c_modes = "";
active_channels[channel] = my_c;
}
}
}
# WHO query: log the search mask and whether it was operator-restricted.
event irc_who_message(c: connection, mask: string, oper: bool)
{
check_connection(c);
log_activity(c, fmt("WHO with mask %s%s", mask,
oper ? ", only operators" : ""));
}
# WHOIS query: log the queried users and, when the query was directed
# at a particular server, which server that was.
event irc_whois_message(c: connection, server: string, users: string)
	{
	check_connection(c);

	# Empty server means the query was not directed anywhere specific.
	local server_part = "";
	if ( server != "" )
		server_part = fmt(" to server %s", server);

	log_activity(c, fmt("WHOIS%s for user(s) %s", server_part, users));
	}
# WHOIS user line: log it and update (or create) the tracked user
# record with the reported real name and host.
event irc_whois_user_line(c: connection, nick: string,
user: string, host: string, real_name: string)
{
check_connection(c);
log_activity(c, fmt("user '%s' with nick '%s' on host %s has real name '%s'",
user, nick, host, real_name));
if ( nick in active_users )
{
active_users[nick]$u_real = real_name;
active_users[nick]$u_host = host;
}
else
{
local u: irc_user;
u$u_nick = nick;
u$u_real = real_name;
u$u_conn = c;
u$u_host = host;
u$u_is_operator = F;
active_users[nick] = u;
}
}
# WHOIS operator line: log it and mark the tracked user (creating the
# record if needed) as an IRC operator.
event irc_whois_operator_line(c: connection, nick: string)
{
check_connection(c);
log_activity(c, fmt("user '%s' is an IRC operator", nick));
if ( nick in active_users )
active_users[nick]$u_is_operator = T;
else
{
local u: irc_user;
u$u_nick = nick;
u$u_real = "";
u$u_conn = c;
u$u_host = "";
u$u_is_operator = T;
active_users[nick] = u;
}
}
# WHOIS channel line: log the user's channel list and cross-link the
# user and channel tracking tables.
event irc_whois_channel_line(c: connection, nick: string, chans: string_set)
{
check_connection(c);
local message = fmt("user '%s' is on channels:", nick);
for ( channel in chans )
message = fmt("%s %s", message, channel);
log_activity(c, message);
if ( nick in active_users )
{
for ( ch in chans )
add active_users[nick]$u_channels[ch];
}
else
{
local u: irc_user;
u$u_nick = nick;
u$u_real = "";
u$u_conn = c;
u$u_host = "";
u$u_is_operator = F;
# NOTE(review): stores the chans set itself rather than a copy -
# confirm aliasing with the caller's set is acceptable.
u$u_channels = chans;
active_users[nick] = u;
}
# Record the nick in every listed channel, creating channels as needed.
for ( ch in chans )
{
if ( ch in active_channels )
add (active_channels[ch]$c_users)[nick];
else
{
local my_c: irc_channel;
my_c$c_name = ch;
add my_c$c_users[nick];
my_c$c_type = "";
my_c$c_modes = "";
active_channels[ch] = my_c;
}
}
}
# OPER request.  Note: the cleartext password is written to the log.
event irc_oper_message(c: connection, user: string, password: string)
{
check_connection(c);
log_activity(c, fmt("user requests operator status with name '%s', password '%s'",
user, password));
}
# Outcome of an OPER request.
event irc_oper_response(c: connection, got_oper: bool)
{
check_connection(c);
log_activity(c, fmt("user %s operator status",
got_oper ? "received" : "did not receive"));
}
# KICK: log who asked to remove whom from which channel(s).
event irc_kick_message(c: connection, prefix: string, chans: string,
users: string, comment: string)
{
check_connection(c);
log_activity(c, fmt("user '%s' requested to kick '%s' from channel(s) %s with comment %s",
prefix, users, chans, comment));
}
# ERROR message from the server: log it with its origin when known.
event irc_error_message(c: connection, prefix: string, message: string)
	{
	check_connection(c);
	# Fix: added the leading space before "from" (the log previously
	# read "error messagefrom 'x': ..."); matches irc_mode_message and
	# irc_squit_message.
	log_activity(c, fmt("error message%s: %s",
			prefix == "" ? "" : fmt(" from '%s'", prefix),
			message));
	}
# INVITE: log who was invited to which channel, and by whom if known.
event irc_invite_message(c: connection, prefix: string,
nickname: string, channel: string)
{
check_connection(c);
log_activity(c, fmt("'%s' invited to channel %s%s",
nickname, channel,
prefix == "" ? "" : fmt(" by %s", prefix)));
}
# MODE command: log the raw mode parameters.
event irc_mode_message(c: connection, prefix: string, params: string)
{
check_connection(c);
log_activity(c, fmt("mode command%s: %s",
prefix == "" ? "" : fmt(" from '%s'", prefix),
params));
}
# SQUIT: an attempt to disconnect a server from the network.
event irc_squit_message(c: connection, prefix: string,
server: string, message: string)
{
check_connection(c);
log_activity(c, fmt("server disconnect attempt%s for %s with comment %s",
prefix == "" ? "" : fmt(" from '%s'", prefix),
server, message));
}
# NAMES reply: log the channel's user list and fold every listed nick
# into the user/channel tracking tables.
event irc_names_info(c: connection, c_type: string, channel: string,
users: string_set)
{
check_connection(c);
# Decode the channel-visibility flag ("@" secret, "*" private).
local chan_type =
c_type == "@" ? "secret" :
(c_type == "*" ? "private" : "public");
local message = fmt("channel '%s' (%s) contains users:",
channel, chan_type);
for ( user in users )
message = fmt("%s %s", message, user);
log_activity(c, message);
# Merge the users into the channel record, creating it if needed.
if ( channel in active_channels )
{
for ( u in users )
add (active_channels[channel]$c_users)[u];
}
else
{
local my_c: irc_channel;
my_c$c_name = channel;
my_c$c_users = users;
my_c$c_type = "";
my_c$c_modes = "";
active_channels[channel] = my_c;
}
# Record the channel for every listed nick, creating users as needed.
for ( nick in users )
{
if ( nick in active_users )
add (active_users[nick]$u_channels)[channel];
else
{
local usr: irc_user;
usr$u_nick = nick;
usr$u_real = "";
usr$u_conn = c;
usr$u_host = "";
usr$u_is_operator = F;
add usr$u_channels[channel];
active_users[nick] = usr;
}
}
}
# DCC invitation (direct client-to-client transfer/chat): log the
# target host/port, plus filename and size for SEND offers.
event irc_dcc_message(c: connection, prefix: string, target: string,
dcc_type: string, argument: string,
address: addr, dest_port: count, size: count)
{
check_connection(c);
log_activity(c, fmt("DCC %s invitation for '%s' to host %s on port %s%s",
dcc_type, target, address, dest_port,
dcc_type == "SEND" ?
fmt(" (%s: %s bytes)", argument, size) :
""));
}
# TOPIC: log a channel's topic.
event irc_channel_topic(c: connection, channel: string, topic: string)
{
check_connection(c);
log_activity(c, fmt("topic for %s is '%s'", channel, topic));
}
# PASS command.  Note: the cleartext password is written to the log.
event irc_password_message(c: connection, password: string)
{
check_connection(c);
log_activity(c, fmt("password %s", password));
}
# Table-expiration callback for active_users: before the entry goes
# away, scrub the nick from the membership and operator sets of every
# channel it was on.  Returning 0 secs lets the entry expire now.
function expired_user(t: table[string] of irc_user, idx: string): interval
{
for ( my_c in active_users[idx]$u_channels )
{
# Batch the deletes so intermediate state isn't propagated.
suspend_state_updates();
delete active_channels[my_c]$c_users[idx];
delete active_channels[my_c]$c_ops[idx];
resume_state_updates();
}
return 0 secs;
}
# Table-expiration callback for active_channels: remove the channel
# from the channel set of every user known to be on it.
function expired_channel(t:table[string] of irc_channel, idx: string): interval
{
for ( my_u in active_channels[idx]$c_users )
if ( my_u in active_users )
delete active_users[my_u]$u_channels[idx];
# Else is there a possible state leak? How could it not
# be in active_users? Yet sometimes it isn't, which
# is why we needed to add the above test.
return 0 secs;
}
# Lazily registers a connection: assigns it the next sequential ID,
# tags the connection's addl field with "#N", and logs it.  Idempotent
# for already-registered connections.
function check_connection(c: connection)
{
if ( c$id !in conn_list )
{
++conn_ID;
append_addl(c, fmt("#%d", conn_ID));
conn_list[c$id] = conn_ID;
log_activity(c, fmt("new connection %s", id_string(c$id)));
}
}

View file

@ -1,336 +0,0 @@
# $Id: large-conns.bro 1332 2005-09-07 17:39:17Z vern $
# Written by Chema Gonzalez.
# Estimates the size of large "flows" (i.e., each direction of a TCP
# connection) by noting when their sequence numbers cross a set of regions
# in the sequence space. This can be done using a static packet filter,
# so is very efficient. It works for (TCP) traffic that Bro otherwise doesn't
# see.
# Usage
#
# 1) Set the appropriate number_of_regions and region_size:
#
# Modify the number_of_regions and (perhaps) region_size global
# variables. You do this *prior* to loading this script, so
# for example:
#
# const number_of_regions = 32;
# @load large-conns
#
# You do *not* redef them like you would with other script variables
# (this is because they need to be used directly in the initializations
# of other variables used by this script).
#
# Note that number_of_regions affects the granularity
# and definition of the script (see below).
#
# 2) To get an estimate of the true size of a flow, call:
#
# function estimate_flow_size_and_remove(cid: conn_id, orig: bool):
# flow_size_est
#
# If orig=T, then an estimate of the size of the forward (originator)
# direction is returned. If orig=F, then the reverse (responder)
# direction is returned. In both cases, what's returned is a
# flow_size_est, which includes a flag indicating whether there was
# any estimate formed, and, if the flag is T, a lower bound, an upper bound,
# and an inconsistency-count (which, if > 0, means that the estimates
# came from sequence numbers that were inconsistent, and thus something
# is wrong - perhaps packet drops by the secondary filter). Finally,
# calling this function causes the flow's record to be deleted. Perhaps
# at some point we'll need to add a version that just retrieves the
# estimate.
# Result of estimate_flow_size_and_remove().  When have_est is F the
# optional fields are unset.
type flow_size_est: record {
have_est: bool;
lower: double &optional;	# lower bound on bytes transferred
upper: double &optional;	# upper bound on bytes transferred
num_inconsistent: count &optional;	# > 0 => inconsistent seq observations
};
# Forward declaration of the public entry point; defined below in
# module LargeConn.
global estimate_flow_size_and_remove:
function(cid: conn_id, orig: bool): flow_size_est;
module LargeConn;
# Rationale
#
# One of the mechanisms that Bro uses to detect large TCP flows is
# to calculate the difference in the sequence number (seq) field contents
# between the last packet (FIN or RST) and the first packet (SYN). This
# method may be wrong if a) the seq number is busted (which can happen
# frequently with RST termination), or b) the seq number wraps around
# the 4GB sequence number space (note that this is OK for TCP while
# there is no ambiguity on what a packet's sequence number means,
# due to its use of a window <= 2 GB in size).
#
# The purpose of this script is to resolve these ambiguities. In other
# words, help with differentiating truly large flows from flows with
# a busted seq, and detecting very large flows that wrap around the
# 4GB seq space.
#
# To do so, large-flow listens to a small group of thin regions in
# the sequence space, located at equal distances from each other. The idea
# is that a truly large flow will pass through the regions in
# an orderly fashion, maybe several times. This script keeps track of
# all packets that pass through any of the regions, counting the number
# of times a packet from a given flow passes through consecutive regions.
#
# Note that the exact number of regions, and the size of each region, can
# be controlled by redefining the global variables number_of_regions
# and region_size, respectively. Both should be powers of two (if not,
# they are rounded to be such), and default to 4 and 16KB, respectively.
# The effect of varying these parameters is the following:
#
# - Increasing number_of_regions will increase the granularity of the
# script, at the cost of elevating its cost in both processing (more
# packets will be seen) and memory (more flows will be seen).
# The granularity of the script is defined as the minimum variation
# in size the script can see. Its value is:
#
# granularity = (4GB / number_of_regions)
#
# For example, if we're using 4 regions, the minimum flow size difference
# that the script can see is 1GB.
#
# number_of_regions also affects the script definition, defined as the
# smallest size of a flow which ensures that the flow will be seen by
# the script. The script definition is:
#
# definition = (2 * granularity)
#
# The script sees no flow smaller than the granularity, some flows with
# size between granularity and definition, and all flows larger than
# definition. In our example, the script definition is 2GB (it will see
# for sure only flows bigger than 2GB).
#
# - Increasing region_size will only increase the resilience of the script
# to lost packets, at the cost of augmenting the cost in both processing
# and memory (see above). The default value of 16 KB is chosen to work
# in the presence of largish packets without too much additional work.
# Set up defaults, unless the user has already specified these. Note that
# these variables are *not* redef'able, since they are used in initializations
# later in this script (so a redef wouldn't be "seen" in time).
@ifndef ( number_of_regions )
const number_of_regions = 4;
@endif
@ifndef ( region_size )
const region_size = 16 * 1024; # 16 KB
@endif
# Track the regions visited for each flow.
type t_info: record {
last_region: count; # last region visited
num_regions: count; # number of regions visited
num_inconsistent: count; # num. inconsistent region crossings
};
# The state expiration for this table needs to be generous, as it's
# for tracking very large flows, which could be quite long-lived.
global flow_region_info: table[conn_id] of t_info &write_expire = 6 hr;
# Returns the integer logarithm in base b.
# Integer (floor) logarithm of x in the given base, computed
# recursively since classic Bro has no ordered loop construct.
function logarithm(base: count, x: count): count
{
if ( x < base )
return 0;
else
return 1 + logarithm(base, x / base);
}
# Function used to get around Bro's lack of real ordered loop.
# A fold: effectively "for j in [i, max): total = f(j, total)", then
# returns total.  Implemented as tail recursion (no ordered loops in
# classic Bro).
function do_while(i: count, max: count, total: count,
f: function(i: count, total: count): count): count
{
if ( i >= max )
return total;
else
# The "++i ... --i" pair relies on left-to-right argument
# evaluation: the recursive call gets the incremented index while
# f still sees the current one.  Do not reorder these arguments.
return do_while(++i, max, f(--i, total), f);
}
# Fold step: shift in a 1 bit (builds a contiguous run of mask bits).
function fn_mask_location(i: count, total: count): count
{
return total * 2 + 1;
}
# Fold step: shift in alternating 1/0 bits.
function fn_filter_location(i: count, total: count): count
{
# The location pattern is 1010101010...
return total * 2 + (i % 2 == 0 ? 1 : 0);
}
# Fold step: shift in a 0 bit (multiply by two).
function fn_common_region_size(i: count, total: count): count
{
return total * 2;
}
# Distance in sequence-number space between consecutive regions:
# 2^(32 - log2(number_of_regions)), i.e. 4GB / number_of_regions.
# NOTE(review): the region_size parameter is unused here - confirm
# whether that is intentional.
function get_interregion_distance(number_of_regions: count,
region_size: count): count
{
local bits_number_of_regions = logarithm(2, number_of_regions);
local bits_other = int_to_count(32 - bits_number_of_regions);
return do_while(0, bits_other, 1, fn_common_region_size);
}
# Computed once at parse time from the configured parameters.
global interregion_distance =
get_interregion_distance(number_of_regions, region_size);
# Returns an estimate of the size of the flow (one direction of a TCP connection)
# that this script has seen. This is based on the number of consecutive
# regions a flow has visited, weighted with the distance between regions.
#
# We know that the full sequence number space accounts for 4GB. This
# space comprises number_of_regions regions, separated from each other
# a (4GB / number_of_regions) distance. If a flow has been seen
# in X consecutive regions, it means that the size of the flow is
# greater than ((X - 1) * distance_between_regions) GB.
#
# Note that seeing a flow in just one region is no different from
# not seeing it at all.
# Public entry point: returns the size estimate for one direction of
# the connection (orig=T forward, orig=F reverse) and deletes the
# flow's tracking record.  Returns have_est=F when nothing was seen.
function estimate_flow_size_and_remove(cid: conn_id, orig: bool): flow_size_est
{
# For the responder direction, look up the reversed connection ID.
local id = orig ? cid :
[$orig_h = cid$resp_h, $orig_p = cid$resp_p,
$resp_h = cid$orig_h, $resp_p = cid$orig_p];
if ( id !in flow_region_info )
return [$have_est = F];
# N visited regions => at least N-1 inter-region gaps were crossed.
local regions_crossed =
int_to_count(flow_region_info[id]$num_regions - 1);
local lower = regions_crossed * interregion_distance * 1.0;
local upper = lower + interregion_distance * 2.0;
local num_inconsis = flow_region_info[id]$num_inconsistent;
delete flow_region_info[id];
return [$have_est = T, $lower = lower, $upper = upper,
$num_inconsistent = num_inconsis];
}
# Returns a tcpdump filter corresponding to the number of regions and
# region size requested by the user.
#
# How to calculate the tcpdump filter used to hook packet_event to the
# secondary filter system? We are interested only in TCP packets whose
# seq number belongs to any of the test slices. Let's focus on the case
# of 4 regions, 16KB per region.
#
# The mask should be: [ x x L L L ... L L L x x ... x ]
# <---><---------------><--------->
# | | |
# | | +-> suffix: region size
# | +-> location: remaining bits
# +-> prefix: number of equidistant regions
#
# The 32-bit seq number is masked as follows:
#
# - suffix: defines size of the regions (16KB implies log_2(16KB) = 14 bits)
#
# - location: defines the exact location of the 4 regions. Note that, to
# minimize the amount of data we keep, the location will be distinct from
# zero, so segments with seq == 0 are not in a valid region
#
# - prefix: defines number of regions (4 implies log_2(4) = 2 bits)
#
# E.g., the mask will be seq_number & 0011...1100..00_2 = 00LL..LL00..00_2,
# which, by setting the location to 1010101010101010, will finally be
# seq_number & 0011...1100..00_2 = 00101010101010101000..00_2, i.e.,
# seq_number & 0x3fffc000 = 0x2aaa8000.
#
# For that particular parameterization, we'd like to wind up with a
# packet event filter of "(tcp[4:4] & 0x3fffc000) == 0x2aaa8000".
# Builds the tcpdump expression matching packets whose TCP sequence
# number falls into one of the monitored regions; see the mask layout
# described in the comment block above.
function get_event_filter(number_of_regions: count, region_size: count): string
{
local bits_number_of_regions = logarithm(2, number_of_regions);
local bits_region_size = logarithm(2, region_size);
local bits_remaining =
int_to_count(32 - bits_number_of_regions - bits_region_size);
# Set the bits corresponding to the location:
# i = 0;
# while ( i < bits_remaining )
# {
# mask = (mask * 2) + 1;
# filter = (filter * 2) + (((i % 2) == 0) ? 1 : 0);
# ++i;
# }
local mask = do_while(0, bits_remaining, 0, fn_mask_location);
local filter = do_while(0, bits_remaining, 0, fn_filter_location);
# Set the bits corresponding to the region size
# i = 0;
# while ( i < bits_region_size )
# {
# mask = mask * 2;
# filter = filter * 2;
# ++i;
# }
mask = do_while(0, bits_region_size, mask, fn_common_region_size);
filter = do_while(0, bits_region_size, filter, fn_common_region_size);
# tcp[4:4] is the raw 32-bit sequence number field.
return fmt("(tcp[4:4] & 0x%x) == 0x%x", mask, filter);
}
# packet_event --
#
# This event is raised once per (TCP) packet falling into any of the regions.
# It updates the flow_region_info table.
# Raised by the secondary filter once per TCP packet whose sequence
# number falls inside a monitored region; updates flow_region_info.
event packet_event(filter: string, pkt: pkt_hdr)
{
# Distill the region from the seq number.
local region = pkt$tcp$seq / interregion_distance;
# Get packet info and update global counters.
local cid = [$orig_h = pkt$ip$src, $orig_p = pkt$tcp$sport,
$resp_h = pkt$ip$dst, $resp_p = pkt$tcp$dport];
# First sighting of this flow direction: start tracking it.
if ( cid !in flow_region_info )
{
flow_region_info[cid] =
[$last_region = region, $num_regions = 1,
$num_inconsistent = 0];
return;
}
local info = flow_region_info[cid];
# Regions are expected to be visited cyclically in order.
local next_region = (info$last_region + 1) % number_of_regions;
if ( region == next_region )
{ # flow seen in the next region
info$last_region = region;
++info$num_regions;
}
else if ( region == info$last_region )
{ # flow seen in the same region, ignore
}
else
{
# Flow seen in another region (not the next one).
info$last_region = region;
info$num_regions = 1; # restart accounting
++info$num_inconsistent;
}
}
# Glue the filter into the secondary filter hookup.
global packet_event_filter = get_event_filter(number_of_regions, region_size);
redef secondary_filters += { [packet_event_filter] = packet_event };

View file

@ -1,16 +0,0 @@
# $Id: listen-clear.bro 416 2004-09-17 03:52:28Z vern $
#
# Listen for other Bros (non-SSL).
@load remote
# On which port to listen.
const listen_port_clear = Remote::default_port_clear &redef;
# On which IP to bind (0.0.0.0 for any interface)
const listen_if_clear = 0.0.0.0 &redef;
# Start listening for remote (non-SSL) Bro peers at startup.
event bro_init()
{
listen(listen_if_clear, listen_port_clear, F);
}

View file

@ -1,16 +0,0 @@
# $Id: listen-ssl.bro 1015 2005-01-31 13:46:50Z kreibich $
#
# Listen for other Bros (SSL).
@load remote
# On which port to listen.
const listen_port_ssl = Remote::default_port_ssl &redef;
# On which IP to bind (0.0.0.0 for any interface)
const listen_if_ssl = 0.0.0.0 &redef;
# Start listening for remote (SSL) Bro peers at startup.
event bro_init()
{
listen(listen_if_ssl, listen_port_ssl, T);
}

View file

@ -1,194 +0,0 @@
# $Id: load-level.bro 1904 2005-12-14 03:27:15Z vern $
#
# Support for shedding/reinstating load.
@load notice
# If no load_level is given, a filter is always activated.
#
# If a level is given for a filter (using the same ID than in
# {capture,restrict}_filter), then:
#
# - a capture_filter is activated if current load_level is <=
# - a restrict_filter is activated if current load_level is >=
# Per-filter-ID load thresholds (keys match {capture,restrict}_filters).
global capture_load_levels: table[string] of PcapFilterID &redef;
global restrict_load_levels: table[string] of PcapFilterID &redef;
# One pcap filter slot per load level.
redef enum PcapFilterID += {
LoadLevel1, LoadLevel2, LoadLevel3, LoadLevel4, LoadLevel5,
LoadLevel6, LoadLevel7, LoadLevel8, LoadLevel9, LoadLevel10,
};
# The set of all levels, for iteration during precompilation.
const Levels = {
LoadLevel1, LoadLevel2, LoadLevel3, LoadLevel4, LoadLevel5,
LoadLevel6, LoadLevel7, LoadLevel8, LoadLevel9, LoadLevel10
};
# The load-level cannot not leave this interval.
const MinLoad = LoadLevel1;
const MaxLoad = LoadLevel10;
# The initial load-level.
global default_load_level = LoadLevel10 &redef;
# Set to 0 to turn off any changes of the filter.
global can_adjust_filter = T &redef;
global current_load_level = DefaultPcapFilter;
global ll_file = open_log_file("load-level");
# Interface functions for switching load levels.
# Switches the packet filter to the given load level.  Returns T on
# success (or when already at that level).  On install failure,
# further adjustments are disabled permanently.
function set_load_level(level: PcapFilterID): bool
{
if ( level == current_load_level )
return T;
if ( ! can_adjust_filter )
{
print ll_file, fmt("%.6f can't set %s (load-levels are turned off)", network_time(), level);
return F;
}
if ( ! install_pcap_filter(level) )
{
print ll_file, fmt("%.6f can't set %s (install failed)", network_time(), level);
# Don't try again.
can_adjust_filter = F;
return F;
}
current_load_level = level;
print ll_file, fmt("%.6f switched to %s", network_time(), level);
return T;
}
# Too bad that we can't use enums like integers...
# Successor table: next higher level, saturating at LoadLevel10.
const IncreaseLoadLevelTab = {
[LoadLevel1] = LoadLevel2,
[LoadLevel2] = LoadLevel3,
[LoadLevel3] = LoadLevel4,
[LoadLevel4] = LoadLevel5,
[LoadLevel5] = LoadLevel6,
[LoadLevel6] = LoadLevel7,
[LoadLevel7] = LoadLevel8,
[LoadLevel8] = LoadLevel9,
[LoadLevel9] = LoadLevel10,
[LoadLevel10] = LoadLevel10,
};
# Predecessor table: next lower level, saturating at LoadLevel1.
const DecreaseLoadLevelTab = {
[LoadLevel1] = LoadLevel1,
[LoadLevel2] = LoadLevel1,
[LoadLevel3] = LoadLevel2,
[LoadLevel4] = LoadLevel3,
[LoadLevel5] = LoadLevel4,
[LoadLevel6] = LoadLevel5,
[LoadLevel7] = LoadLevel6,
[LoadLevel8] = LoadLevel7,
[LoadLevel9] = LoadLevel8,
[LoadLevel10] = LoadLevel9,
};
# Numeric rank of each level, used for threshold comparisons.
const LoadLevelToInt = {
[DefaultPcapFilter] = 0,
[LoadLevel1] = 1,
[LoadLevel2] = 2,
[LoadLevel3] = 3,
[LoadLevel4] = 4,
[LoadLevel5] = 5,
[LoadLevel6] = 6,
[LoadLevel7] = 7,
[LoadLevel8] = 8,
[LoadLevel9] = 9,
[LoadLevel10] = 10,
};
# Step one load level up (no-op at the top level).
function increase_load_level()
{
set_load_level(IncreaseLoadLevelTab[current_load_level]);
}
# Step one load level down (no-op at the bottom level).
function decrease_load_level()
{
set_load_level(DecreaseLoadLevelTab[current_load_level]);
}
# Internal functions.
# Fall back to the default pcap filter after a precompile/install
# failure, and disable further load-level changes.
function load_level_error()
{
print ll_file, fmt("%.6f Error, switching back to DefaultPcapFilter",
network_time());
install_default_pcap_filter();
# Don't try changing the load level any more.
can_adjust_filter = F;
}
# Assembles the pcap expression for a load level: OR together the
# capture filters active at that level (those with no threshold, or a
# threshold >= level), AND in the restrict filters active at it.
function build_load_level_filter(level: PcapFilterID): string
{
# Build up capture_filter.
local cfilter = "";
for ( id in capture_filters )
{
if ( id !in capture_load_levels ||
LoadLevelToInt[level] <= LoadLevelToInt[capture_load_levels[id]] )
cfilter = add_to_pcap_filter(cfilter, capture_filters[id], "or");
}
# Build up restrict_filter.
local rfilter = "";
for ( id in restrict_filters )
{
if ( id !in restrict_load_levels ||
LoadLevelToInt[level] >= LoadLevelToInt[restrict_load_levels[id]] )
rfilter = add_to_pcap_filter(rfilter, restrict_filters[id], "and");
}
return join_filters(cfilter, rfilter);
}
# Precompiles the pcap filter for every load level so switching later
# is cheap.  Returns F (and disables load levels) on the first
# precompile failure.
function precompile_load_level_filters(): bool
{
print ll_file, fmt("%.6f <<< Begin of precompilation", network_time() );
for ( i in Levels )
{
local filter = build_load_level_filter(i);
if ( ! precompile_pcap_filter(i, filter) )
{
print ll_file, fmt("%.6f Level %d: %s",
network_time(), LoadLevelToInt[i], pcap_error());
load_level_error();
return F;
}
print ll_file, fmt("%.6f Level %2d: %s", network_time(), LoadLevelToInt[i], filter);
}
print ll_file, fmt("%.6f >>> End of precompilation", network_time() );
return T;
}
# Startup: precompile all level filters and install the default level.
event bro_init()
{
set_buf(ll_file, F);
precompile_load_level_filters();
set_load_level(default_load_level);
# Don't adjust the filter when reading a trace.
if ( ! reading_live_traffic() )
can_adjust_filter = F;
}

View file

@ -1,43 +0,0 @@
# $Id: load-sample.bro 1758 2005-11-22 00:58:10Z vern $
# A simple form of profiling based on sampling the work done per-packet.
# load_sample() is generated every load_sample_freq packets (roughly;
# it's randomized). For each sampled packet, "samples" contains a set
# of the functions, event handlers, and their source files that were accessed
# during the processing of that packet, along with an estimate of the
# CPU cost of processing the packet and (currently broken) memory allocated/
# freed.
# Per-key (function/event/source-file) sample tallies ...
global sampled_count: table[string] of count &default = 0;
global sampled_CPU: table[string] of interval &default = 0 sec;
global sampled_mem: table[string] of int &default = +0;
# ... and the corresponding overall totals.
global num_samples = 0;
global total_sampled_CPU = 0 sec;
global total_sampled_mem = +0;
# One profiling sample: attribute this packet's CPU time and memory
# delta to every function/event/file touched while processing it.
event load_sample(samples: load_sample_info, CPU: interval, dmem: int)
{
++num_samples;
total_sampled_CPU += CPU;
total_sampled_mem += dmem;
# Make sure even idle packets are attributed to something.
if ( |samples| == 0 )
add samples["<nothing>"];
for ( i in samples )
{
++sampled_count[i];
sampled_CPU[i] += CPU;
sampled_mem[i] += dmem;
}
}
# At shutdown, report each key's share of sampled packets and CPU.
# NOTE(review): num_samples and total_sampled_CPU are used as divisors;
# this assumes at least one non-zero sample was recorded - confirm.
event bro_done()
{
for ( i in sampled_CPU )
print fmt("%s: %d%% pkts, %.1f%% CPU",
i, sampled_count[i] * 100 / num_samples,
sampled_CPU[i] * 100 / total_sampled_CPU);
# sampled_mem[i] / total_sampled_mem;
}

View file

@ -1,10 +0,0 @@
# $Id: log-append.bro 2797 2006-04-23 05:56:24Z vern $
# By default, logs are overwritten when opened, deleting the contents
# of any existing log of the same name. Loading this module changes the
# behavior to appending.
# Overrides the default open_log_file() so existing logs are appended
# to instead of being truncated on open.
function open_log_file(tag: string): file
{
return open_for_append(log_file_name(tag));
}

View file

@ -1,677 +0,0 @@
# $Id: login.bro 6481 2008-12-15 00:47:57Z vern $
@load notice
@load weird
@load hot-ids
@load conn
# scan.bro is needed for "account_tried" event.
@load scan
@load demux
@load terminate-connection
module Login;
global telnet_ports = { 23/tcp } &redef;
redef dpd_config += { [ANALYZER_TELNET] = [$ports = telnet_ports] };
global rlogin_ports = { 513/tcp } &redef;
redef dpd_config += { [ANALYZER_RLOGIN] = [$ports = rlogin_ports] };
export {
redef enum Notice += {
SensitiveLogin, # interactive login using sensitive username
# Interactive login seen using forbidden username, but the analyzer
# was confused in following the login dialog, so may be in error.
LoginForbiddenButConfused,
# During a login dialog, a sensitive username (e.g., "rewt") was
# seen in the user's *password*. This is reported as a notice
# because it could be that the login analyzer didn't track the
# authentication dialog correctly, and in fact what it thinks is
# the user's password is instead the user's username.
SensitiveUsernameInPassword,
};
# If these patterns appear anywhere in the user's keystrokes, do a notice.
const input_trouble =
/rewt/
| /eggdrop/
| /\/bin\/eject/
| /oir##t/
| /ereeto/
| /(shell|xploit)_?code/
| /execshell/
| /ff\.core/
| /unset[ \t]+(histfile|history|HISTFILE|HISTORY)/
| /neet\.tar/
| /r0kk0/
| /su[ \t]+(daemon|news|adm)/
| /\.\/clean/
| /rm[ \t]+-rf[ \t]+secure/
| /cd[ \t]+\/dev\/[a-zA-Z]{3}/
| /solsparc_lpset/
| /\.\/[a-z]+[ \t]+passwd/
| /\.\/bnc/
| /bnc\.conf/
| /\"\/bin\/ksh\"/
| /LAST STAGE OF DELIRIUM/
| /SNMPXDMID_PROG/
| /snmpXdmid for solaris/
| /\"\/bin\/uname/
| /gcc[ \t]+1\.c/
| />\/etc\/passwd/
| /lynx[ \t]+-source[ \t]+.*(packetstorm|shellcode|linux|sparc)/
| /gcc.*\/bin\/login/
| /#define NOP.*0x/
| /printf\(\"overflowing/
| /exec[a-z]*\(\"\/usr\/openwin/
| /perl[ \t]+.*x.*[0-9][0-9][0-9][0-9]/
| /ping.*-s.*%d/
&redef;
# If this pattern appears anywhere in the user's input after applying
# <backspace>/<delete> editing, do a notice ...
const edited_input_trouble =
/[ \t]*(cd|pushd|more|less|cat|vi|emacs|pine)[ \t]+((['"]?\.\.\.)|(["'](\.*)[ \t]))/
&redef;
# ... *unless* the corresponding output matches this:
const output_indicates_input_not_trouble = /No such file or directory/ &redef;
# NOTICE on these, but only after waiting for the corresponding output,
# so it can be displayed at the same time.
const input_wait_for_output = edited_input_trouble &redef;
# If the user's entire input matches this pattern, do a notice. Putting
# "loadmodule" here rather than in input_trouble is just to illustrate
# the idea, it could go in either.
const full_input_trouble = /.*loadmodule.*/ &redef;
# If the following appears anywhere in the user's output, do a notice.
const output_trouble =
/^-r.s.*root.*\/bin\/(sh|csh|tcsh)/
| /Jumping to address/
| /Jumping Address/
| /smashdu\.c/
| /PATH_UTMP/
| /Log started at =/
| /www\.anticode\.com/
| /www\.uberhax0r\.net/
| /smurf\.c by TFreak/
| /Super Linux Xploit/
| /^# \[root@/
| /^-r.s.*root.*\/bin\/(time|sh|csh|tcsh|bash|ksh)/
| /invisibleX/
| /PATH_(UTMP|WTMP|LASTLOG)/
| /[0-9]{5,} bytes from/
| /(PATH|STAT):\ .*=>/
| /----- \[(FIN|RST|DATA LIMIT|Timed Out)\]/
| /IDLE TIMEOUT/
| /DATA LIMIT/
| /-- TCP\/IP LOG --/
| /STAT: (FIN|TIMED_OUT) /
| /(shell|xploit)_code/
| /execshell/
| /x86_bsd_compaexec/
| /\\xbf\\xee\\xee\\xee\\x08\\xb8/ # from x.c worm
| /Coded by James Seter/
| /Irc Proxy v/
| /Daemon port\.\.\.\./
| /BOT_VERSION/
| /NICKCRYPT/
| /\/etc\/\.core/
| /exec.*\/bin\/newgrp/
| /deadcafe/
| /[ \/]snap\.sh/
| /Secure atime,ctime,mtime/
| /Can\'t fix checksum/
| /Promisc Dectection/
| /ADMsn0ofID/
| /(cd \/; uname -a; pwd; id)/
| /drw0rm/
| /[Rr][Ee3][Ww][Tt][Ee3][Dd]/
| /rpc\.sadmin/
| /AbraxaS/
| /\[target\]/
| /ID_SENDSYN/
| /ID_DISTROIT/
| /by Mixter/
| /rap(e?)ing.*using weapons/
| /spsiod/
| /[aA][dD][oO][rR][eE][bB][sS][dD]/ # rootkit
&redef;
# Same, but must match entire output.
const full_output_trouble = /.*Trojaning in progress.*/ &redef;
const backdoor_prompts =
/^[!-~]*( ?)[#%$] /
| /.*no job control/
| /WinGate>/
&redef;
const non_backdoor_prompts = /^ *#.*#/ &redef;
const hot_terminal_types = /VT666|007/ &redef;
const hot_telnet_orig_ports = { 53982/tcp, } &redef;
const router_prompts: set[string] &redef;
const non_ASCII_hosts: set[addr] &redef;
const skip_logins_to = { non_ASCII_hosts, } &redef;
const always_hot_login_ids = { always_hot_ids } &redef;
const hot_login_ids = { hot_ids } &redef;
const rlogin_id_okay_if_no_password_exposed = { "root", } &redef;
const BS = "\x08";
const DEL = "\x7f";
global new_login_session:
function(c: connection, pid: peer_id, output_line: count);
global remove_login_session: function(c: connection, pid: peer_id);
global ext_set_login_state:
function(cid: conn_id, pid: peer_id, state: count);
global ext_get_login_state:
function(cid: conn_id, pid: peer_id): count;
}
redef capture_filters += { ["login"] = "port telnet or tcp port 513" };
redef skip_authentication = {
"WELCOME TO THE BERKELEY PUBLIC LIBRARY",
};
redef direct_login_prompts = { "TERMINAL?", };
redef login_prompts = {
"Login:", "login:", "Name:", "Username:", "User:", "Member Name",
"User Access Verification", "Cisco Systems Console",
direct_login_prompts
};
# Strings that look like failure indications but are actually benign
# banner text (e.g. "<n> failures since last login").
# NOTE: this redef previously appeared twice back-to-back with identical
# contents, the second copy carrying a stray "&redef" attribute on the
# redef statement; the duplicate has been collapsed into this single redef.
redef login_non_failure_msgs = {
	"Failures", "failures",	# probably is "<n> failures since last login"
	"failure since last successful login",
	"failures since last successful login",
};
redef login_failure_msgs = {
"invalid", "Invalid", "incorrect", "Incorrect", "failure", "Failure",
# "Unable to authenticate", "unable to authenticate",
"User authorization failure",
"Login failed",
"INVALID", "Sorry.", "Sorry,",
};
# Strings whose presence in server output indicates a successful login.
# ("Last successful login" was listed twice; the duplicate set element
# was redundant and has been removed.)
redef login_success_msgs = {
	"Last login",
	"Last successful login",
	"checking for disk quotas", "unsuccessful login attempts",
	"failure since last successful login",
	"failures since last successful login",
	router_prompts,
};
redef login_timeouts = {
"timeout", "timed out", "Timeout", "Timed out",
"Error reading command input", # VMS
};
type check_info: record {
expanded_line: string; # line with all possible editing seqs
hot: bool; # whether any editing sequence was a hot user id
hot_id: string; # the ID considered hot
forbidden: bool; # same, but forbidden user id
};
type login_session_info: record {
user: string;
output_line: count; # number of lines seen
# input string for which we want to match the output.
waiting_for_output: string;
waiting_for_output_line: count; # output line we want to match it to
state: count; # valid for external connections only
};
global login_sessions: table[peer_id, conn_id] of login_session_info;
# The next two functions are "external-to-the-event-engine",
# hence the ext_ prefix. They're used by the script to manage
# login state so that they can work with login sessions unknown
# to the event engine (such as those received from remote peers).
# Return the login state for a session, consulting the event engine for
# local sessions (pid == PEER_ID_NONE) and our own table for sessions
# received from remote peers.
function ext_get_login_state(cid: conn_id, pid: peer_id): count
	{
	return pid == PEER_ID_NONE ?
		get_login_state(cid) :
		login_sessions[pid, cid]$state;
	}
# Set the login state for a session: sessions from remote peers are
# tracked in login_sessions, local ones in the event engine itself.
function ext_set_login_state(cid: conn_id, pid: peer_id, state: count)
	{
	if ( pid != PEER_ID_NONE )
		login_sessions[pid, cid]$state = state;
	else
		set_login_state(cid, state);
	}
# Create and register a fresh session record for [pid, c$id], starting in
# the authentication state with empty user/pending-output fields.
# output_line seeds the output-line counter (9999 is used by callers when
# the session is picked up mid-stream and the true count is unknown).
function new_login_session(c: connection, pid: peer_id, output_line: count)
{
local s: login_session_info;
s$waiting_for_output = s$user = "";
s$output_line = output_line;
s$state = LOGIN_STATE_AUTHENTICATE;
login_sessions[pid, c$id] = s;
}
# Drop the tracked session state for this connection/peer pair.
function remove_login_session(c: connection, pid: peer_id)
{
delete login_sessions[pid, c$id];
}
# A connection is a login session if its responder port is telnet or rlogin.
function is_login_conn(c: connection): bool
	{
	local svc = c$id$resp_p;
	return svc == telnet || svc == rlogin;
	}
# Raise a SensitiveLogin notice (with the session's username when we have
# one), bump the connection's hot counter, and start demuxing the
# connection's payload under the given tag.
function hot_login(c: connection, pid: peer_id, msg: string, tag: string)
{
if ( [pid, c$id] in login_sessions )
NOTICE([$note=SensitiveLogin, $conn=c,
$user=login_sessions[pid, c$id]$user, $msg=msg]);
else
NOTICE([$note=SensitiveLogin, $conn=c, $msg=msg]);
++c$hot;
demux_conn(c$id, tag, "keys", service_name(c));
}
# Decide whether a user ID is "hot".  Which ID set applies depends on
# context: successful logins check hot_login_ids; failed attempts while
# the analyzer is confused check forbidden_ids; otherwise only the
# always-hot IDs count.
function is_hot_id(id: string, successful: bool, confused: bool): bool
	{
	if ( successful )
		return id in hot_login_ids;

	if ( confused )
		return id in forbidden_ids;

	return id in always_hot_login_ids;
	}
# True if the ID is in the forbidden set or exactly matches the
# forbidden_id_patterns pattern (pattern == string is an exact match).
function is_forbidden_id(id: string): bool
{
return id in forbidden_ids || id == forbidden_id_patterns;
}
# Lower-case the line, derive its backspace- and delete-edited variants,
# and test the original plus each distinct variant against the hot and
# forbidden ID sets.  Returns a check_info whose expanded_line is the
# comma-joined list of all variants considered, along with the verdicts
# and (if hot) which variant triggered.
function edit_and_check_line(c: connection, pid: peer_id, line: string,
successful: bool): check_info
{
line = to_lower(line);
# Two edits: one treating ^H as the erase character, one treating DEL.
local ctrl_H_edit = edit(line, BS);
local del_edit = edit(line, DEL);
local confused =
(ext_get_login_state(c$id, pid) == LOGIN_STATE_CONFUSED);
local hot = is_hot_id(line, successful, confused);
local hot_id = hot ? line : "";
local forbidden = is_forbidden_id(line);
local eline = line;
if ( ctrl_H_edit != line )
{
eline = fmt("%s,%s", eline, ctrl_H_edit);
# Only record the first hot variant found.
if ( ! hot && is_hot_id(ctrl_H_edit, successful, confused) )
{
hot = T;
hot_id = ctrl_H_edit;
}
forbidden = forbidden || is_forbidden_id(ctrl_H_edit);
}
if ( del_edit != line )
{
eline = fmt("%s,%s", eline, del_edit);
if ( ! hot && is_hot_id(del_edit, successful, confused) )
{
hot = T;
hot_id = del_edit;
}
forbidden = forbidden || is_forbidden_id(del_edit);
}
local results: check_info;
results$expanded_line = eline;
results$hot = hot;
results$hot_id = hot_id;
results$forbidden = forbidden;
return results;
}
# Check a username (with edit expansions), record it on the session and in
# c$addl (via fmt_s, e.g. "%sfail/%s "), and react: hot IDs bump c$hot and
# start demuxing; forbidden IDs terminate the connection unless the
# analyzer is confused, in which case only a notice is raised.
# Returns whether the connection is now considered hot.
function edit_and_check_user(c: connection, pid: peer_id, user: string,
successful: bool, fmt_s: string): bool
{
local check = edit_and_check_line(c, pid, user, successful);
# 9999: session picked up mid-stream, true output line count unknown.
if ( [pid, c$id] !in login_sessions )
new_login_session(c, pid, 9999);
login_sessions[pid, c$id]$user = check$expanded_line;
c$addl = fmt(fmt_s, c$addl, check$expanded_line);
if ( check$hot )
{
++c$hot;
demux_conn(c$id, check$hot_id, "keys", service_name(c));
}
if ( check$forbidden )
{
if ( ext_get_login_state(c$id, pid) == LOGIN_STATE_CONFUSED )
NOTICE([$note=LoginForbiddenButConfused, $conn=c,
$user = user,
$msg=fmt("not terminating %s because confused about state", full_id_string(c))]);
else
TerminateConnection::terminate_connection(c);
}
return c$hot > 0;
}
# Detect sensitive usernames typed into the *password* field (which can
# mean the analyzer mis-tracked the dialog); if found, bump c$hot and
# raise SensitiveUsernameInPassword.
function edit_and_check_password(c: connection, pid: peer_id, password: string)
{
local check = edit_and_check_line(c, pid, password, T);
if ( check$hot )
{
++c$hot;
NOTICE([$note=SensitiveUsernameInPassword, $conn=c,
$user=password,
$msg=fmt("%s password: \"%s\"",
id_string(c$id), check$expanded_line)]);
}
}
# Handle a failed login attempt: feed scan.bro's account_tried, check the
# password for embedded sensitive usernames, then check the claimed user
# (and the client-side rlogin user, if different) for hot IDs.
event login_failure(c: connection, user: string, client_user: string,
password: string, line: string)
{
local pid = get_event_peer()$id;
event account_tried(c, user, password);
edit_and_check_password(c, pid, password);
if ( c$hot == 0 && password == "" &&
! edit_and_check_line(c, pid, user, F)$hot )
# Don't bother reporting it; this was clearly a half-hearted
# attempt and it's not a sensitive username.
return;
local user_hot = edit_and_check_user(c, pid, user, F, "%sfail/%s ");
if ( client_user != "" && client_user != user &&
edit_and_check_user(c, pid, client_user, F, "%s(%s) ") )
user_hot = T;
if ( user_hot || c$hot > 0 )
NOTICE([$note=SensitiveLogin, $conn=c,
$user=user, $sub=client_user,
$msg=fmt("%s %s", id_string(c$id), c$addl)]);
}
# Handle a successful login: hot-host checks, account_tried, password
# check, then username sensitivity — with an exemption for rlogin logins
# that exposed no password and used an explicitly whitelisted ID.
event login_success(c: connection, user: string, client_user: string,
password: string, line: string)
{
local pid = get_event_peer()$id;
Hot::check_hot(c, Hot::APPL_ESTABLISHED);
event account_tried(c, user, password);
edit_and_check_password(c, pid, password);
# Look for whether the user name is sensitive; but allow for
# some ids being okay if no password was exposed accessing them.
local user_hot = F;
if ( c$id$resp_p == rlogin && password == "<none>" &&
user in rlogin_id_okay_if_no_password_exposed )
append_addl(c, fmt("\"%s\"", user));
else
user_hot = edit_and_check_user(c, pid, user, T, "%s\"%s\" ");
# The rlogin client-side user is always-hot regardless of password.
if ( c$id$resp_p == rlogin && client_user in always_hot_login_ids )
{
append_addl(c, fmt("(%s)", client_user));
demux_conn(c$id, client_user, "keys", service_name(c));
user_hot = T;
}
if ( user_hot || c$hot > 0 )
NOTICE([$note=SensitiveLogin, $conn=c,
$user=user, $sub=client_user,
$msg=fmt("%s %s", id_string(c$id), c$addl)]);
# else if ( password == "" )
# alarm fmt("%s %s <no password>", id_string(c$id), c$addl);
### use the following if no login_input_line/login_output_line
# else
# {
# set_record_packets(c$id, F);
# skip_further_processing(c$id);
# }
}
# Scan each keystroke line (raw and ^H/DEL-edited) for trouble patterns.
# Matches on edited_input_trouble are deferred until the corresponding
# output arrives, so both can be reported together; other matches raise
# an immediate hot_login (capped at 2 per connection via c$hot).
event login_input_line(c: connection, line: string)
{
local pid = get_event_peer()$id;
local BS_line = edit(line, BS);
local DEL_line = edit(line, DEL);
if ( input_trouble in line ||
### need to merge input_trouble and edited_input_trouble here
### ideally, match on input_trouble would tell whether we need
### to invoke the edit functions, as an attribute of a .*(^H|DEL)
### rule.
input_trouble in BS_line || input_trouble in DEL_line ||
(edited_input_trouble in BS_line &&
# If one is in but the other not, then the one that's not
# is presumably the correct edit, and the one that is, isn't
# in fact edited at all
edited_input_trouble in DEL_line) ||
line == full_input_trouble )
{
if ( [pid, c$id] !in login_sessions )
new_login_session(c, pid, 9999);
if ( edited_input_trouble in BS_line &&
edited_input_trouble in DEL_line )
{
login_sessions[pid, c$id]$waiting_for_output = line;
login_sessions[pid, c$id]$waiting_for_output_line =
# We don't want the *next* line, that's just
# the echo of this input.
login_sessions[pid, c$id]$output_line + 2;
}
else if ( ++c$hot <= 2 )
hot_login(c, pid, fmt("%s input \"%s\"", id_string(c$id), line), "trb");
}
}
# Inspect server output: the first non-empty line is checked for backdoor
# prompts; deferred input matches (waiting_for_output) are resolved once
# the matching output line arrives; and every short-enough line is checked
# against the output trouble patterns (capped at 2 reports via c$hot).
event login_output_line(c: connection, line: string)
{
local pid = get_event_peer()$id;
if ( [pid, c$id] !in login_sessions )
new_login_session(c, pid, 9999);
local s = login_sessions[pid, c$id];
if ( line != "" && ++s$output_line == 1 )
{
# Length cap avoids matching prompt-like text inside long output.
if ( byte_len(line) < 40 &&
backdoor_prompts in line && non_backdoor_prompts !in line )
hot_login(c, pid, fmt("%s possible backdoor \"%s\"", id_string(c$id), line), "trb");
}
if ( s$waiting_for_output != "" &&
s$output_line >= s$waiting_for_output_line )
{
# Suppress the report if the output shows the input was harmless.
if ( output_indicates_input_not_trouble !in line )
hot_login(c, pid,
fmt("%s input \"%s\" yielded output \"%s\"",
id_string(c$id),
s$waiting_for_output,
line),
"trb");
s$waiting_for_output = "";
}
if ( byte_len(line) < 256 &&
(output_trouble in line || line == full_output_trouble) &&
++c$hot <= 2 )
hot_login(c, pid, fmt("%s output \"%s\"", id_string(c$id), line), "trb");
}
# The analyzer lost track of the authentication dialog: run hot checks,
# tag the connection, log a weird, and make sure packets are recorded.
event login_confused(c: connection, msg: string, line: string)
{
Hot::check_hot(c, Hot::APPL_ESTABLISHED);
append_addl(c, "<confused>");
event conn_weird_addl(msg, c, line);
set_record_packets(c$id, T);
}
# While confused, lines may still contain sensitive IDs; if a not-yet-hot
# connection shows one, record it and raise SensitiveLogin.
event login_confused_text(c: connection, line: string)
{
local pid = get_event_peer()$id;
if ( c$hot == 0 && edit_and_check_line(c, pid, line, F)$hot )
{
# Called for its side effects (session/addl update); result unused.
local ignore =
edit_and_check_user(c, pid, line, F, "%sconfused/%s ");
NOTICE([$note=SensitiveLogin, $conn=c,
$user=line,
$msg=fmt("%s %s", id_string(c$id), c$addl)]);
set_record_packets(c$id, T);
}
}
# Flag logins that negotiate a suspicious terminal type (hot_terminal_types).
event login_terminal(c: connection, terminal: string)
{
local pid = get_event_peer()$id;
if ( hot_terminal_types in terminal )
hot_login(c, pid,
fmt("%s term %s", id_string(c$id), terminal), "trb");
}
# Any $TTYPROMPT environment-variable prompt is reported as hot
# (associated with a Solaris telnet exploit).
event login_prompt(c: connection, prompt: string)
{
# Could check length >= 6, per Solaris exploit ...
local pid = get_event_peer()$id;
hot_login(c, pid,
fmt("%s $TTYPROMPT %s", id_string(c$id), prompt), "trb");
}
# Overly long lines on a login connection: for known non-ASCII hosts just
# stop analyzing/recording; otherwise, if still authenticating, treat the
# analyzer as confused from here on.
event excessive_line(c: connection)
{
if ( is_login_conn(c) )
{
local pid = get_event_peer()$id;
if ( ! c$hot && c$id$resp_h in non_ASCII_hosts )
{
ext_set_login_state(c$id, pid, LOGIN_STATE_SKIP);
set_record_packets(c$id, F);
}
else if ( ext_get_login_state(c$id, pid) == LOGIN_STATE_AUTHENTICATE )
{
event login_confused(c, "excessive_line", "");
ext_set_login_state(c$id, pid, LOGIN_STATE_CONFUSED);
}
}
}
# Log inconsistent telnet option negotiation to the weird file.
event inconsistent_option(c: connection)
{
print Weird::weird_file, fmt("%.6f %s inconsistent option", network_time(), id_string(c$id));
}
# Log malformed telnet options to the weird file.
event bad_option(c: connection)
{
print Weird::weird_file, fmt("%.6f %s bad option", network_time(), id_string(c$id));
}
# Log improperly terminated telnet options to the weird file.
event bad_option_termination(c: connection)
{
print Weird::weird_file, fmt("%.6f %s bad option termination", network_time(), id_string(c$id));
}
# Tag the connection's addl field with the accepted authentication type.
event authentication_accepted(name: string, c: connection)
	{
	append_addl(c, fmt("auth/%s", name));
	}
# Tag the connection's addl field with the rejected authentication type.
event authentication_rejected(name: string, c: connection)
{
append_addl(c, fmt("auth-failed/%s", name));
}
# Authentication analysis deliberately skipped for this connection: mark
# it, stop further processing, and (unless already hot) stop recording.
event authentication_skipped(c: connection)
{
append_addl(c, "(skipped)");
skip_further_processing(c$id);
if ( ! c$hot )
set_record_packets(c$id, F);
}
# New login connection: start session tracking from output line 0, honor
# the skip list, and flag telnet connections from hot originator ports.
event connection_established(c: connection)
{
if ( is_login_conn(c) )
{
local pid = get_event_peer()$id;
new_login_session(c, pid, 0);
if ( c$id$resp_h in skip_logins_to )
event authentication_skipped(c);
if ( c$id$resp_p == telnet &&
c$id$orig_p in hot_telnet_orig_ports )
hot_login(c, pid, fmt("%s hot_orig_port", id_string(c$id)), "orig");
}
}
# Login connection picked up mid-stream: session starts confused since we
# missed the authentication dialog (9999 = unknown output-line count).
event partial_connection(c: connection)
{
if ( is_login_conn(c) )
{
local pid = get_event_peer()$id;
new_login_session(c, pid, 9999);
ext_set_login_state(c$id, pid, LOGIN_STATE_CONFUSED);
if ( c$id$resp_p == telnet &&
c$id$orig_p in hot_telnet_orig_ports )
hot_login(c, pid, fmt("%s hot_orig_port", id_string(c$id)), "orig");
}
}
# Clean up session state when the connection completes normally.
event connection_finished(c: connection)
{
local pid = get_event_peer()$id;
remove_login_session(c, pid);
}
# Note when the login session switches to encryption (no further
# cleartext analysis possible).
event activating_encryption(c: connection)
{
if ( is_login_conn(c) )
append_addl(c, "(encrypted)");
}

View file

@ -1,180 +0,0 @@
# $Id: mime-pop.bro 4758 2007-08-10 06:49:23Z vern $
#
# A stripped-down version of mime.bro adapted to work on POP3 events.
#
# FIXME: What's the best way to merge mime.bro and mime-pop3.bro?
@load pop3
module MIME_POP3;
const mime_log = open_log_file("mime-pop") &redef;
type mime_session_info: record {
id: count;
connection_id: conn_id;
level: count;
data_offset: count;
};
global mime_session_id = 0;
global mime_sessions: table[conn_id] of mime_session_info;
# Render a session as "#<id> <conn-id-string> +<nesting level>" for logging.
function mime_session_string(session: mime_session_info): string
	{
	local sid = prefixed_id(session$id);
	local conn = id_string(session$connection_id);
	return fmt("#%s %s +%d", sid, conn, session$level);
	}
# Write a timestamped warning (no session context) to the MIME log.
function mime_log_warning(what: string)
{
print mime_log, fmt("%.6f warning: %s", network_time(), what);
}
# Write one timestamped, session-tagged message to the MIME log.
function mime_log_msg(session: mime_session_info, where: string, what: string)
	{
	local tag = mime_session_string(session);
	print mime_log, fmt("%.6f %s: [%s] %s", network_time(), tag, where, what);
	}
# Allocate a new session record for this connection (fresh numeric ID,
# nesting level 0) and log its start.
function new_mime_session(c: connection)
{
local id = c$id;
local session_id = ++mime_session_id;
local info: mime_session_info;
info$id = session_id;
info$connection_id = id;
info$level = 0;
info$data_offset = 0;
mime_sessions[id] = info;
mime_log_msg(info, "start", "");
}
# Look up (or lazily create) the session for this connection.  When
# creation wasn't expected (new_session_ok == F) — i.e. no begin_entity
# was seen — a warning is logged before creating it anyway.
function get_mime_session(c: connection, new_session_ok: bool): mime_session_info
{
local id = c$id;
if ( id !in mime_sessions )
{
if ( ! new_session_ok )
mime_log_warning(fmt("begin_entity missing for new MIME session %s", id_string(id)));
new_mime_session(c);
}
return mime_sessions[id];
}
# Log session completion and discard its state.
function end_mime_session(session: mime_session_info)
{
mime_log_msg(session, "finish", "");
delete mime_sessions[session$connection_id];
}
# Connection torn down: drop any leftover POP3 MIME session state.
event connection_state_remove(c: connection)
{
# This module only handles POP3 (responder port 110).
if ( c$id$resp_p != 110/tcp )
return;
local id = c$id;
if ( id in mime_sessions )
{
mime_log_msg(mime_sessions[id], "state remove", "");
delete mime_sessions[id];
}
}
# Enter one MIME entity: bump nesting level, reset the data offset.
function do_mime_begin_entity(c: connection)
{
local session = get_mime_session(c, T);
++session$level;
session$data_offset = 0;
mime_log_msg(session, "begin entity", "");
}
# POP3-only filter in front of do_mime_begin_entity.
event mime_begin_entity(c: connection)
{
if ( c$id$resp_p != 110/tcp )
return;
do_mime_begin_entity(c);
}
# Leave one MIME entity: decrement nesting, finish the session when the
# outermost entity closes, and warn on unmatched end_entity events.
function do_mime_end_entity(c: connection)
{
local session = get_mime_session(c, T);
mime_log_msg(session, "end entity", "");
if ( session$level > 0 )
{
--session$level;
if ( session$level == 0 )
end_mime_session(session);
}
else
mime_log_warning(fmt("unmatched end_entity for MIME session %s",
mime_session_string(session)));
}
# POP3-only filter in front of do_mime_end_entity.
event mime_end_entity(c: connection)
{
if ( c$id$resp_p != 110/tcp )
return;
do_mime_end_entity(c);
}
# A sibling entity follows: treat as end-of-current + begin-of-next.
event mime_next_entity(c: connection)
{
if ( c$id$resp_p != 110/tcp )
return;
do_mime_end_entity(c);
do_mime_begin_entity(c);
}
# Log every MIME header of a POP3 entity as "name: value".
event mime_all_headers(c: connection, hlist: mime_header_list)
{
if ( c$id$resp_p != 110/tcp )
return;
local session = get_mime_session(c, T);
local i = 0;
for ( i in hlist )
{
local h = hlist[i];
mime_log_msg(session, "header",
fmt("%s: \"%s\"", h$name, h$value));
}
}
# Log entity body data, but only the first 256 bytes' worth of segments;
# the running offset is advanced for every segment regardless.
event mime_segment_data(c: connection, length: count, data: string)
{
if ( c$id$resp_p != 110/tcp )
return;
local session = get_mime_session(c, T);
if ( session$data_offset < 256 )
mime_log_msg(session, "data", fmt("%d: %s", length, data));
session$data_offset = session$data_offset + length;
}
# Log miscellaneous MIME analyzer events (parse problems etc.).
event mime_event(c: connection, event_type: string, detail: string)
{
if ( c$id$resp_p != 110/tcp )
return;
local session = get_mime_session(c, T);
mime_log_msg(session, "event", fmt("%s: %s", event_type, detail));
}

View file

@ -1,15 +0,0 @@
# $Id: mt.bro 340 2004-09-09 06:38:27Z vern $
@load dns-lookup
@load hot
@load frag
@load tcp
@load scan
@load weird
@load finger
@load ident
@load ftp
@load login
@load portmapper
@load ntp
@load tftp

View file

@ -1,101 +0,0 @@
# $Id:$
@load conn-id
module NCP;
global ncp_log = open_log_file("ncp") &redef;
redef capture_filters += {["ncp"] = "tcp port 524"};
export {
const ncp_frame_type_name = {
[ 0x1111 ] = "NCP_ALLOC_SLOT",
[ 0x2222 ] = "NCP_REQUEST",
[ 0x3333 ] = "NCP_REPLY",
[ 0x5555 ] = "NCP_DEALLOC_SLOT",
[ 0x7777 ] = "NCP_BURST",
[ 0x9999 ] = "NCP_ACK",
} &default = function(code: count): string
{
return fmt("NCP_UNKNOWN_FRAME_TYPE(%x)", code);
};
const ncp_function_name = {
[ 0x01 ] = "NCP_FILE_SET_LOCK",
[ 0x02 ] = "NCP_FILE_RELEASE_LOCK",
[ 0x03 ] = "NCP_LOG_FILE",
[ 0x04 ] = "NCP_LOCK_FILE_SET",
[ 0x05 ] = "NCP_RELEASE_FILE",
[ 0x06 ] = "NCP_RELEASE_FILE_SET",
[ 0x07 ] = "NCP_CLEAR_FILE",
[ 0x08 ] = "NCP_CLEAR_FILE_SET",
[ 0x09 ] = "NCP_LOG_LOGICAL_RECORD",
[ 0x0a ] = "NCP_LOCK_LOGICAL_RECORD_SET",
[ 0x0b ] = "NCP_CLEAR_LOGICAL_RECORD",
[ 0x0c ] = "NCP_RELEASE_LOGICAL_RECORD",
[ 0x0d ] = "NCP_RELEASE_LOGICAL_RECORD_SET",
[ 0x0e ] = "NCP_CLEAR_LOGICAL_RECORD_SET",
[ 0x0f ] = "NCP_ALLOC_RESOURCE",
[ 0x10 ] = "NCP_DEALLOC_RESOURCE",
[ 0x11 ] = "NCP_PRINT",
[ 0x15 ] = "NCP_MESSAGE",
[ 0x16 ] = "NCP_DIRECTORY",
[ 0x17 ] = "NCP_BINDARY_AND_MISC",
[ 0x18 ] = "NCP_END_OF_JOB",
[ 0x19 ] = "NCP_LOGOUT",
[ 0x1a ] = "NCP_LOG_PHYSICAL_RECORD",
[ 0x1b ] = "NCP_LOCK_PHYSICAL_RECORD_SET",
[ 0x1c ] = "NCP_RELEASE_PHYSICAL_RECORD",
[ 0x1d ] = "NCP_RELEASE_PHYSICAL_RECORD_SET",
[ 0x1e ] = "NCP_CLEAR_PHYSICAL_RECORD",
[ 0x1f ] = "NCP_CLEAR_PHYSICAL_RECORD_SET",
[ 0x20 ] = "NCP_SEMAPHORE",
[ 0x22 ] = "NCP_TRANSACTION_TRACKING",
[ 0x23 ] = "NCP_AFP",
[ 0x42 ] = "NCP_CLOSE_FILE",
[ 0x47 ] = "NCP_GET_FILE_SIZE",
[ 0x48 ] = "NCP_READ_FILE",
[ 0x49 ] = "NCP_WRITE_FILE",
[ 0x56 ] = "NCP_EXT_ATTR",
[ 0x57 ] = "NCP_FILE_DIR",
[ 0x58 ] = "NCP_AUDITING",
[ 0x5a ] = "NCP_MIGRATION",
[ 0x60 ] = "NCP_PNW",
[ 0x61 ] = "NCP_GET_MAX_PACKET_SIZE",
[ 0x68 ] = "NCP_NDS",
[ 0x6f ] = "NCP_SEMAPHORE_NEW",
[ 0x7b ] = "NCP_7B",
[ 0x5701 ] = "NCP_CREATE_FILE_DIR",
[ 0x5702 ] = "NCP_INIT_SEARCH",
[ 0x5703 ] = "NCP_SEARCH_FILE_DIR",
[ 0x5704 ] = "NCP_RENAME_FILE_DIR",
[ 0x5706 ] = "NCP_OBTAIN_FILE_DIR_INFO",
[ 0x5707 ] = "NCP_MODIFY_FILE_DIR_DOS_INFO",
[ 0x5708 ] = "NCP_DELETE_FILE_DIR",
[ 0x5709 ] = "NCP_SET_SHORT_DIR_HANDLE",
[ 0x5714 ] = "NCP_SEARCH_FOR_FILE_DIR_SET",
[ 0x5718 ] = "NCP_GET_NAME_SPACE_LOADED_LIST",
[ 0x5742 ] = "NCP_GET_CURRENT_SIZE_OF_FILE",
} &default = function(code: count): string
{
return fmt("NCP_UNKNOWN_FUNCTION(%x)", code);
};
} # export
# Log each NCP request with symbolic frame-type and function names
# (the tables' &default handlers render unknown codes).
event ncp_request(c: connection, frame_type: count, length: count, func: count)
{
print ncp_log, fmt("%.6f %s NCP request type=%s function=%s",
network_time(), id_string(c$id),
ncp_frame_type_name[frame_type],
ncp_function_name[func]);
}
# NCP replies are intentionally ignored; the handler exists so the event
# is consumed without generating "unhandled event" noise.
event ncp_reply(c: connection, frame_type: count, length: count,
req_frame: count, req_func: count, completion_code: count)
{
}

View file

@ -1,106 +0,0 @@
# $Id:$
#
# Netflow data-dumper and proof-of-concept flow restitcher.
# Written by Bernhard Ager (2007).
module NetFlow;
export {
# Perform flow restitching?
global netflow_restitch = T &redef;
# How long to wait for additional flow records after a RST or FIN,
# so we can compress multiple RST/FINs for the same flow rather than
# treating them as separate flows. It's not clear what's the best
# setting for this timer, but for now we use something larger
# than the NetFlow inactivity timeout (5 minutes).
global netflow_finished_conn_expire = 310 sec &redef;
}
global netflow_log = open_log_file("netflow") &redef;
# Should be larger than activity timeout. Setting only affects table
# declaration, therefore &redef useless.
const netflow_table_expire = 31 min;
type flow: record {
cnt: count;
pkts: count;
octets: count;
syn: bool;
fin: bool;
first: time;
last: time;
};
# Build a restitched-flow record from the first NetFlow v5 record seen
# for a connection ID.
function new_flow(r: nf_v5_record): flow
{
return [ $cnt = 1,
$pkts = r$pkts,
$octets = r$octets,
$syn = r$tcpflag_syn,
$fin = r$tcpflag_fin,
$first = r$first,
$last = r$last ];
}
# Merge an additional v5 record into an existing restitched flow:
# sum counters, OR the SYN/FIN flags, and widen the time interval.
function update_flow(f: flow, r: nf_v5_record)
{
f$pkts += r$pkts;
f$octets += r$octets;
++f$cnt;
f$syn = f$syn || r$tcpflag_syn;
f$fin = f$fin || r$tcpflag_fin;
if ( r$first < f$first )
f$first = r$first;
if ( r$last > f$last )
f$last = r$last;
}
# &expire_func for the flows table: log the flow being expired.
# Returning a negative interval tells the table to expire the entry now.
function print_flow(t: table[conn_id] of flow, idx: conn_id): interval
{
print netflow_log, fmt("%.6f flow %s: %s", network_time(), idx, t[idx]);
return -1 sec;
}
# Scheduled after RST/FIN: flush the flow if it still exists (it may have
# already been expired or flushed by an earlier scheduled instance).
event v5flow_finished(t: table[conn_id] of flow, idx: conn_id)
{
if ( idx in t )
{
print_flow(t, idx);
delete t[idx];
}
}
global flows: table[conn_id] of flow &write_expire = netflow_table_expire
&expire_func = print_flow;
# Dump every NetFlow v5 export header to the log.
event netflow_v5_header(h: nf_v5_header)
{
print netflow_log, fmt("%.6f header %s", network_time(), h);
}
# Dump each v5 record and, if restitching is enabled, fold it into the
# per-conn_id flow table.  On FIN/RST a delayed flush is scheduled so
# multiple FIN/RST records for the same flow coalesce into one.
event netflow_v5_record (r: nf_v5_record)
{
if ( netflow_restitch )
{
if ( r$id in flows )
update_flow (flows[r$id], r);
else
flows[r$id] = new_flow (r);
if ( r$tcpflag_fin || r$tcpflag_rst )
schedule netflow_finished_conn_expire {
v5flow_finished (flows, r$id)
};
}
print netflow_log, fmt("%.6f record %s", network_time(), r);
}
# At shutdown, flush all flows still being restitched.
event bro_done ()
{
for ( f_id in flows )
print_flow(flows, f_id);
}

View file

@ -1,32 +0,0 @@
# $Id: netstats.bro 564 2004-10-23 02:27:57Z vern $
@load notice
redef enum Notice += {
DroppedPackets, # Bro reported packets dropped by the packet filter
};
const stats_collection_interval = 10secs;
# Periodic self-rescheduling check of packet-capture statistics; raises
# DroppedPackets whenever the drop counter advanced since the last sample.
event net_stats_update(last_stat: NetStats)
{
local ns = net_stats();
local new_dropped = ns$pkts_dropped - last_stat$pkts_dropped;
if ( new_dropped > 0 )
{
local new_recvd = ns$pkts_recvd - last_stat$pkts_recvd;
local new_link = ns$pkts_link - last_stat$pkts_link;
NOTICE([$note=DroppedPackets,
$msg=fmt("%d packets dropped after filtering, %d received%s",
new_dropped, new_recvd + new_dropped,
new_link != 0 ?
fmt(", %d on link", new_link) : "")]);
}
# Re-arm with the current snapshot as the new baseline.
schedule stats_collection_interval { net_stats_update(ns) };
}
# Kick off the periodic stats collection with the startup counters.
event bro_init()
{
schedule stats_collection_interval { net_stats_update(net_stats()) };
}

View file

@ -1,408 +0,0 @@
@load udp
module NFS3;
export {
global log_file = open_log_file("nfs") &redef;
global names_log_file = open_log_file("nfs-files") &redef;
global readdir_log = open_log_file("nfs-readdir") &redef;
# We want to estimate how long it takes to lookup a chain of FH (directories)
# until we reach a FH that is used in a read or write operation. Whenever we
# get a new FH, we check how long ago we got the FH's parent. If this is less
# than fh_chain_maxtime, we assume that they belong to a lookup chain and set
# the dt value for the FH accordingly.
global fh_chain_maxtime = 100 msec;
}
redef capture_filters += {
["nfs"] = "port 2049",
# NFS UDP packets are often fragmented.
["nfs-frag"] = "(ip[6:2] & 0x3fff != 0) and udp",
};
global nfs_ports = { 2049/tcp, 2049/udp } &redef;
redef dpd_config += { [ANALYZER_NFS] = [$ports = nfs_ports] };
# Information about a filehandle
type fh_info : record {
id: count; # A unique ID (counter) for more readable representation of the FH
pathname: string &default="@"; # the path leading to this FH
basename: string &default=""; # the name of this FHs file or directory
mimetype: string &default="";
t0: time &default=double_to_time(0); # time when we first saw this FH
dt: interval &default=0 sec; # time it took to get this FH (assuming a chain of
# procedures that ultimately yield the FH for the file
# a client is interested in
chainlen: count &default=0;
attr: fattr_t &optional;
};
# Maps opaque file handles to numbers for easier tracking.
global num_fhs = 0;
global fh_map: table[addr,string] of fh_info;
# Maps connids to number for easier post processing
global num_nfs_conns = 0;
global nfs_conns: table[conn_id] of count;
# Get the FH info, creating a new entry if one doesn't exist yet.
# Look up the fh_info for (server address, opaque FH), creating a fresh
# one on first sight (e.g. a root FH) with a synthetic "@<id>" pathname.
function get_fh_info(c: connection, fh: string): fh_info
{
if ( [c$id$resp_h, fh] !in fh_map )
{
# Don't have a mapping for this FH yet. E.g., a root FH
local newfhinfo: fh_info = [ $id=++num_fhs ];
newfhinfo$pathname = fmt("@%d", newfhinfo$id);
newfhinfo$t0 = network_time();
fh_map[c$id$resp_h, fh] = newfhinfo;
}
return fh_map[c$id$resp_h, fh];
}
# Record a FH -> pathname mapping in the per-file names log.
function log_filename(proc: string, info: fh_info)
{
print names_log_file, fmt("%.6f %s path FH%d %s/%s", network_time(), proc,
info$id, info$pathname, info$basename);
##print fmt("%.6f FH%d <%s> <%s>", network_time(), info$id, info$pathname, info$basename);
}
# Render NFS file attributes as one space-separated log field string.
function fmt_attr(a: fattr_t): string
{
local s = fmt("%s %s %d %d %d %d %d %d %d %d %d %.2f %.2f %.2f",
a$ftype, mode2string(a$mode), a$nlink, a$uid, a$gid, a$size, a$used, a$rdev1, a$rdev2,
a$fsid, a$fileid, a$atime, a$mtime, a$ctime);
return s;
}
# Record a FH's attributes in the names log, but only when they differ
# from the attributes we last saw for that FH.
function log_attributes(c: connection, proc: string, fh: string, attr: fattr_t)
{
local info = get_fh_info(c,fh);
local did_change = F;
# check whether the attributes have changed
if (info?$attr)
{
# We can't compare records for equality :-(. So we use a hack.
# We add the two instance we want to compare to a set. If there
# are two elements in the set, the records are not equal...
local dummy: set[fattr_t];
add dummy[info$attr];
add dummy[attr];
if (|dummy| > 1)
did_change = T;
}
else
# First attributes seen for this FH always count as a change.
did_change=T;
if (did_change)
{
info$attr = attr;
print names_log_file, fmt("%.6f %s attr FH%d %s", network_time(), proc,
info$id, fmt_attr(attr));
}
}
# Update (or add) a filehandle mapping.
# parentfh ... parent (directory)
# name ....... the name for this FH
# fh ......... the new FH
# Update (or add) a filehandle mapping.
# parentfh ... parent (directory) FH; "" when unknown
# name ....... the name for this FH (ignored when "." — self reference)
# fh ......... the new FH
# When the parent was resolved recently (within fh_chain_maxtime), this FH
# is treated as the next link in a lookup chain and inherits/extends the
# parent's accumulated chain time and length.
function add_update_fh(c: connection, proc: string, parentfh: string, name: string, fh: string)
{
local info = get_fh_info(c, fh);
# TODO: we could/should check if we already have a pathname and/or basename
# for this FH and if so whether it matches the parent we just got!
if (name == ".")
return;
info$basename = name;
if (parentfh != "")
{
local parentinfo = get_fh_info(c, parentfh);
info$pathname = cat(parentinfo$pathname, "/", parentinfo$basename);
if ( (network_time() - parentinfo$t0) < fh_chain_maxtime
&& info$dt < 0 sec )
{
# The FH is part of lookup chain and it doesn't yet have a dt value
# TODO: this should probably be moved to get_fh_info(). But then get_fh_info()
# would need information about a FH's parent....
# TODO: We are using network_time(), but we really should use request
# and reply time!!!
info$dt = parentinfo$dt + (network_time() - parentinfo$t0);
info$chainlen = parentinfo$chainlen + 1;
}
}
log_filename(proc, info);
}
# Sniff the MIME type of file data read/written through this FH and log
# it whenever the detected type changes ("X/X" for unidentifiable data).
function set_fh_mimetype(c: connection, fh: string, proc:string, data: string)
{
local info = get_fh_info(c,fh);
local mimetype = identify_data(data, T);
if (info$mimetype != mimetype)
{
info$mimetype = mimetype;
print names_log_file, fmt("%.6f %s type FH%d %s/%s %s", network_time(), proc,
info$id, info$pathname, info$basename, (mimetype!="") ? mimetype : "X/X");
}
}
# Get the lookup-chain length and the total time from the start of the
# chain to the current network time, formatted as "<len> <seconds>".
# Returns "0 0.000000" when no (recent) lookup chain was found.
# Format "<chainlen> <total time>" for the FH's lookup chain, where total
# time includes the gap since the FH was resolved.  Yields "0 0.000000"
# when the FH is too old to belong to an active lookup chain.
function get_fh_chaintime_str(c:connection, fh:string): string
{
local info = get_fh_info(c, fh);
if ((network_time() - info$t0) < fh_chain_maxtime)
return fmt("%d %.6f", info$chainlen, info$dt + (network_time() - info$t0));
else
return fmt("%d %.6f", 0, 0.0);
}
# Get a FH ID
# Human-readable tag ("FH<n>") for an opaque file handle.
function get_fh_id(c:connection, fh: string): string
	{
	local info = get_fh_info(c, fh);
	return fmt("FH%d", info$id);
	}
# Get the basename for the FH
# Last path component recorded for this FH ("" if never named).
function get_fh_basename(c:connection, fh: string): string
{
return get_fh_info(c, fh)$basename;
}
# Get the fullname for the FH
# Full recorded path for this FH: "<pathname>/<basename>".
function get_fh_fullname(c:connection, fh: string): string
	{
	local info = get_fh_info(c, fh);
	return fmt("%s/%s", info$pathname, info$basename);
	}
# Default record-style rendering of NFS attributes (cf. fmt_attr).
function print_attr(attr: fattr_t): string
{
return fmt("%s", attr);
}
# Map each conn_id to a small stable integer for easier post-processing.
function map_conn(cid: conn_id): count
{
if (cid !in nfs_conns)
nfs_conns[cid] = ++num_nfs_conns;
return nfs_conns[cid];
}
# True iff both the RPC layer and the NFS layer report success.
function is_success(info: info_t): bool
	{
	if ( info$rpc_stat != RPC_SUCCESS )
		return F;

	return info$nfs_stat == NFS3ERR_OK;
	}
# True iff the RPC layer succeeded (NFS status may still be an error).
function is_rpc_success(info: info_t): bool
{
return (info$rpc_stat == RPC_SUCCESS);
}
# Build the common log-line prefix for an NFS procedure: request/reply
# timing and sizes, connection 4-tuple, transport protocol, the compact
# per-connection ordinal, the procedure name, and the RPC/NFS status.
# When the RPC layer failed there is no meaningful NFS status, so "X"
# is printed in its place.
function nfs_get_log_prefix(c: connection, info: info_t, proc: string): string
    {
    local nfs_stat_str = (info$rpc_stat == RPC_SUCCESS) ? fmt("%s", info$nfs_stat) : "X";

    return fmt("%.06f %.06f %d %.06f %.06f %d %s %s %d %s %s %s",
        info$req_start, info$req_dur, info$req_len,
        info$rep_start, info$rep_dur, info$rep_len,
        id_string(c$id), get_port_transport_proto(c$id$orig_p),
        map_conn(c$id),
        proc, info$rpc_stat, nfs_stat_str);
    }
# Fallback handler for NFS procedures the analyzer does not decode:
# log the standard prefix tagged "Not_implemented".
event nfs_proc_not_implemented(c: connection, info: info_t, proc: proc_t)
    {
    print log_file, fmt("%s Not_implemented",
        nfs_get_log_prefix(c, info, fmt("%s", proc)));
    }
# NFS NULL procedure (a ping): nothing to report beyond the prefix.
event nfs_proc_null(c: connection, info: info_t)
    {
    print log_file, nfs_get_log_prefix(c, info, "null");
    }
# GETATTR: on success, record the returned attributes for the file
# handle; always log the request with the FH's printable ID.
event nfs_proc_getattr (c: connection, info: info_t, fh: string, attrs: fattr_t)
    {
    local prefix = nfs_get_log_prefix(c, info, "getattr");

    if (is_success(info))
        log_attributes(c, "getattr", fh, attrs);

    print log_file, fmt("%s %s", prefix, get_fh_id(c,fh));
    }
# LOOKUP: resolve a name within a directory.  On failure only the
# request is logged; on success any returned attributes are recorded and
# the (dir FH, name) -> object FH mapping is registered.
event nfs_proc_lookup(c: connection, info: info_t, req: diropargs_t, rep: lookup_reply_t)
    {
    local prefix = nfs_get_log_prefix(c, info, "lookup");

    if (! is_success(info) )
        {
        print log_file, fmt("%s %s + %s", prefix, get_fh_id(c, req$dirfh), req$fname);
        # could print dir_attr, if they are set ....
        return;
        }

    if (rep?$dir_attr)
        log_attributes(c, "lookup", req$dirfh, rep$dir_attr);
    if (is_rpc_success(info) && rep?$obj_attr)
        log_attributes(c, "lookup", rep$fh, rep$obj_attr);

    # NOTE(review): rep$fh is accessed unconditionally below — presumably
    # a fully successful LOOKUP reply always carries the object FH; verify
    # against the lookup_reply_t definition in the core.
    add_update_fh(c, "lookup", req$dirfh, req$fname, rep$fh);
    print log_file, fmt("%s %s + %s => %s", prefix, get_fh_id(c, req$dirfh), req$fname, get_fh_id(c, rep$fh));
    }
# READ: log offset/size; on success also log the bytes returned, the EOF
# marker, and the lookup-chain timing.  If this is the first chunk of
# the file (offset 0), use its data to identify the file's MIME type.
event nfs_proc_read(c: connection, info: info_t, req: readargs_t, rep: read_reply_t)
    {
    local msg = nfs_get_log_prefix(c, info, "read");
    msg = fmt("%s %s @%d: %d", msg, get_fh_id(c, req$fh), req$offset, req$size);

    if (is_success(info))
        {
        msg = fmt("%s got %d bytes %s %s", msg, rep$size, (rep$eof) ? "<eof>" : "x",
            get_fh_chaintime_str(c, req$fh));
        if (rep?$data && req$offset==0 && rep$size>0)
            set_fh_mimetype(c, req$fh, "read", rep$data);
        if (is_rpc_success(info) && rep?$attr)
            log_attributes(c, "read", req$fh, rep$attr);
        }

    print log_file, msg;
    }
# READLINK: on success, log the symlink's target path and record any
# returned attributes for the link's FH.
event nfs_proc_readlink(c: connection, info: info_t, fh: string, rep: readlink_reply_t)
    {
    local msg = nfs_get_log_prefix(c, info, "readlink");
    msg = fmt("%s %s", msg, get_fh_id(c, fh));

    if (is_success(info))
        {
        msg = fmt("%s : %s", msg, rep$nfspath);
        if (rep?$attr)
            log_attributes(c, "readlink", fh, rep$attr);
        }

    print log_file, msg;
    }
# WRITE: log offset/size/stability; on success also log the byte count
# and commit level, plus lookup-chain timing.  The first chunk written
# at offset 0 is used to identify the file's MIME type.
# ("commited" is the field's spelling in the core-provided
# write_reply_t record — do not "fix" it here.)
event nfs_proc_write(c: connection, info: info_t, req: writeargs_t, rep: write_reply_t)
    {
    local msg = nfs_get_log_prefix(c, info, "write");
    msg = fmt("%s %s @%d: %d %s", msg, get_fh_id(c, req$fh), req$offset, req$size, req$stable);

    if (is_success(info))
        {
        msg = fmt("%s wrote %d bytes %s %s", msg, rep$size, rep$commited,
            get_fh_chaintime_str(c, req$fh));
        if (req?$data && req$offset==0 && rep$size>0)
            set_fh_mimetype(c, req$fh, "write", req$data);
        if (rep?$postattr)
            log_attributes(c, "write", req$fh, rep$postattr);
        }

    print log_file, msg;
    }
# Common handling for NFS procedures that create a new filesystem object
# (create, mkdir): log attributes, register the new FH under its name,
# and write a summary log line.
#
# Bug fix: the reply's FH is optional (the code already computed
# newfh_str with a rep?$fh guard, then never used it).  Previously
# rep$fh was accessed unconditionally in add_update_fh() and the final
# fmt(), which aborts with a missing-field error whenever the server
# omits the FH.  Now all accesses are guarded and "FH??" is printed when
# no FH was returned.
function nfs_newobj(c: connection, info: info_t, proc: string, req: diropargs_t, rep: newobj_reply_t)
    {
    local prefix = nfs_get_log_prefix(c, info, proc);
    local newfh_str: string;

    if (! is_success(info) )
        {
        print log_file, fmt("%s %s + %s", prefix, get_fh_id(c, req$dirfh), req$fname);
        # could print dir_attr, if they are set ....
        return;
        }

    if (is_rpc_success(info) && rep?$dir_post_attr)
        log_attributes(c, proc, req$dirfh, rep$dir_post_attr);
    # TODO: could print dir_pre_attr

    if (is_rpc_success(info) && rep?$fh && rep?$obj_attr)
        log_attributes(c, proc, rep$fh, rep$obj_attr);

    # Only register the name -> FH mapping when the reply carried a FH.
    if (rep?$fh)
        add_update_fh(c, proc, req$dirfh, req$fname, rep$fh);

    newfh_str = (rep?$fh) ? get_fh_id(c, rep$fh) : "FH??";
    print log_file, fmt("%s %s + %s => %s", prefix, get_fh_id(c, req$dirfh), req$fname, newfh_str);
    }
# CREATE: delegated to the common new-object path.
event nfs_proc_create(c: connection, info: info_t, req: diropargs_t, rep: newobj_reply_t)
    {
    # TODO: create request attributes not implemented in core
    nfs_newobj(c, info, "create", req, rep);
    }
# MKDIR: delegated to the common new-object path.
event nfs_proc_mkdir(c: connection, info: info_t, req: diropargs_t, rep: newobj_reply_t)
    {
    # TODO: mkdir request attributes not implemented in core
    nfs_newobj(c, info, "mkdir", req, rep);
    }
# Common handling for NFS procedures that remove a directory entry
# (remove, rmdir): log the target and any post-op directory attributes.
function nfs_delobj(c: connection, info: info_t, proc: string, req: diropargs_t, rep: delobj_reply_t)
    {
    local prefix = nfs_get_log_prefix(c, info, proc);

    print log_file, fmt("%s %s - %s", prefix, get_fh_id(c, req$dirfh), req$fname);

    if (is_rpc_success(info) && rep?$dir_post_attr)
        log_attributes(c, proc, req$dirfh, rep$dir_post_attr);
    # TODO: could print dir_pre_attr
    }
# REMOVE: delete a file; delegated to the common delete-object path.
event nfs_proc_remove(c: connection, info: info_t, req: diropargs_t, rep: delobj_reply_t)
    {
    nfs_delobj(c, info, "remove", req, rep);
    }
# RMDIR: delete a directory; delegated to the common delete-object path.
event nfs_proc_rmdir(c: connection, info: info_t, req: diropargs_t, rep: delobj_reply_t)
    {
    nfs_delobj(c, info, "rmdir", req, rep);
    }
# Format one directory entry from a readdir(plus) reply: file ID, name,
# cookie, and — when present — the entry's printable FH tag.
function fmt_direntry(c: connection, e: direntry_t): string
    {
    local entry_str = fmt("%d %s %d", e$fileid, e$fname, e$cookie);

    if (e?$fh)
        return fmt("%s %s", entry_str, get_fh_id(c, e$fh));

    return entry_str;
    }
# READDIR / READDIRPLUS: write a summary line to the main log and the
# individual entries (plus attributes and FH mappings, when present) to
# the separate readdir log.
event nfs_proc_readdir(c: connection, info: info_t, req: readdirargs_t, rep: readdir_reply_t)
    {
    local isplus = req$isplus;
    local proc = (isplus) ? "readdirplus" : "readdir";
    local msg = nfs_get_log_prefix(c, info, proc);

    msg = fmt("%s %s @%d (%x)", msg, get_fh_id(c, req$dirfh), req$cookie, req$cookieverf);

    if (is_success(info))
        {
        msg = fmt("%s %d entries %d", msg, |rep$entries|, rep$eof);
        print readdir_log, msg;
        for (i in rep$entries)
            {
            local curentry = rep$entries[i];
            # Per-entry attributes and FHs are optional — presumably only
            # readdirplus replies carry them (TODO confirm).
            if (curentry?$attr && curentry?$fh)
                log_attributes(c, proc, curentry$fh, curentry$attr);
            if (curentry?$fh)
                add_update_fh(c, proc, req$dirfh, curentry$fname, curentry$fh);
            print readdir_log,fmt(" %s", fmt_direntry(c, curentry));
            }
        if (rep?$dir_attr)
            log_attributes(c, proc, req$dirfh, rep$dir_attr);
        }
    else if (is_rpc_success(info) && rep?$dir_attr)
        {
        # NFS-level failure can still report directory attributes.
        log_attributes(c, proc, req$dirfh, rep$dir_attr);
        }

    print log_file, msg;
    }
# Drop the per-connection NFS numbering state when a connection goes away.
event connection_state_remove(c: connection)
    {
    if ( c$id in nfs_conns )
        delete nfs_conns[c$id];
    }

View file

@ -1,72 +0,0 @@
# $Id: notice-policy.bro 4758 2007-08-10 06:49:23Z vern $
# Examples of using notice_policy and other mechanisms to filter out
# alarms that are not interesting.
# Note: this file is not self-contained, in that it refers to Notice
# names that will only be defined if you've loaded other files (e.g.,
# print-resources for the ResourceSummary notice). The full list of
# policy files it needs is:
#
#   blaster.bro
#   conn.bro
#   http-request.bro
#   netstats.bro
#   print-resources.bro
#   trw.bro
#   weird.bro

# Remove these notices from logging since they can be too noisy.
redef notice_action_filters += {
    [[Weird::ContentGap, Weird::AckAboveHole]] = ignore_notice,
};

# Send these only to the notice log, not the alarm log.
redef notice_action_filters += {
    [[Drop::AddressDropIgnored, DroppedPackets,
    ResourceSummary, W32B_SourceRemote,
    TRW::TRWScanSummary, Scan::BackscatterSeen,
    Weird::WeirdActivity,
    Weird::RetransmissionInconsistency]] = file_notice,
};

# Other example use of notice_action_filters:
#
# To just get a summary Notice when Bro is shutdown/checkpointed, use
# tally_notice_type, such as:
#redef notice_action_filters += {
#   [[RetransmissionInconsistency, ContentGap, AckAboveHole]] =
#       tally_notice_type,
#};

# To get a summary once every hour per originator, use notice_alarm_per_orig,
# such as:
#redef notice_action_filters += {
#   [[ BackscatterSeen, RetransmissionInconsistency]] =
#       notice_alarm_per_orig,
#};

# Fine-grained filtering of specific alarms.
redef notice_policy += {
    # Connections to 2766/tcp ("Solaris listen service") appear
    # nearly always actually due to P2P apps.
    [$pred(n: notice_info) =
        {
        return n$note == SensitiveConnection &&
            /Solaris listen service/ in n$msg;
        },
    $result = NOTICE_FILE,
    $priority = 1],

    # Ignore sensitive URLs that end in .gif, .jpg, .png
    [$pred(n: notice_info) =
        {
        return n$note == HTTP::HTTP_SensitiveURI &&
            n$URL == /.*\.(gif|GIF|png|PNG|jpg|JPG)/;
        },
    $result = NOTICE_FILE,
    $priority = 1],
};

View file

@ -1,53 +0,0 @@
# $Id: ntp.bro 4758 2007-08-10 06:49:23Z vern $

@load udp-common

redef capture_filters += { ["ntp"] = "udp port 123" };

module NTP;

export {
    # Flag NTP requests whose extra payload exceeds this many bytes.
    const excessive_ntp_request = 48 &redef;
    # Hosts allowed to send oversized NTP requests without being marked.
    const allow_excessive_ntp_requests: set[addr] &redef;
}

# DPM configuration.
global ntp_ports = { 123/udp } &redef;
redef dpd_config += { [ANALYZER_NTP] = [$ports = ntp_ports] };

# Symbolic names for NTP mode codes.
const ntp_code: table[count] of string = {
    [0] = "unspec",
    [1] = "sym_act",
    [2] = "sym_psv",
    [3] = "client",
    [4] = "server",
    [5] = "bcast",
    [6] = "rsv1",
    [7] = "rsv2",
};

# Maintain request/reply counts, run hot/scan checks the first time a
# flow is seen, and mark connections carrying oversized NTP requests.
event ntp_message(u: connection, msg: ntp_msg, excess: string)
    {
    local id = u$id;

    if ( id !in udp_rep_count && id !in udp_req_count )
        {
        Hot::check_hot(u, Hot::CONN_ATTEMPTED);
        Scan::check_scan(u, F, F);
        }

    if ( msg$code == 4 )
        # "server"
        ++udp_rep_count[id];
    else
        # anything else
        ++udp_req_count[id];

    local n_excess = byte_len(excess);
    if ( n_excess > excessive_ntp_request &&
        id$orig_h !in allow_excessive_ntp_requests )
        {
        append_addl_marker(u, fmt("%s", n_excess), ",");
        ++u$hot;
        }
    }

View file

@ -1,29 +0,0 @@
# $Id: passwords.bro 688 2004-11-02 23:59:55Z vern $

# Generates notices of exposed passwords. Currently just works
# on telnet/rlogin access. Should be extended to do FTP, HTTP, etc.

@load login

redef enum Notice += {
    PasswordExposed,
};

# Usernames which we ignore.
global okay_usernames: set[string] &redef;

# Passwords which we ignore.
global okay_passwords = { "", "<none>" } &redef;

# Raise a PasswordExposed notice (with the password in $sub) for any
# successful login whose username/password is not whitelisted above.
event login_success(c:connection, user: string, client_user: string,
        password: string, line: string)
    {
    if ( user in okay_usernames || password in okay_passwords )
        return;

    NOTICE([$note=PasswordExposed,
        $conn=c,
        $user=user,
        $sub=password,
        $msg="login exposed user's password"]);
    }

View file

@ -1,84 +0,0 @@
# $Id: peer-status.bro 5954 2008-07-15 00:07:50Z vern $
#
# Emits process status "update" event periodically.

module PeerStatus;

export {
    type peer_status: record {
        res: bro_resources;
        stats: net_stats;
        current_time: time;
        cpu: double;    # average CPU load since last update
        default_filter: string; # default capture filter
    };

    # Event sent periodically.
    global update: event(status: peer_status);

    # Update interval.
    const update_interval = 1 min;

    # This keeps track of all (local and remote) updates
    # (indexed by peer ID).
    global peers: table[peer_id] of peer_status;
}

# NOTE(review): start_time is never referenced in this file — candidate
# for removal.
global start_time = 0;
global cpu_last_proc_time = 0 secs;
global cpu_last_wall_time: time = 0;
global stats: net_stats;
global default_filter : string;

# Cache the most recent packet-level statistics for the next update.
event net_stats_update(t: time, ns: net_stats)
    {
    stats = ns;
    }

# Compute the CPU load since the previous update, assemble a peer_status
# record, raise PeerStatus::update, and reschedule itself.
event emit_update()
    {
    # Get CPU load.
    local res = resource_usage();
    local proc_time = res$user_time + res$system_time;
    local wall_time = current_time();
    local dproc = proc_time - cpu_last_proc_time;
    local dwall = wall_time - cpu_last_wall_time;
    local load = dproc / dwall * 100.0;

    cpu_last_proc_time = proc_time;
    cpu_last_wall_time = wall_time;

    local status: peer_status;
    status$res = res;
    status$stats = stats;
    status$current_time = current_time();
    status$cpu = load;
    status$default_filter = default_filter;
    event PeerStatus::update(status);

    schedule update_interval { emit_update() };
    }

# Establish CPU/time baselines and kick off the periodic update cycle.
event bro_init()
    {
    default_filter = build_default_pcap_filter();
    local res = resource_usage();
    cpu_last_proc_time = res$user_time + res$system_time;
    cpu_last_wall_time = current_time();
    stats = [$pkts_recvd=0, $pkts_dropped=0, $pkts_link=0];
    schedule update_interval { emit_update() };
    }

# Record the latest status report (local or remote) per peer.
event update(status: peer_status)
    {
    local peer = get_event_peer();
    peers[peer$id] = status;
    }

# Drop state for peers that disconnect.
event remote_connection_closed(p: event_peer)
    {
    if ( p$id in peers )
        delete peers[p$id];
    }

View file

@ -1,5 +0,0 @@
# $Id: pkt-profile.bro 325 2004-09-03 01:33:15Z vern $

# Enable packet profiling in per-second mode, written to the
# "pkt-prof" log file.
redef pkt_profile_file = open_log_file("pkt-prof");
redef pkt_profile_mode = PKT_PROFILE_MODE_SECS;
redef pkt_profile_freq = 1.0;

View file

@ -1,155 +0,0 @@
# $Id: pop3.bro 4758 2007-08-10 06:49:23Z vern $
#
# Analyzer for Post Office Protocol, version 3.
#
# If you want to decode the mail itself, also load mime-pop.bro.

@load login

module POP3;

export {
    # Report if source triggers more ERR messages.
    const error_threshold: count = 3 &redef;

    # Don't log these commands.
    const ignore_commands: set[string] = { "STAT" } &redef;
}

redef capture_filters += { ["pop3"] = "port 110" };

global pop3_ports = { 110/tcp } &redef;
redef dpd_config += { [ANALYZER_POP3] = [$ports = pop3_ports] };

const log_file = open_log_file("pop3") &redef;

# Per-session state, indexed by connection ID.
type pop3_session_info: record {
    id: count;  # Unique session ID.
    quit_sent: bool;    # Client issued a QUIT.
    last_command: string;   # Last command of client.
};

# Forward declarations (definitions at the bottom of the file).
global pop_log: function(conn: pop3_session_info,
        command: string, message: string);
global get_connection: function(id: conn_id): pop3_session_info;

global pop_connections:
    table[conn_id] of pop3_session_info &read_expire = 60 mins;
# Per-originator count of ERR replies seen.
global pop_connection_weirds:
    table[addr] of count &default=0 &read_expire = 60 mins;
global pop_session_id = 0;

# Log client commands and remember the last one (needed to decide
# whether the matching reply should be suppressed).
event pop3_request(c: connection, is_orig: bool, command: string, arg: string)
    {
    local conn = get_connection(c$id);

    pop_log(conn, command, fmt("%.6f #%s > %s %s",
        network_time(), prefixed_id(conn$id), command, arg));

    conn$last_command = command;

    if ( command == "QUIT" )
        conn$quit_sent = T;
    }

# Log server replies; tear down state after an acknowledged QUIT and
# warn once an originator exceeds the ERR threshold.
event pop3_reply(c: connection, is_orig: bool, cmd: string, msg: string)
    {
    local conn = get_connection(c$id);

    pop_log(conn, cmd,
        fmt("%.6f #%s < %s %s", network_time(), prefixed_id(conn$id), cmd, msg));

    if ( cmd == "OK" )
        {
        if ( conn$quit_sent )
            delete pop_connections[c$id];
        }

    else if ( cmd == "ERR" )
        {
        ++pop_connection_weirds[c$id$orig_h];
        if ( pop_connection_weirds[c$id$orig_h] > error_threshold )
            print log_file, fmt("%.6f #%s %s/%d > %s/%d WARNING: error count exceeds threshold",
                network_time(), prefixed_id(conn$id),
                c$id$orig_h, c$id$orig_p,
                c$id$resp_h, c$id$resp_p);
        }
    }

# Log successful logins and re-raise the generic login_success event so
# other policies (e.g. passwords.bro) can act on it.
event pop3_login_success(c: connection, is_orig: bool,
        user: string, password: string)
    {
    local conn = get_connection(c$id);
    local pw = byte_len(password) != 0 ? password : "<not seen>";

    print log_file, fmt("%.6f #%s > login successful: user %s password: %s",
        network_time(), prefixed_id(conn$id), user, pw);

    event login_success(c, user, "", password, "");
    }

# Log failed logins and re-raise the generic login_failure event.
event pop3_login_failure(c: connection, is_orig: bool,
        user: string, password: string)
    {
    local conn = get_connection(c$id);

    print log_file, fmt("%.6f #%s > login failed: user %s password: %s",
        network_time(), prefixed_id(conn$id), user, password);

    event login_failure(c, user, "", password, "");
    }

# event pop3_data(c: connection, is_orig: bool, data: string)
#   {
#   # We could instantiate partial connections here if we wished,
#   # but at considerable cost in terms of event counts.
#   local conn = get_connection(c$id);
#   }

# Log protocol-level anomalies reported by the analyzer.
event pop3_unexpected(c: connection, is_orig: bool, msg: string, detail: string)
    {
    local conn = get_connection(c$id);
    print log_file, fmt("%.6f #%s unexpected cmd: %s detail: %s",
        network_time(), prefixed_id(conn$id),
        msg, detail);
    }

# Discard session state when the session ends.
event pop3_terminate(c: connection, is_orig: bool, msg: string)
    {
    delete pop_connections[c$id];
    }

# Write a log line unless the command — or the reply to the previous
# command — is in ignore_commands.
function pop_log(conn: pop3_session_info, command: string, message: string)
    {
    if ( command !in ignore_commands )
        {
        if ( (command == "OK" || command == "ERR") &&
            conn$last_command in ignore_commands )
            ;   # reply to an ignored command — suppress it, too
        else
            print log_file, message;
        }
    }

# Look up (or lazily create and log) the session state for a connection.
function get_connection(id: conn_id): pop3_session_info
    {
    if ( id in pop_connections )
        return pop_connections[id];

    local conn: pop3_session_info;
    conn$id = ++pop_session_id;
    conn$quit_sent = F;
    conn$last_command = "INIT";

    pop_connections[id] = conn;

    print log_file, fmt("%.6f #%s %s/%d > %s/%d: new connection",
        network_time(), prefixed_id(conn$id),
        id$orig_h, id$orig_p, id$resp_h, id$resp_p);

    return conn;
    }

View file

@ -1,63 +0,0 @@
# Well-known port -> service-name mappings, used by endpoint_id() below
# to label endpoints with a human-readable service instead of a number.
const port_names: table[port] of string = {
    [0/icmp] = "icmp-echo",
    [3/icmp] = "icmp-unreach",
    [8/icmp] = "icmp-echo",
    [7/tcp] = "echo",
    [9/tcp] = "discard",
    [20/tcp] = "ftp-data",
    [21/tcp] = "ftp",
    [22/tcp] = "ssh",
    [23/tcp] = "telnet",
    [25/tcp] = "smtp",
    [37/tcp] = "time",
    [43/tcp] = "whois",
    [53/tcp] = "dns",
    [79/tcp] = "finger",
    [80/tcp] = "http",
    [109/tcp] = "pop-2",
    [110/tcp] = "pop-3",
    [111/tcp] = "portmap",
    [113/tcp] = "ident",
    [119/tcp] = "nntp",
    [135/tcp] = "epmapper",
    [139/tcp] = "netbios-ssn",
    [143/tcp] = "imap4",
    [179/tcp] = "bgp",
    [389/tcp] = "ldap",
    [443/tcp] = "https",
    [445/tcp] = "smb",
    [512/tcp] = "exec",
    [513/tcp] = "rlogin",
    [514/tcp] = "shell",
    [515/tcp] = "printer",
    [524/tcp] = "ncp",
    [543/tcp] = "klogin",
    [544/tcp] = "kshell",
    [631/tcp] = "ipp",
    [993/tcp] = "simap",
    [995/tcp] = "spop",
    [1521/tcp] = "oracle-sql",
    [2049/tcp] = "nfs",
    [6000/tcp] = "X11",
    [6001/tcp] = "X11",
    [6667/tcp] = "IRC",
    [53/udp] = "dns",
    [69/udp] = "tftp",
    [111/udp] = "portmap",
    [123/udp] = "ntp",
    [137/udp] = "netbios-ns",
    [138/udp] = "netbios-dgm",
    [161/udp] = "snmp",
    [2049/udp] = "nfs",
} &redef;
# Render an endpoint as "host/service" when the port is well known,
# otherwise as "host/port-number".
function endpoint_id(h: addr, p: port): string
    {
    if ( p !in port_names )
        return fmt("%s/%d", h, p);

    return fmt("%s/%s", h, port_names[p]);
    }

View file

@ -1,468 +0,0 @@
# $Id: portmapper.bro 4758 2007-08-10 06:49:23Z vern $

@load notice
@load hot
@load conn
@load weird
@load scan

module Portmapper;

export {
    redef enum Notice += {
        # Some combination of the service looked up, the host doing the
        # request, and the server contacted is considered sensitive.
        SensitivePortmapperAccess,
    };

    # RPC program number -> name.
    # Kudos to Job de Haas for a lot of these entries.
    const rpc_programs = {
        [200] = "aarp",
        [100000] = "portmapper", [100001] = "rstatd",
        [100002] = "rusersd", [100003] = "nfs", [100004] = "ypserv",
        [100005] = "mountd", [100007] = "ypbind", [100008] = "walld",
        [100009] = "yppasswdd", [100010] = "etherstatd",
        [100011] = "rquotad", [100012] = "sprayd",
        [100013] = "3270_mapper", [100014] = "rje_mapper",
        [100015] = "selection_svc", [100016] = "database_svc",
        [100017] = "rexd", [100018] = "alis", [100019] = "sched",
        [100020] = "llockmgr", [100021] = "nlockmgr",
        [100022] = "x25.inr", [100023] = "statmon",
        [100024] = "status", [100026] = "bootparam",
        [100028] = "ypupdate", [100029] = "keyserv",
        [100033] = "sunlink_mapper", [100036] = "pwdauth",
        [100037] = "tfsd", [100038] = "nsed",
        [100039] = "nsemntd", [100041] = "pnpd",
        [100042] = "ipalloc", [100043] = "filehandle",
        [100055] = "ioadmd", [100062] = "NETlicense",
        [100065] = "sunisamd", [100066] = "debug_svc",
        [100068] = "cms", [100069] = "ypxfrd",
        [100071] = "bugtraqd", [100078] = "kerbd",
        [100083] = "tooltalkdb", [100087] = "admind",
        [100099] = "autofsd",
        [100101] = "event", [100102] = "logger", [100104] = "sync",
        [100105] = "diskinfo", [100106] = "iostat",
        [100107] = "hostperf", [100109] = "activity",
        [100111] = "lpstat", [100112] = "hostmem",
        [100113] = "sample", [100114] = "x25", [100115] = "ping",
        [100116] = "rpcnfs", [100117] = "hostif", [100118] = "etherif",
        [100119] = "ippath", [100120] = "iproutes",
        [100121] = "layers", [100122] = "snmp", [100123] = "traffic",
        [100131] = "layers2", [100135] = "etherif2",
        [100136] = "hostmem2", [100137] = "iostat2",
        [100138] = "snmpv2", [100139] = "sender",
        [100221] = "kcms", [100227] = "nfs_acl", [100229] = "metad",
        [100230] = "metamhd", [100232] = "sadmind", [100233] = "ufsd",
        [100235] = "cachefsd", [100249] = "snmpXdmid",
        [100300] = "nisd", [100301] = "nis_cache",
        [100302] = "nis_callback", [100303] = "nispasswd",
        [120126] = "nf_snmd", [120127] = "nf_snmd",
        [150001] = "pcnfsd",
        [300004] = "frameuser", [300009] = "stdfm", [300019] = "amd",
        [300433] = "bssd", [300434] = "drdd",
        [300598] = "dmispd",
        [390100] = "prestoctl_svc",
        [390600] = "arserverd", [390601] = "ntserverd",
        [390604] = "arservtcd",
        [391000] = "SGI_snoopd", [391001] = "SGI_toolkitbus",
        [391002] = "SGI_fam", [391003] = "SGI_notepad",
        [391004] = "SGI_mountd", [391005] = "SGI_smtd",
        [391006] = "SGI_pcsd", [391007] = "SGI_nfs",
        [391008] = "SGI_rfind", [391009] = "SGI_pod",
        [391010] = "SGI_iphone", [391011] = "SGI_videod",
        [391012] = "SGI_testcd", [391013] = "SGI_ha_hb",
        [391014] = "SGI_ha_nc", [391015] = "SGI_ha_appmon",
        [391016] = "SGI_xfsmd", [391017] = "SGI_mediad",
        # 391018 - 391063 = "SGI_reserved"
        [545580417] = "bwnfsd",
        [555555554] = "inetray.start", [555555555] = "inetray",
        [555555556] = "inetray", [555555557] = "inetray",
        [555555558] = "inetray", [555555559] = "inetray",
        [555555560] = "inetray",
        [600100069] = "fypxfrd",
        [1342177279] = "Solaris/CDE", # = 0x4fffffff

        # Some services that choose numbers but start often at these values.
        [805306368] = "dmispd",
        [824395111] = "cfsd", [1092830567] = "cfsd",
    } &redef;

    # RPC service names considered part of NFS.
    const NFS_services = {
        "mountd", "nfs", "pcnfsd", "nlockmgr", "rquotad", "status"
    } &redef;

    # Indexed by the host providing the service, the host requesting it,
    # and the service.
    const RPC_okay: set[addr, addr, string] &redef;
    const RPC_okay_nets: set[subnet] &redef;
    const RPC_okay_services: set[string] &redef;
    const NFS_world_servers: set[addr] &redef;

    # Indexed by the portmapper request and a boolean that's T if
    # the request was answered, F it was attempted but not answered.
    # If there's an entry in the set, then the access won't lead to a
    # NOTICE (unless the connection is hot for some other reason).
    const RPC_do_not_complain: set[string, bool] = {
        ["pm_null", [T, F]],
    } &redef;

    # Indexed by the host requesting the dump and the host from which it's
    # requesting it.
    const RPC_dump_okay: set[addr, addr] &redef;

    # Indexed by the host providing the service - any host can request it.
    const any_RPC_okay = {
        [NFS_world_servers, NFS_services],
        [sun-rpc.mcast.net, "ypserv"], # sigh
    } &redef;

    # Logs all portmapper activity as readable "messages"
    # Format: timestamp orig_p resp_h resp_p proto localInit PortmapProcedure success details
    const log_file = open_log_file("portmapper") &redef;

    # Logs all portmapper mappings that we observe (i.e., getport and
    # dump replies. Format:
    # timestamp orig_h orig_p resp_h resp_p proto localInit PortmapProcedure RPCprogram version port proto
    # the mapping is then: <resp_h> accepts <RPCprogram> with <version>
    # calls on <port> <proto>. We learned this mapping via <PortmapProcedure>
    const mapping_log_file = open_log_file("portmapper-maps") &redef;
}
redef capture_filters += { ["portmapper"] = "port 111" };

const portmapper_ports = { 111/tcp, 111/udp } &redef;
redef dpd_config += { [ANALYZER_PORTMAPPER] = [$ports = portmapper_ports] };

# Indexed by source and destination addresses, plus the portmapper service.
# If the tuple is in the set, then we already created a NOTICE for it and
# shouldn't do so again.
global did_pm_notice: set[addr, addr, string];

# Indexed by source and portmapper service. If set, we already created
# a notice and shouldn't do so again.
global suppress_pm_notice: set[addr, string];

# Downgrade RPC-related "weird" events from whitelisted nets to
# file-only logging.
function RPC_weird_action_filter(c: connection): Weird::WeirdAction
    {
    if ( c$id$orig_h in RPC_okay_nets )
        return Weird::WEIRD_FILE;
    else
        return Weird::WEIRD_UNSPECIFIED;
    }

redef Weird::weird_action_filters += {
    [["bad_RPC", "excess_RPC", "multiple_RPCs", "partial_RPC"]] =
        RPC_weird_action_filter,
};
# Map an RPC program number to its symbolic name, falling back to
# "unknown-<number>" for programs not in rpc_programs.
function rpc_prog(p: count): string
    {
    return (p in rpc_programs) ? rpc_programs[p] : fmt("unknown-%d", p);
    }
# Render a connection as "orig_h orig_p resp_h resp_p proto L|X", where
# the trailing flag says whether the originator is a local address.
function pm_get_conn_string(cid: conn_id) : string
    {
    return fmt("%s %d %s %d %s %s",
        cid$orig_h, cid$orig_p,
        cid$resp_h, cid$resp_p,
        get_port_transport_proto(cid$resp_p),
        is_local_addr(cid$orig_h) ? "L" : "X"
        );
    }

# Log a pm_request or pm_attempt to the log file
function pm_log(r: connection, proc: string, msg: string, success: bool)
    {
    print log_file, fmt("%f %s %s %s %s", network_time(),
        pm_get_conn_string(r$id),
        proc, success, msg);
    }

# Log portmapper mappings received from a dump procedure
function pm_log_mapping_dump(r: connection, m: pm_mappings)
    {
    # TODO: sort by program and version
    for ( mp in m )
        {
        local prog = rpc_prog(m[mp]$program);
        local ver = m[mp]$version;
        local p = m[mp]$p;
        print mapping_log_file, fmt("%f %s pm_dump %s %d %d %s", network_time(),
            pm_get_conn_string(r$id),
            prog, ver, p, get_port_transport_proto(p));
        }
    }

# Log portmapper mappings received from a getport procedure
# Unfortunately, pm_request_getport doesn't return pm_mapping,
# but returns the parameters separately ....
function pm_log_mapping_getport(r: connection, pr: pm_port_request, p: port)
    {
    local prog = rpc_prog(pr$program);
    local ver = pr$version;
    print mapping_log_file, fmt("%f %s pm_getport %s %d %d %s", network_time(),
        pm_get_conn_string(r$id),
        prog, ver, p, get_port_transport_proto(p));
    }

# Decide whether a getport for `prog` should generate a notice:
# returns T (do notice) unless the service, server, requester, or the
# requester's network is whitelisted.
function pm_check_getport(r: connection, prog: string): bool
    {
    if ( prog in RPC_okay_services ||
        [r$id$resp_h, prog] in any_RPC_okay ||
        [r$id$resp_h, r$id$orig_h, prog] in RPC_okay )
        return F;

    if ( r$id$orig_h in RPC_okay_nets )
        return F;

    return T;
    }
# Raise SensitivePortmapperAccess at most once per (orig, resp, proc)
# triple, and never for suppressed (orig, proc) pairs.
function pm_activity(r: connection, do_notice: bool, proc: string)
    {
    local id = r$id;

    if ( do_notice &&
        [id$orig_h, id$resp_h, proc] !in did_pm_notice &&
        [id$orig_h, proc] !in suppress_pm_notice )
        {
        NOTICE([$note=SensitivePortmapperAccess, $conn=r,
            $msg=fmt("rpc: %s %s: %s",
                id_string(r$id), proc, r$addl)]);
        add did_pm_notice[id$orig_h, id$resp_h, proc];
        }
    }

# Common handling for an *answered* portmapper request: scan/hot checks
# (for UDP), connection annotation, optional notice, and logging.
function pm_request(r: connection, proc: string, addl: string, do_notice: bool)
    {
    if ( [proc, T] in RPC_do_not_complain )
        do_notice = F;

    if ( ! is_tcp_port(r$id$orig_p) )
        {
        # It's UDP, so no connection_established event - check for
        # scanning, hot access, here, instead.
        Scan::check_scan(r, T, F);
        Hot::check_hot(r, Hot::CONN_ESTABLISHED);
        }

    if ( r$addl == "" )
        r$addl = addl;
    else
        {
        if ( byte_len(r$addl) > 80 )
            {
            # r already has a lot of annotation. We can sometimes
            # get *zillions* of successive pm_request's with the
            # same connection ID, depending on how the RPC client
            # behaves. For those, don't add any further, except
            # add an ellipses if we don't already have one.
            append_addl(r, "...");
            }
        else
            append_addl_marker(r, addl, ", ");
        }

    add r$service[proc];
    Hot::check_hot(r, Hot::CONN_FINISHED);

    pm_activity(r, do_notice || r$hot > 0, proc);
    pm_log(r, proc, addl, T);
    }

# NULL request: never sensitive by itself.
event pm_request_null(r: connection)
    {
    pm_request(r, "pm_null", "", F);
    }

# SET: registering a mapping is always notice-worthy.
event pm_request_set(r: connection, m: pm_mapping, success: bool)
    {
    pm_request(r, "pm_set", fmt("%s %d (%s)",
        rpc_prog(m$program), m$p, success ? "ok" : "failed"), T);
    }

# UNSET: removing a mapping is always notice-worthy.
event pm_request_unset(r: connection, m: pm_mapping, success: bool)
    {
    pm_request(r, "pm_unset", fmt("%s %d (%s)",
        rpc_prog(m$program), m$p, success ? "ok" : "failed"), T);
    }

# Record that `server` offers RPC service `prog` on port `p`, appending
# to any names already known for that (server, port).
function update_RPC_server_map(server: addr, p: port, prog: string)
    {
    if ( [server, p] in RPC_server_map )
        {
        if ( prog !in RPC_server_map[server, p] )
            {
            RPC_server_map[server, p] =
                fmt("%s/%s", RPC_server_map[server, p], prog);
            }
        }
    else
        RPC_server_map[server, p] = prog;
    }

# GETPORT: look up the port of an RPC service.
event pm_request_getport(r: connection, pr: pm_port_request, p: port)
    {
    local prog = rpc_prog(pr$program);
    local do_notice = pm_check_getport(r, prog);

    update_RPC_server_map(r$id$resp_h, p, prog);

    pm_request(r, "pm_getport", fmt("%s -> %s", prog, p), do_notice);
    pm_log_mapping_getport(r, pr, p);
    }
# Note, this function has the side effect of updating the
# RPC_server_map
function pm_mapping_to_text(server: addr, m: pm_mappings): string
    {
    # Used to suppress multiple entries for multiple versions.
    local mapping_seen: set[count, port];
    local addls: vector of string;
    local num_addls = 0;

    for ( mp in m )
        {
        local prog = m[mp]$program;
        local p = m[mp]$p;

        if ( [prog, p] !in mapping_seen )
            {
            add mapping_seen[prog, p];
            addls[num_addls] = fmt("%s -> %s", rpc_prog(prog), p);
            ++num_addls;
            update_RPC_server_map(server, p, rpc_prog(prog));
            }
        }

    local addl_str = fmt("%s", sort(addls, strcmp));

    # Lop off surrounding []'s for compatibility with previous
    # format.
    addl_str = sub(addl_str, /^\[/, "");
    addl_str = sub(addl_str, /\]$/, "");

    return addl_str;
    }

# DUMP: list all registered mappings; sensitive unless whitelisted in
# RPC_dump_okay.
event pm_request_dump(r: connection, m: pm_mappings)
    {
    local do_notice = [r$id$orig_h, r$id$resp_h] !in RPC_dump_okay;
    # pm_mapping_to_text has the side-effect of updating RPC_server_map
    pm_request(r, "pm_dump",
        length(m) == 0 ? "(nil)" : pm_mapping_to_text(r$id$resp_h, m),
        do_notice);
    pm_log_mapping_dump(r, m);
    }

# CALLIT: indirect RPC call via the portmapper.  walld callit traffic is
# suppressed after the first notice per originator.
event pm_request_callit(r: connection, call: pm_callit_request, p: port)
    {
    local orig_h = r$id$orig_h;
    local prog = rpc_prog(call$program);
    local do_notice = [orig_h, prog] !in suppress_pm_notice;

    pm_request(r, "pm_callit", fmt("%s/%d (%d bytes) -> %s",
        prog, call$proc, call$arg_size, p), do_notice);

    if ( prog == "walld" )
        add suppress_pm_notice[orig_h, prog];
    }
# Common handling for an *unanswered* portmapper request (an attempt):
# scan/hot checks for UDP, annotation with the RPC status, and logging.
# Current policy never raises a notice for failed attempts.
function pm_attempt(r: connection, proc: string, status: rpc_status,
        addl: string, do_notice: bool)
    {
    if ( [proc, F] in RPC_do_not_complain )
        do_notice = F;

    if ( ! is_tcp_port(r$id$orig_p) )
        {
        # It's UDP, so no connection_attempt event - check for
        # scanning here, instead.
        Scan::check_scan(r, F, F);
        Hot::check_hot(r, Hot::CONN_ATTEMPTED);
        }

    add r$service[proc];
    append_addl(r, fmt("(%s)", RPC_status[status]));

    # Current policy is ignore any failed attempts.
    pm_activity(r, F, proc);
    pm_log(r, proc, addl, F);
    }

# Failed NULL attempt.
event pm_attempt_null(r: connection, status: rpc_status)
    {
    pm_attempt(r, "pm_null", status, "", T);
    }

# Failed SET attempt.
event pm_attempt_set(r: connection, status: rpc_status, m: pm_mapping)
    {
    pm_attempt(r, "pm_set", status, fmt("%s %d", rpc_prog(m$program), m$p), T);
    }

# Failed UNSET attempt.
event pm_attempt_unset(r: connection, status: rpc_status, m: pm_mapping)
    {
    pm_attempt(r, "pm_unset", status, fmt("%s %d", rpc_prog(m$program), m$p), T);
    }

# Failed GETPORT attempt.
event pm_attempt_getport(r: connection, status: rpc_status, pr: pm_port_request)
    {
    local prog = rpc_prog(pr$program);
    local do_notice = pm_check_getport(r, prog);

    pm_attempt(r, "pm_getport", status, prog, do_notice);
    }

# Failed DUMP attempt.
event pm_attempt_dump(r: connection, status: rpc_status)
    {
    local do_notice = [r$id$orig_h, r$id$resp_h] !in RPC_dump_okay;
    pm_attempt(r, "pm_dump", status, "", do_notice);
    }

# Failed CALLIT attempt; walld is suppressed per originator as above.
event pm_attempt_callit(r: connection, status: rpc_status,
        call: pm_callit_request)
    {
    local orig_h = r$id$orig_h;
    local prog = rpc_prog(call$program);
    local do_notice = [orig_h, prog] !in suppress_pm_notice;

    pm_attempt(r, "pm_callit", status,
        fmt("%s/%d (%d bytes)", prog, call$proc, call$arg_size),
        do_notice);

    if ( prog == "walld" )
        add suppress_pm_notice[orig_h, prog];
    }

# Malformed port in a portmapper reply: surface it as a weird.
event pm_bad_port(r: connection, bad_p: count)
    {
    event conn_weird_addl("bad_pm_port", r, fmt("port %d", bad_p));
    }

View file

@ -1,26 +0,0 @@
# $Id: print-filter.bro 4506 2007-06-27 14:40:34Z vern $

# Prints the pcap filter Bro would install, then (optionally) exits.
module PrintFilter;

export {
    # If true, terminate Bro after printing the filter.
    const terminate_bro = T &redef;

    # If true, write to log file instead of stdout.
    const to_file = F &redef;
}

event bro_init()
    {
    if ( to_file )
        {
        local f = open_log_file("pcap_filter");
        print f, build_default_pcap_filter();
        close(f);
        }
    else
        print build_default_pcap_filter();

    if ( terminate_bro )
        exit();
    }

View file

@ -1,4 +0,0 @@
# Dump the sizes of all global script variables at shutdown (useful for
# tracking down script-level memory usage).
event bro_done()
    {
    print global_sizes();
    }

View file

@ -1,21 +0,0 @@
# $Id: print-resources.bro 6703 2009-05-13 22:27:44Z vern $

# Logs Bro resource usage information upon termination.

@load notice

redef enum Notice += {
    ResourceSummary,    # Notice type for this event
};

# Summarize run time, CPU, memory and peak state counts at shutdown.
event bro_done()
    {
    local res = resource_usage();

    NOTICE([$note=ResourceSummary,
        $msg=fmt("elapsed time = %s, total CPU = %s, maximum memory = %d KB, peak connections = %d, peak timers = %d, peak fragments = %d",
            res$real_time, res$user_time + res$system_time,
            res$mem / 1024,
            res$max_TCP_conns + res$max_UDP_conns + res$max_ICMP_conns,
            res$max_timers, res$max_fragments)]);
    }

View file

@ -1,18 +0,0 @@
# $Id: print-sig-states.bro 491 2004-10-05 05:44:59Z vern $
#
# Simple profiling script for periodicaly dumping out signature-matching
# statistics.

global sig_state_stats_interval = 5 mins;
global sig_state_file = open_log_file("sig-states");

# Periodically dump rule-matcher statistics and reschedule itself.
event dump_sig_state_stats()
    {
    dump_rule_stats(sig_state_file);
    schedule sig_state_stats_interval { dump_sig_state_stats() };
    }

# Start the periodic dump cycle.
event bro_init()
    {
    schedule sig_state_stats_interval { dump_sig_state_stats() };
    }

Some files were not shown because too many files have changed in this diff Show more