commit 89b4d79f93
Merge remote-tracking branch 'origin/master' into topic/seth/file-entropy

# Conflicts:
#	scripts/test-all-policy.bro
#	testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log

1081 changed files with 38403 additions and 11012 deletions
@@ -53,7 +53,8 @@ function set_limit(f: fa_file, args: Files::AnalyzerArgs, n: count): bool
 function on_add(f: fa_file, args: Files::AnalyzerArgs)
     {
     if ( ! args?$extract_filename )
-        args$extract_filename = cat("extract-", f$source, "-", f$id);
+        args$extract_filename = cat("extract-", f$last_active, "-", f$source,
+                                    "-", f$id);

     f$info$extracted = args$extract_filename;
     args$extract_filename = build_path_compressed(prefix, args$extract_filename);
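For orientation, a minimal sketch of how a user script might request extraction through this API; attaching the analyzer in file_new and the filename prefix are illustrative assumptions, not part of this change:

# Illustrative sketch: explicitly request extraction for a file. When
# extract_filename is omitted, on_add() above derives one from
# f$last_active, f$source and f$id.
event file_new(f: fa_file)
    {
    Files::add_analyzer(f, Files::ANALYZER_EXTRACT,
                        [$extract_filename=cat("sample-", f$id)]);
    }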
scripts/base/files/pe/README | 1 (new file)
@@ -0,0 +1 @@
Support for Portable Executable (PE) file analysis.
|
scripts/base/files/pe/__load__.bro | 2 (new file)
@@ -0,0 +1,2 @@
@load ./consts
|
||||
@load ./main
|
scripts/base/files/pe/consts.bro | 184 (new file)
@@ -0,0 +1,184 @@
|
||||
module PE;
|
||||
|
||||
export {
|
||||
const machine_types: table[count] of string = {
|
||||
[0x00] = "UNKNOWN",
|
||||
[0x1d3] = "AM33",
|
||||
[0x8664] = "AMD64",
|
||||
[0x1c0] = "ARM",
|
||||
[0x1c4] = "ARMNT",
|
||||
[0xaa64] = "ARM64",
|
||||
[0xebc] = "EBC",
|
||||
[0x14c] = "I386",
|
||||
[0x200] = "IA64",
|
||||
[0x9041] = "M32R",
|
||||
[0x266] = "MIPS16",
|
||||
[0x366] = "MIPSFPU",
|
||||
[0x466] = "MIPSFPU16",
|
||||
[0x1f0] = "POWERPC",
|
||||
[0x1f1] = "POWERPCFP",
|
||||
[0x166] = "R4000",
|
||||
[0x1a2] = "SH3",
|
||||
[0x1a3] = "SH3DSP",
|
||||
[0x1a6] = "SH4",
|
||||
[0x1a8] = "SH5",
|
||||
[0x1c2] = "THUMB",
|
||||
[0x169] = "WCEMIPSV2"
|
||||
} &default=function(i: count):string { return fmt("unknown-%d", i); };
|
||||
|
||||
const file_characteristics: table[count] of string = {
|
||||
[0x1] = "RELOCS_STRIPPED",
|
||||
[0x2] = "EXECUTABLE_IMAGE",
|
||||
[0x4] = "LINE_NUMS_STRIPPED",
|
||||
[0x8] = "LOCAL_SYMS_STRIPPED",
|
||||
[0x10] = "AGGRESSIVE_WS_TRIM",
|
||||
[0x20] = "LARGE_ADDRESS_AWARE",
|
||||
[0x80] = "BYTES_REVERSED_LO",
|
||||
[0x100] = "32BIT_MACHINE",
|
||||
[0x200] = "DEBUG_STRIPPED",
|
||||
[0x400] = "REMOVABLE_RUN_FROM_SWAP",
|
||||
[0x800] = "NET_RUN_FROM_SWAP",
|
||||
[0x1000] = "SYSTEM",
|
||||
[0x2000] = "DLL",
|
||||
[0x4000] = "UP_SYSTEM_ONLY",
|
||||
[0x8000] = "BYTES_REVERSED_HI"
|
||||
} &default=function(i: count):string { return fmt("unknown-%d", i); };
|
||||
|
||||
const dll_characteristics: table[count] of string = {
|
||||
[0x40] = "DYNAMIC_BASE",
|
||||
[0x80] = "FORCE_INTEGRITY",
|
||||
[0x100] = "NX_COMPAT",
|
||||
[0x200] = "NO_ISOLATION",
|
||||
[0x400] = "NO_SEH",
|
||||
[0x800] = "NO_BIND",
|
||||
[0x2000] = "WDM_DRIVER",
|
||||
[0x8000] = "TERMINAL_SERVER_AWARE"
|
||||
} &default=function(i: count):string { return fmt("unknown-%d", i); };
|
||||
|
||||
const windows_subsystems: table[count] of string = {
|
||||
[0] = "UNKNOWN",
|
||||
[1] = "NATIVE",
|
||||
[2] = "WINDOWS_GUI",
|
||||
[3] = "WINDOWS_CUI",
|
||||
[7] = "POSIX_CUI",
|
||||
[9] = "WINDOWS_CE_GUI",
|
||||
[10] = "EFI_APPLICATION",
|
||||
[11] = "EFI_BOOT_SERVICE_DRIVER",
|
||||
[12] = "EFI_RUNTIME_
DRIVER",
|
||||
[13] = "EFI_ROM",
|
||||
[14] = "XBOX"
|
||||
} &default=function(i: count):string { return fmt("unknown-%d", i); };
|
||||
|
||||
const directories: table[count] of string = {
|
||||
[0] = "Export Table",
|
||||
[1] = "Import Table",
|
||||
[2] = "Resource Table",
|
||||
[3] = "Exception Table",
|
||||
[4] = "Certificate Table",
|
||||
[5] = "Base Relocation Table",
|
||||
[6] = "Debug",
|
||||
[7] = "Architecture",
|
||||
[8] = "Global Ptr",
|
||||
[9] = "TLS Table",
|
||||
[10] = "Load Config Table",
|
||||
[11] = "Bound Import",
|
||||
[12] = "IAT",
|
||||
[13] = "Delay Import Descriptor",
|
||||
[14] = "CLR Runtime Header",
|
||||
[15] = "Reserved"
|
||||
} &default=function(i: count):string { return fmt("unknown-%d", i); };
|
||||
|
||||
const section_characteristics: table[count] of string = {
|
||||
[0x8] = "TYPE_NO_PAD",
|
||||
[0x20] = "CNT_CODE",
|
||||
[0x40] = "CNT_INITIALIZED_DATA",
|
||||
[0x80] = "CNT_UNINITIALIZED_DATA",
|
||||
[0x100] = "LNK_OTHER",
|
||||
[0x200] = "LNK_INFO",
|
||||
[0x800] = "LNK_REMOVE",
|
||||
[0x1000] = "LNK_COMDAT",
|
||||
[0x8000] = "GPREL",
|
||||
[0x20000] = "MEM_16BIT",
|
||||
[0x40000] = "MEM_LOCKED",
|
||||
[0x80000] = "MEM_PRELOAD",
|
||||
[0x100000] = "ALIGN_1BYTES",
|
||||
[0x200000] = "ALIGN_2BYTES",
|
||||
[0x300000] = "ALIGN_4BYTES",
|
||||
[0x400000] = "ALIGN_8BYTES",
|
||||
[0x500000] = "ALIGN_16BYTES",
|
||||
[0x600000] = "ALIGN_32BYTES",
|
||||
[0x700000] = "ALIGN_64BYTES",
|
||||
[0x800000] = "ALIGN_128BYTES",
|
||||
[0x900000] = "ALIGN_256BYTES",
|
||||
[0xa00000] = "ALIGN_512BYTES",
|
||||
[0xb00000] = "ALIGN_1024BYTES",
|
||||
[0xc00000] = "ALIGN_2048BYTES",
|
||||
[0xd00000] = "ALIGN_4096BYTES",
|
||||
[0xe00000] = "ALIGN_8192BYTES",
|
||||
[0x1000000] = "LNK_NRELOC_OVFL",
|
||||
[0x2000000] = "MEM_DISCARDABLE",
|
||||
[0x4000000] = "MEM_NOT_CACHED",
|
||||
[0x8000000] = "MEM_NOT_PAGED",
|
||||
[0x10000000] = "MEM_SHARED",
|
||||
[0x20000000] = "MEM_EXECUTE",
|
||||
[0x40000000] = "MEM_READ",
|
||||
[0x80000000] = "MEM_WRITE"
|
||||
} &default=function(i: count):string { return fmt("unknown-%d", i); };
|
||||
|
||||
const os_versions: table[count, count] of string = {
|
||||
[10,0] = "Windows 10",
|
||||
[6,4] = "Windows 10 Technical Preview",
|
||||
[6,3] = "Windows 8.1 or Server 2012 R2",
|
||||
[6,2] = "Windows 8 or Server 2012",
|
||||
[6,1] = "Windows 7 or Server 2008 R2",
|
||||
[6,0] = "Windows Vista or Server 2008",
|
||||
[5,2] = "Windows XP x64 or Server 2003",
|
||||
[5,1] = "Windows XP",
|
||||
[5,0] = "Windows 2000",
|
||||
[4,90] = "Windows Me",
|
||||
[4,10] = "Windows 98",
|
||||
[4,0] = "Windows 95 or NT 4.0",
|
||||
[3,51] = "Windows NT 3.51",
|
||||
[3,50] = "Windows NT 3.5",
|
||||
[3,2] = "Windows 3.2",
|
||||
[3,11] = "Windows for Workgroups 3.11",
|
||||
[3,10] = "Windows 3.1 or NT 3.1",
|
||||
[3,0] = "Windows 3.0",
|
||||
[2,11] = "Windows 2.11",
|
||||
[2,10] = "Windows 2.10",
|
||||
[2,0] = "Windows 2.0",
|
||||
[1,4] = "Windows 1.04",
|
||||
[1,3] = "Windows 1.03",
|
||||
[1,1] = "Windows 1.01",
|
||||
[1,0] = "Windows 1.0",
|
||||
} &default=function(i: count, j: count):string { return fmt("unknown-%d.%d", i, j); };
|
||||
|
||||
const section_descs: table[string] of string = {
|
||||
[".bss"] = "Uninitialized data",
|
||||
[".cormeta"] = "CLR metadata that indicates that the object file contains managed code",
|
||||
[".data"] = "Initialized data",
|
||||
[".debug$F"] = "Generated FPO debug information",
|
||||
[".debug$P"] = "Precompiled debug types",
|
||||
[".debug$S"] = "Debug symbols",
|
||||
[".debug$T"] = "Debug types",
|
||||
[".drective"] = "Linker options",
|
||||
[".edata"] = "Export tables",
|
||||
[".idata"] = "Import tables",
|
||||
[".idlsym"] = "Includes registered SEH to support IDL attributes",
|
||||
[".pdata"] = "Exception information",
|
||||
[".rdata"] = "Read-only initialized data",
|
||||
[".reloc"] = "Image relocations",
|
||||
[".rsrc"] = "Resource directory",
|
||||
[".sbss"] = "GP-relative uninitialized data",
|
||||
[".sdata"] = "GP-relative initialized data",
|
||||
[".srdata"] = "GP-relative read-only data",
|
||||
[".sxdata"] = "Registered exception handler data",
|
||||
[".text"] = "Executable code",
|
||||
[".tls"] = "Thread-local storage",
|
||||
[".tls$"] = "Thread-local storage",
|
||||
[".vsdata"] = "GP-relative initialized data",
|
||||
[".xdata"] = "Exception information",
|
||||
} &default=function(i: string):string { return fmt("unknown-%s", i); };
|
||||
|
||||
}
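Since every table above carries a &default function, lookups never fail; a small illustrative check (not part of the diff):

# Illustrative sketch: known keys map to their names, unknown keys fall
# back to the &default function.
event bro_init()
    {
    print PE::machine_types[0x8664];   # "AMD64"
    print PE::machine_types[0xbeef];   # "unknown-48879"
    print PE::section_descs[".text"];  # "Executable code"
    }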
|
scripts/base/files/pe/main.bro | 137 (new file)
@@ -0,0 +1,137 @@
module PE;
|
||||
|
||||
@load ./consts.bro
|
||||
|
||||
export {
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
type Info: record {
|
||||
## Current timestamp.
|
||||
ts: time &log;
|
||||
## File id of this portable executable file.
|
||||
id: string &log;
|
||||
## The target machine that the file was compiled for.
|
||||
machine: string &log &optional;
|
||||
## The time that the file was created at.
|
||||
compile_ts: time &log &optional;
|
||||
## The required operating system.
|
||||
os: string &log &optional;
|
||||
## The subsystem that is required to run this file.
|
||||
subsystem: string &log &optional;
|
||||
## Is the file an executable, or just an object file?
|
||||
is_exe: bool &log &default=T;
|
||||
## Is the file a 64-bit executable?
|
||||
is_64bit: bool &log &default=T;
|
||||
## Does the file support Address Space Layout Randomization?
|
||||
uses_aslr: bool &log &default=F;
|
||||
## Does the file support Data Execution Prevention?
|
||||
uses_dep: bool &log &default=F;
|
||||
## Does the file enforce code integrity checks?
|
||||
uses_code_integrity: bool &log &default=F;
|
||||
## Does the file use structured exception handing?
|
||||
uses_seh: bool &log &default=T;
|
||||
## Does the file have an import table?
|
||||
has_import_table: bool &log &optional;
|
||||
## Does the file have an export table?
|
||||
has_export_table: bool &log &optional;
|
||||
## Does the file have an attribute certificate table?
|
||||
has_cert_table: bool &log &optional;
|
||||
## Does the file have a debug table?
|
||||
has_debug_data: bool &log &optional;
|
||||
## The names of the sections, in order.
|
||||
section_names: vector of string &log &optional;
|
||||
};
|
||||
|
||||
## Event for accessing logged records.
|
||||
global log_pe: event(rec: Info);
|
||||
|
||||
## A hook that gets called when we first see a PE file.
|
||||
global set_file: hook(f: fa_file);
|
||||
}
|
||||
|
||||
redef record fa_file += {
|
||||
pe: Info &optional;
|
||||
};
|
||||
|
||||
const pe_mime_types = { "application/x-dosexec" };
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Files::register_for_mime_types(Files::ANALYZER_PE, pe_mime_types);
|
||||
Log::create_stream(LOG, [$columns=Info, $ev=log_pe, $path="pe"]);
|
||||
}
|
||||
|
||||
hook set_file(f: fa_file) &priority=5
|
||||
{
|
||||
if ( ! f?$pe )
|
||||
f$pe = [$ts=network_time(), $id=f$id];
|
||||
}
|
||||
|
||||
event pe_dos_header(f: fa_file, h: PE::DOSHeader) &priority=5
|
||||
{
|
||||
hook set_file(f);
|
||||
}
|
||||
|
||||
event pe_file_header(f: fa_file, h: PE::FileHeader) &priority=5
|
||||
{
|
||||
hook set_file(f);
|
||||
|
||||
f$pe$machine = machine_types[h$machine];
|
||||
f$pe$compile_ts = h$ts;
|
||||
f$pe$is_exe = ( h$optional_header_size > 0 );
|
||||
|
||||
for ( c in h$characteristics )
|
||||
{
|
||||
if ( file_characteristics[c] == "32BIT_MACHINE" )
|
||||
f$pe$is_64bit = F;
|
||||
}
|
||||
}
|
||||
|
||||
event pe_optional_header(f: fa_file, h: PE::OptionalHeader) &priority=5
|
||||
{
|
||||
hook set_file(f);
|
||||
|
||||
# Only EXEs have optional headers
|
||||
if ( ! f$pe$is_exe )
|
||||
return;
|
||||
|
||||
f$pe$os = os_versions[h$os_version_major, h$os_version_minor];
|
||||
f$pe$subsystem = windows_subsystems[h$subsystem];
|
||||
|
||||
for ( c in h$dll_characteristics )
|
||||
{
|
||||
if ( dll_characteristics[c] == "DYNAMIC_BASE" )
|
||||
f$pe$uses_aslr = T;
|
||||
if ( dll_characteristics[c] == "FORCE_INTEGRITY" )
|
||||
f$pe$uses_code_integrity = T;
|
||||
if ( dll_characteristics[c] == "NX_COMPAT" )
|
||||
f$pe$uses_dep = T;
|
||||
if ( dll_characteristics[c] == "NO_SEH" )
|
||||
f$pe$uses_seh = F;
|
||||
}
|
||||
|
||||
f$pe$has_export_table = (|h$table_sizes| > 0 && h$table_sizes[0] > 0);
|
||||
f$pe$has_import_table = (|h$table_sizes| > 1 && h$table_sizes[1] > 0);
|
||||
f$pe$has_cert_table = (|h$table_sizes| > 4 && h$table_sizes[4] > 0);
|
||||
f$pe$has_debug_data = (|h$table_sizes| > 6 && h$table_sizes[6] > 0);
|
||||
}
|
||||
|
||||
event pe_section_header(f: fa_file, h: PE::SectionHeader) &priority=5
|
||||
{
|
||||
hook set_file(f);
|
||||
|
||||
# Only EXEs have section headers
|
||||
if ( ! f$pe$is_exe )
|
||||
return;
|
||||
|
||||
if ( ! f$pe?$section_names )
|
||||
f$pe$section_names = vector();
|
||||
f$pe$section_names[|f$pe$section_names|] = h$name;
|
||||
}
|
||||
|
||||
event file_state_remove(f: fa_file) &priority=-5
|
||||
{
|
||||
if ( f?$pe && f$pe?$machine )
|
||||
Log::write(LOG, f$pe);
|
||||
}
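A hedged example of consuming the new pe.log stream through its log event; the 32-bit filter is only for illustration:

# Illustrative sketch: react to each logged PE record.
event PE::log_pe(rec: PE::Info)
    {
    if ( ! rec$is_64bit )
        print fmt("32-bit PE %s, subsystem %s", rec$id,
                  rec?$subsystem ? rec$subsystem : "unknown");
    }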
|
||||
|
|
@@ -195,7 +195,7 @@ event Input::end_of_data(name: string, source: string)

 event bro_init() &priority=5
     {
-    Log::create_stream(Unified2::LOG, [$columns=Info, $ev=log_unified2]);
+    Log::create_stream(Unified2::LOG, [$columns=Info, $ev=log_unified2, $path="unified2"]);

     if ( sid_msg == "" )
         {
@@ -36,7 +36,7 @@ export {

 event bro_init() &priority=5
     {
-    Log::create_stream(X509::LOG, [$columns=Info, $ev=log_x509]);
+    Log::create_stream(X509::LOG, [$columns=Info, $ev=log_x509, $path="x509"]);
     }

 redef record Files::Info += {
@@ -47,6 +47,9 @@ redef record Files::Info += {

 event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) &priority=5
     {
+    if ( ! f$info?$mime_type )
+        f$info$mime_type = "application/pkix-cert";
+
     f$info$x509 = [$ts=f$info$ts, $id=f$id, $certificate=cert, $handle=cert_ref];
     }

scripts/base/frameworks/broker/README | 2 (new file)
@@ -0,0 +1,2 @@
The Broker communication framework facilitates connecting to remote Bro
|
||||
instances to share state and transfer events.
|
scripts/base/frameworks/broker/__load__.bro | 1 (new file)
@@ -0,0 +1 @@
@load ./main
|
scripts/base/frameworks/broker/main.bro | 103 (new file)
@@ -0,0 +1,103 @@
##! Various data structure definitions for use with Bro's communication system.
|
||||
|
||||
module BrokerComm;
|
||||
|
||||
export {
|
||||
|
||||
## A name used to identify this endpoint to peers.
|
||||
## .. bro:see:: BrokerComm::connect BrokerComm::listen
|
||||
const endpoint_name = "" &redef;
|
||||
|
||||
## Change communication behavior.
|
||||
type EndpointFlags: record {
|
||||
## Whether to restrict message topics that can be published to peers.
|
||||
auto_publish: bool &default = T;
|
||||
## Whether to restrict what message topics or data store identifiers
|
||||
## the local endpoint advertises to peers (e.g. subscribing to
|
||||
## events or making a master data store available).
|
||||
auto_advertise: bool &default = T;
|
||||
};
|
||||
|
||||
## Fine-grained tuning of communication behavior for a particular message.
|
||||
type SendFlags: record {
|
||||
## Send the message to the local endpoint.
|
||||
self: bool &default = F;
|
||||
## Send the message to peer endpoints that advertise interest in
|
||||
## the topic associated with the message.
|
||||
peers: bool &default = T;
|
||||
## Send the message to peer endpoints even if they don't advertise
|
||||
## interest in the topic associated with the message.
|
||||
unsolicited: bool &default = F;
|
||||
};
|
||||
|
||||
## Opaque communication data.
|
||||
type Data: record {
|
||||
d: opaque of BrokerComm::Data &optional;
|
||||
};
|
||||
|
||||
## Opaque communication data.
|
||||
type DataVector: vector of BrokerComm::Data;
|
||||
|
||||
## Opaque event communication data.
|
||||
type EventArgs: record {
|
||||
## The name of the event. Not set if invalid event or arguments.
|
||||
name: string &optional;
|
||||
## The arguments to the event.
|
||||
args: DataVector;
|
||||
};
|
||||
|
||||
## Opaque communication data used as a convenient way to wrap key-value
|
||||
## pairs that comprise table entries.
|
||||
type TableItem : record {
|
||||
key: BrokerComm::Data;
|
||||
val: BrokerComm::Data;
|
||||
};
|
||||
}
|
||||
|
||||
module BrokerStore;
|
||||
|
||||
export {
|
||||
|
||||
## Whether a data store query could be completed or not.
|
||||
type QueryStatus: enum {
|
||||
SUCCESS,
|
||||
FAILURE,
|
||||
};
|
||||
|
||||
## An expiry time for a key-value pair inserted in to a data store.
|
||||
type ExpiryTime: record {
|
||||
## Absolute point in time at which to expire the entry.
|
||||
absolute: time &optional;
|
||||
## A point in time relative to the last modification time at which
|
||||
## to expire the entry. New modifications will delay the expiration.
|
||||
since_last_modification: interval &optional;
|
||||
};
|
||||
|
||||
## The result of a data store query.
|
||||
type QueryResult: record {
|
||||
## Whether the query completed or not.
|
||||
status: BrokerStore::QueryStatus;
|
||||
## The result of the query. Certain queries may use a particular
|
||||
## data type (e.g. querying store size always returns a count, but
|
||||
## a lookup may return various data types).
|
||||
result: BrokerComm::Data;
|
||||
};
|
||||
|
||||
## Options to tune the SQLite storage backend.
|
||||
type SQLiteOptions: record {
|
||||
## File system path of the database.
|
||||
path: string &default = "store.sqlite";
|
||||
};
|
||||
|
||||
## Options to tune the RocksDB storage backend.
|
||||
type RocksDBOptions: record {
|
||||
## File system path of the database.
|
||||
path: string &default = "store.rocksdb";
|
||||
};
|
||||
|
||||
## Options to tune the particular storage backends.
|
||||
type BackendOptions: record {
|
||||
sqlite: SQLiteOptions &default = SQLiteOptions();
|
||||
rocksdb: RocksDBOptions &default = RocksDBOptions();
|
||||
};
|
||||
}
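An illustrative construction of the record types declared above (the database path is a made-up example):

# Illustrative sketch: build an expiry and backend-options record.
event bro_init()
    {
    local e: BrokerStore::ExpiryTime = [$since_last_modification=1hr];
    local opts: BrokerStore::BackendOptions =
        [$sqlite=[$path="/var/db/example-store.sqlite"]];
    print e, opts;
    }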
|
|
@ -43,35 +43,35 @@ export {
|
|||
## software.
|
||||
TIME_MACHINE,
|
||||
};
|
||||
|
||||
|
||||
## Events raised by a manager and handled by the workers.
|
||||
const manager2worker_events = /Drop::.*/ &redef;
|
||||
|
||||
|
||||
## Events raised by a manager and handled by proxies.
|
||||
const manager2proxy_events = /EMPTY/ &redef;
|
||||
|
||||
|
||||
## Events raised by proxies and handled by a manager.
|
||||
const proxy2manager_events = /EMPTY/ &redef;
|
||||
|
||||
|
||||
## Events raised by proxies and handled by workers.
|
||||
const proxy2worker_events = /EMPTY/ &redef;
|
||||
|
||||
|
||||
## Events raised by workers and handled by a manager.
|
||||
const worker2manager_events = /(TimeMachine::command|Drop::.*)/ &redef;
|
||||
|
||||
|
||||
## Events raised by workers and handled by proxies.
|
||||
const worker2proxy_events = /EMPTY/ &redef;
|
||||
|
||||
|
||||
## Events raised by TimeMachine instances and handled by a manager.
|
||||
const tm2manager_events = /EMPTY/ &redef;
|
||||
|
||||
|
||||
## Events raised by TimeMachine instances and handled by workers.
|
||||
const tm2worker_events = /EMPTY/ &redef;
|
||||
|
||||
## Events sent by the control host (i.e. BroControl) when dynamically
|
||||
|
||||
## Events sent by the control host (i.e. BroControl) when dynamically
|
||||
## connecting to a running instance to update settings or request data.
|
||||
const control_events = Control::controller_events &redef;
|
||||
|
||||
|
||||
## Record type to indicate a node in a cluster.
|
||||
type Node: record {
|
||||
## Identifies the type of cluster node in this node's configuration.
|
||||
|
@ -96,13 +96,13 @@ export {
|
|||
## Name of a time machine node with which this node connects.
|
||||
time_machine: string &optional;
|
||||
};
|
||||
|
||||
|
||||
## This function can be called at any time to determine if the cluster
|
||||
## framework is being enabled for this run.
|
||||
##
|
||||
## Returns: True if :bro:id:`Cluster::node` has been set.
|
||||
global is_enabled: function(): bool;
|
||||
|
||||
|
||||
## This function can be called at any time to determine what type of
|
||||
## cluster node the current Bro instance is going to be acting as.
|
||||
## If :bro:id:`Cluster::is_enabled` returns false, then
|
||||
|
@ -110,22 +110,25 @@ export {
|
|||
##
|
||||
## Returns: The :bro:type:`Cluster::NodeType` the calling node acts as.
|
||||
global local_node_type: function(): NodeType;
|
||||
|
||||
|
||||
## This gives the value for the number of workers currently connected to,
|
||||
## and it's maintained internally by the cluster framework. It's
|
||||
## primarily intended for use by managers to find out how many workers
|
||||
## and it's maintained internally by the cluster framework. It's
|
||||
## primarily intended for use by managers to find out how many workers
|
||||
## should be responding to requests.
|
||||
global worker_count: count = 0;
|
||||
|
||||
|
||||
## The cluster layout definition. This should be placed into a filter
|
||||
## named cluster-layout.bro somewhere in the BROPATH. It will be
|
||||
## named cluster-layout.bro somewhere in the BROPATH. It will be
|
||||
## automatically loaded if the CLUSTER_NODE environment variable is set.
|
||||
## Note that BroControl handles all of this automatically.
|
||||
const nodes: table[string] of Node = {} &redef;
|
||||
|
||||
|
||||
## This is usually supplied on the command line for each instance
|
||||
## of the cluster that is started up.
|
||||
const node = getenv("CLUSTER_NODE") &redef;
|
||||
|
||||
## Interval for retrying failed connections between cluster nodes.
|
||||
const retry_interval = 1min &redef;
|
||||
}
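A minimal sketch of the cluster-layout.bro described above, with placeholder hosts and ports; the $interface field is an assumption not shown in this excerpt:

# Illustrative sketch of a cluster-layout.bro (addresses, ports and the
# $interface field are assumptions for the example).
redef Cluster::nodes = {
    ["manager"]  = [$node_type=Cluster::MANAGER, $ip=10.0.0.1, $p=47761/tcp],
    ["proxy-1"]  = [$node_type=Cluster::PROXY,   $ip=10.0.0.1, $p=47762/tcp,
                    $manager="manager"],
    ["worker-1"] = [$node_type=Cluster::WORKER,  $ip=10.0.0.2, $p=47763/tcp,
                    $manager="manager", $proxy="proxy-1", $interface="eth0"],
};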
|
||||
|
||||
function is_enabled(): bool
|
||||
|
@@ -158,6 +161,6 @@ event bro_init() &priority=5
         Reporter::error(fmt("'%s' is not a valid node in the Cluster::nodes configuration", node));
         terminate();
         }

-    Log::create_stream(Cluster::LOG, [$columns=Info]);
+    Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster"]);
     }
@ -11,7 +11,7 @@ module Cluster;
|
|||
event bro_init() &priority=9
|
||||
{
|
||||
local me = nodes[node];
|
||||
|
||||
|
||||
for ( i in Cluster::nodes )
|
||||
{
|
||||
local n = nodes[i];
|
||||
|
@ -22,35 +22,35 @@ event bro_init() &priority=9
|
|||
Communication::nodes["control"] = [$host=n$ip, $zone_id=n$zone_id,
|
||||
$connect=F, $class="control",
|
||||
$events=control_events];
|
||||
|
||||
|
||||
if ( me$node_type == MANAGER )
|
||||
{
|
||||
if ( n$node_type == WORKER && n$manager == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=worker2manager_events, $request_logs=T];
|
||||
|
||||
|
||||
if ( n$node_type == PROXY && n$manager == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
|
||||
$class=i, $events=proxy2manager_events, $request_logs=T];
|
||||
|
||||
|
||||
if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i )
|
||||
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=1min,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$events=tm2manager_events];
|
||||
}
|
||||
|
||||
|
||||
else if ( me$node_type == PROXY )
|
||||
{
|
||||
if ( n$node_type == WORKER && n$proxy == node )
|
||||
Communication::nodes[i] =
|
||||
[$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i,
|
||||
$sync=T, $auth=T, $events=worker2proxy_events];
|
||||
|
||||
# accepts connections from the previous one.
|
||||
|
||||
# accepts connections from the previous one.
|
||||
# (This is not ideal for setups with many proxies)
|
||||
# FIXME: Once we're using multiple proxies, we should also figure out some $class scheme ...
|
||||
if ( n$node_type == PROXY )
|
||||
|
@ -58,49 +58,49 @@ event bro_init() &priority=9
|
|||
if ( n?$proxy )
|
||||
Communication::nodes[i]
|
||||
= [$host=n$ip, $zone_id=n$zone_id, $p=n$p,
|
||||
$connect=T, $auth=F, $sync=T, $retry=1mins];
|
||||
$connect=T, $auth=F, $sync=T, $retry=retry_interval];
|
||||
else if ( me?$proxy && me$proxy == i )
|
||||
Communication::nodes[me$proxy]
|
||||
= [$host=nodes[i]$ip, $zone_id=nodes[i]$zone_id,
|
||||
$connect=F, $auth=T, $sync=T];
|
||||
}
|
||||
|
||||
|
||||
# Finally the manager, to send it status updates.
|
||||
if ( n$node_type == MANAGER && me$manager == i )
|
||||
Communication::nodes["manager"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=1mins,
|
||||
Communication::nodes["manager"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node,
|
||||
$events=manager2proxy_events];
|
||||
}
|
||||
else if ( me$node_type == WORKER )
|
||||
{
|
||||
if ( n$node_type == MANAGER && me$manager == i )
|
||||
Communication::nodes["manager"] = [$host=nodes[i]$ip,
|
||||
Communication::nodes["manager"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=1mins,
|
||||
$class=node,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$class=node,
|
||||
$events=manager2worker_events];
|
||||
|
||||
|
||||
if ( n$node_type == PROXY && me$proxy == i )
|
||||
Communication::nodes["proxy"] = [$host=nodes[i]$ip,
|
||||
Communication::nodes["proxy"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T, $retry=1mins,
|
||||
$sync=T, $class=node,
|
||||
$connect=T, $retry=retry_interval,
|
||||
$sync=T, $class=node,
|
||||
$events=proxy2worker_events];
|
||||
|
||||
if ( n$node_type == TIME_MACHINE &&
|
||||
|
||||
if ( n$node_type == TIME_MACHINE &&
|
||||
me?$time_machine && me$time_machine == i )
|
||||
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
|
||||
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
|
||||
$zone_id=nodes[i]$zone_id,
|
||||
$p=nodes[i]$p,
|
||||
$connect=T,
|
||||
$retry=1min,
|
||||
$connect=T,
|
||||
$retry=retry_interval,
|
||||
$events=tm2worker_events];
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -164,7 +164,7 @@ const src_names = {

 event bro_init() &priority=5
     {
-    Log::create_stream(Communication::LOG, [$columns=Info]);
+    Log::create_stream(Communication::LOG, [$columns=Info, $path="communication"]);
     }

 function do_script_log_common(level: count, src: count, msg: string)
@@ -38,7 +38,7 @@ redef record connection += {

 event bro_init() &priority=5
     {
-    Log::create_stream(DPD::LOG, [$columns=Info]);
+    Log::create_stream(DPD::LOG, [$columns=Info, $path="dpd"]);
     }

 event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=10
@@ -1,3 +1,9 @@
+@load-sigs ./archive
+@load-sigs ./audio
+@load-sigs ./font
 @load-sigs ./general
+@load-sigs ./image
 @load-sigs ./msoffice
-@load-sigs ./libmagic
+@load-sigs ./video
+
+@load-sigs ./libmagic
scripts/base/frameworks/files/magic/archive.sig | 176 (new file)
@@ -0,0 +1,176 @@
|
||||
signature file-tar {
|
||||
file-magic /^[[:print:]\x00]{100}([[:digit:]\x20]{7}\x00){3}([[:digit:]\x20]{11}\x00){2}([[:digit:]\x00\x20]{7}[\x20\x00])[0-7\x00]/
|
||||
file-mime "application/x-tar", 100
|
||||
}
|
||||
|
||||
# This is low priority so that files using zip as a
|
||||
# container will be identified correctly.
|
||||
signature file-zip {
|
||||
file-mime "application/zip", 10
|
||||
file-magic /^PK\x03\x04.{2}/
|
||||
}
|
||||
|
||||
# Multivolume Zip archive
|
||||
signature file-multi-zip {
|
||||
file-mime "application/zip", 10
|
||||
file-magic /^PK\x07\x08PK\x03\x04/
|
||||
}
|
||||
|
||||
# RAR
|
||||
signature file-rar {
|
||||
file-mime "application/x-rar", 70
|
||||
file-magic /^Rar!/
|
||||
}
|
||||
|
||||
# GZIP
|
||||
signature file-gzip {
|
||||
file-mime "application/x-gzip", 100
|
||||
file-magic /\x1f\x8b/
|
||||
}
|
||||
|
||||
# Microsoft Cabinet
|
||||
signature file-ms-cab {
|
||||
file-mime "application/vnd.ms-cab-compressed", 110
|
||||
file-magic /^MSCF\x00\x00\x00\x00/
|
||||
}
|
||||
|
||||
# Mac OS X DMG files
|
||||
signature file-dmg {
|
||||
file-magic /^(\x78\x01\x73\x0D\x62\x62\x60|\x78\xDA\x63\x60\x18\x05|\x78\x01\x63\x60\x18\x05|\x78\xDA\x73\x0D|\x78[\x01\xDA]\xED[\xD0-\xD9])/
|
||||
file-mime "application/x-dmg", 100
|
||||
}
|
||||
|
||||
# XAR (eXtensible ARchive) format.
|
||||
# Mac OS X uses this for the .pkg format.
|
||||
signature file-xar {
|
||||
file-magic /^xar\!/
|
||||
file-mime "application/x-xar", 100
|
||||
}
|
||||
|
||||
# RPM
|
||||
signature file-magic-auto352 {
|
||||
file-mime "application/x-rpm", 70
|
||||
file-magic /^(drpm|\xed\xab\xee\xdb)/
|
||||
}
|
||||
|
||||
# StuffIt
|
||||
signature file-stuffit {
|
||||
file-mime "application/x-stuffit", 70
|
||||
file-magic /^(SIT\x21|StuffIt)/
|
||||
}
|
||||
|
||||
# Archived data
|
||||
signature file-x-archive {
|
||||
file-mime "application/x-archive", 70
|
||||
file-magic /^!?<ar(ch)?>/
|
||||
}
|
||||
|
||||
# ARC archive data
|
||||
signature file-arc {
|
||||
file-mime "application/x-arc", 70
|
||||
file-magic /^[\x00-\x7f]{2}[\x02-\x0a\x14\x48]\x1a/
|
||||
}
|
||||
|
||||
# EET archive
|
||||
signature file-eet {
|
||||
file-mime "application/x-eet", 70
|
||||
file-magic /^\x1e\xe7\xff\x00/
|
||||
}
|
||||
|
||||
# Zoo archive
|
||||
signature file-zoo {
|
||||
file-mime "application/x-zoo", 70
|
||||
file-magic /^.{20}\xdc\xa7\xc4\xfd/
|
||||
}
|
||||
|
||||
# LZ4 compressed data (legacy format)
|
||||
signature file-lz4-legacy {
|
||||
file-mime "application/x-lz4", 70
|
||||
file-magic /(\x02\x21\x4c\x18)/
|
||||
}
|
||||
|
||||
# LZ4 compressed data
|
||||
signature file-lz4 {
|
||||
file-mime "application/x-lz4", 70
|
||||
file-magic /^\x04\x22\x4d\x18/
|
||||
}
|
||||
|
||||
# LRZIP compressed data
|
||||
signature file-lrzip {
|
||||
file-mime "application/x-lrzip", 1
|
||||
file-magic /^LRZI/
|
||||
}
|
||||
|
||||
# LZIP compressed data
|
||||
signature file-lzip {
|
||||
file-mime "application/x-lzip", 70
|
||||
file-magic /^LZIP/
|
||||
}
|
||||
|
||||
# Self-extracting PKZIP archive
|
||||
signature file-magic-auto434 {
|
||||
file-mime "application/zip", 340
|
||||
file-magic /^MZ.{28}(Copyright 1989\x2d1990 PKWARE Inc|PKLITE Copr)\x2e/
|
||||
}
|
||||
|
||||
# LHA archive (LZH)
|
||||
signature file-lzh {
|
||||
file-mime "application/x-lzh", 80
|
||||
file-magic /^.{2}-(lh[ abcdex0-9]|lz[s2-8]|lz[s2-8]|pm[s012]|pc1)-/
|
||||
}
|
||||
|
||||
# WARC Archive
|
||||
signature file-warc {
|
||||
file-mime "application/warc", 50
|
||||
file-magic /^WARC\x2f/
|
||||
}
|
||||
|
||||
# 7-zip archive data
|
||||
signature file-7zip {
|
||||
file-mime "application/x-7z-compressed", 50
|
||||
file-magic /^7z\xbc\xaf\x27\x1c/
|
||||
}
|
||||
|
||||
# XZ compressed data
|
||||
signature file-xz {
|
||||
file-mime "application/x-xz", 90
|
||||
file-magic /^\xfd7zXZ\x00/
|
||||
}
|
||||
|
||||
# LHa self-extracting archive
|
||||
signature file-magic-auto436 {
|
||||
file-mime "application/x-lha", 120
|
||||
file-magic /^MZ.{34}LH[aA]\x27s SFX/
|
||||
}
|
||||
|
||||
# ARJ archive data
|
||||
signature file-arj {
|
||||
file-mime "application/x-arj", 50
|
||||
file-magic /^\x60\xea/
|
||||
}
|
||||
|
||||
# Byte-swapped cpio archive
|
||||
signature file-bs-cpio {
|
||||
file-mime "application/x-cpio", 50
|
||||
file-magic /(\x71\xc7|\xc7\x71)/
|
||||
}
|
||||
|
||||
# CPIO archive
|
||||
signature file-cpio {
|
||||
file-mime "application/x-cpio", 50
|
||||
file-magic /^(\xc7\x71|\x71\xc7)/
|
||||
}
|
||||
|
||||
# Compress'd data
|
||||
signature file-compress {
|
||||
file-mime "application/x-compress", 50
|
||||
file-magic /^\x1f\x9d/
|
||||
}
|
||||
|
||||
# LZMA compressed data
|
||||
signature file-lzma {
|
||||
file-mime "application/x-lzma", 71
|
||||
file-magic /^\x5d\x00\x00/
|
||||
}
|
||||
|
scripts/base/frameworks/files/magic/audio.sig | 13 (new file)
@@ -0,0 +1,13 @@
|
||||
# MPEG v3 audio
|
||||
signature file-mpeg-audio {
|
||||
file-mime "audio/mpeg", 20
|
||||
file-magic /^\xff[\xe2\xe3\xf2\xf3\xf6\xf7\xfa\xfb\xfc\xfd]/
|
||||
}
|
||||
|
||||
# MPEG v4 audio
|
||||
signature file-m4a {
|
||||
file-mime "audio/m4a", 70
|
||||
file-magic /^....ftyp(m4a)/
|
||||
}
|
||||
|
scripts/base/frameworks/files/magic/font.sig | 41 (new file)
@@ -0,0 +1,41 @@
|
||||
# Web Open Font Format
|
||||
signature file-woff {
|
||||
file-magic /^wOFF/
|
||||
file-mime "application/font-woff", 70
|
||||
}
|
||||
|
||||
# TrueType font
|
||||
signature file-ttf {
|
||||
file-mime "application/x-font-ttf", 80
|
||||
file-magic /^\x00\x01\x00\x00\x00/
|
||||
}
|
||||
|
||||
signature file-embedded-opentype {
|
||||
file-mime "application/vnd.ms-fontobject", 50
|
||||
file-magic /^.{34}LP/
|
||||
}
|
||||
|
||||
# X11 SNF font
|
||||
signature file-snf {
|
||||
file-mime "application/x-font-sfn", 70
|
||||
file-magic /^(\x04\x00\x00\x00|\x00\x00\x00\x04).{100}(\x04\x00\x00\x00|\x00\x00\x00\x04)/
|
||||
}
|
||||
|
||||
# OpenType font
|
||||
signature file-opentype {
|
||||
file-mime "application/vnd.ms-opentype", 70
|
||||
file-magic /^OTTO/
|
||||
}
|
||||
|
||||
# FrameMaker Font file
|
||||
signature file-maker-screen-font {
|
||||
file-mime "application/x-mif", 190
|
||||
file-magic /^\x3cMakerScreenFont/
|
||||
}
|
||||
|
||||
# >0 string,=SplineFontDB: (len=13), ["Spline Font Database "], swap_endian=0
|
||||
signature file-spline-font-db {
|
||||
file-mime "application/vnd.font-fontforge-sfd", 160
|
||||
file-magic /^SplineFontDB\x3a/
|
||||
}
|
|
@@ -1,18 +1,93 @@
 # General purpose file magic signatures.

 # Plaintext
+# (Including BOMs for UTF-8, 16, and 32)
 signature file-plaintext {
-    file-magic /^([[:print:][:space:]]{10})/
     file-mime "text/plain", -20
+    file-magic /^(\xef\xbb\xbf|(\x00\x00)?\xfe\xff|\xff\xfe(\x00\x00)?)?[[:space:]\x20-\x7E]{10}/
 }

-signature file-tar {
-    file-magic /^[[:print:]\x00]{100}([[:digit:]\x20]{7}\x00){3}([[:digit:]\x20]{11}\x00){2}([[:digit:]\x00\x20]{7}[\x20\x00])[0-7\x00]/
-    file-mime "application/x-tar", 100
+signature file-json {
+    file-mime "text/json", 1
+    file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*\{[\x0d\x0a[:blank:]]*(["][^"]{1,}["]|[a-zA-Z][a-zA-Z0-9\\_]*)[\x0d\x0a[:blank:]]*:[\x0d\x0a[:blank:]]*(["]|\[|\{|[0-9]|true|false)/
 }

-signature file-zip {
-    file-mime "application/zip", 10
-    file-magic /^PK\x03\x04.{2}/
+signature file-json2 {
+    file-mime "text/json", 1
+    file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*\[[\x0d\x0a[:blank:]]*(((["][^"]{1,}["]|[0-9]{1,}(\.[0-9]{1,})?|true|false)[\x0d\x0a[:blank:]]*,)|\{|\[)[\x0d\x0a[:blank:]]*/
 }

# Match empty JSON documents.
|
||||
signature file-json3 {
|
||||
file-mime "text/json", 0
|
||||
file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*(\[\]|\{\})[\x0d\x0a[:blank:]]*$/
|
||||
}
|
||||
|
||||
signature file-xml {
|
||||
file-mime "application/xml", 10
|
||||
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<\?xml /
|
||||
}
|
||||
|
||||
signature file-xhtml {
|
||||
file-mime "text/html", 100
|
||||
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<(![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]|[hH][tT][mM][lL]|[mM][eE][tT][aA] {1,}[hH][tT][tT][pP]-[eE][qQ][uU][iI][vV])/
|
||||
}
|
||||
|
||||
signature file-html {
|
||||
file-mime "text/html", 49
|
||||
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]/
|
||||
}
|
||||
|
||||
signature file-html2 {
|
||||
file-mime "text/html", 20
|
||||
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([hH][eE][aA][dD]|[hH][tT][mM][lL]|[tT][iI][tT][lL][eE]|[bB][oO][dD][yY])/
|
||||
}
|
||||
|
||||
signature file-rss {
|
||||
file-mime "text/rss", 90
|
||||
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[rR][sS][sS]/
|
||||
}
|
||||
|
||||
signature file-atom {
|
||||
file-mime "text/atom", 100
|
||||
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([rR][sS][sS][^>]*xmlns:atom|[fF][eE][eE][dD][^>]*xmlns=["']?http:\/\/www.w3.org\/2005\/Atom["']?)/
|
||||
}
|
||||
|
||||
signature file-soap {
|
||||
file-mime "application/soap+xml", 49
|
||||
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[sS][oO][aA][pP](-[eE][nN][vV])?:[eE][nN][vV][eE][lL][oO][pP][eE]/
|
||||
}
|
||||
|
||||
signature file-cross-domain-policy {
|
||||
file-mime "text/x-cross-domain-policy", 49
|
||||
file-magic /^([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<![dD][oO][cC][tT][yY][pP][eE] {1,}[cC][rR][oO][sS][sS]-[dD][oO][mM][aA][iI][nN]-[pP][oO][lL][iI][cC][yY]/
|
||||
}
|
||||
|
||||
signature file-cross-domain-policy2 {
|
||||
file-mime "text/x-cross-domain-policy", 49
|
||||
file-magic /^([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[cC][rR][oO][sS][sS]-[dD][oO][mM][aA][iI][nN]-[pP][oO][lL][iI][cC][yY]/
|
||||
}
|
||||
|
||||
signature file-xmlrpc {
|
||||
file-mime "application/xml-rpc", 49
|
||||
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][eE][tT][hH][oO][dD][rR][eE][sS][pP][oO][nN][sS][eE]>/
|
||||
}
|
||||
|
||||
signature file-coldfusion {
|
||||
file-mime "magnus-internal/cold-fusion", 20
|
||||
file-magic /^([\x0d\x0a[:blank:]]*(<!--.*-->)?)*<(CFPARAM|CFSET|CFIF)/
|
||||
}
|
||||
|
||||
# Adobe Flash Media Manifest
|
||||
signature file-f4m {
|
||||
file-mime "application/f4m", 49
|
||||
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][aA][nN][iI][fF][eE][sS][tT][\x0d\x0a[:blank:]]{1,}xmlns=\"http:\/\/ns\.adobe\.com\/f4m\//
|
||||
}
|
||||
|
||||
# Microsoft LNK files
|
||||
signature file-lnk {
|
||||
file-mime "application/x-ms-shortcut", 49
|
||||
file-magic /^\x4C\x00\x00\x00\x01\x14\x02\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x10\x00\x00\x00\x46/
|
||||
}
|
||||
|
||||
signature file-jar {
|
||||
|
@@ -21,8 +96,20 @@ signature file-jar {
 }

 signature file-java-applet {
-    file-magic /^\xca\xfe\xba\xbe...[\x2e-\x34]/
     file-mime "application/x-java-applet", 71
+    file-magic /^\xca\xfe\xba\xbe...[\x2d-\x34]/
 }

+# OCSP requests over HTTP.
+signature file-ocsp-request {
+    file-magic /^.{11,19}\x06\x05\x2b\x0e\x03\x02\x1a/
+    file-mime "application/ocsp-request", 71
+}
+
+# OCSP responses over HTTP.
+signature file-ocsp-response {
+    file-magic /^.{11,19}\x06\x09\x2B\x06\x01\x05\x05\x07\x30\x01\x01/
+    file-mime "application/ocsp-response", 71
+}
+
 # Shockwave flash
@@ -37,12 +124,6 @@ signature file-tnef {
     file-mime "application/vnd.ms-tnef", 100
 }

-# Mac OS X DMG files
-signature file-dmg {
-    file-magic /^(\x78\x01\x73\x0D\x62\x62\x60|\x78\xDA\x63\x60\x18\x05|\x78\x01\x63\x60\x18\x05|\x78\xDA\x73\x0D|\x78[\x01\xDA]\xED[\xD0-\xD9])/
-    file-mime "application/x-dmg", 100
-}
-
 # Mac OS X Mach-O executable
 signature file-mach-o {
     file-magic /^[\xce\xcf]\xfa\xed\xfe/
@@ -55,13 +136,6 @@ signature file-mach-o-universal {
     file-mime "application/x-mach-o-executable", 100
 }

-# XAR (eXtensible ARchive) format.
-# Mac OS X uses this for the .pkg format.
-signature file-xar {
-    file-magic /^xar\!/
-    file-mime "application/x-xar", 100
-}
-
 signature file-pkcs7 {
     file-magic /^MIME-Version:.*protocol=\"application\/pkcs7-signature\"/
     file-mime "application/pkcs7-signature", 100
@@ -79,16 +153,6 @@ signature file-jnlp {
     file-mime "application/x-java-jnlp-file", 100
 }

-signature file-ico {
-    file-magic /^\x00\x00\x01\x00/
-    file-mime "image/x-icon", 70
-}
-
-signature file-cur {
-    file-magic /^\x00\x00\x02\x00/
-    file-mime "image/x-cursor", 70
-}
-
 signature file-pcap {
     file-magic /^(\xa1\xb2\xc3\xd4|\xd4\xc3\xb2\xa1)/
     file-mime "application/vnd.tcpdump.pcap", 70
@ -119,7 +183,58 @@ signature file-python {
|
|||
file-mime "text/x-python", 60
|
||||
}
|
||||
|
||||
signature file-awk {
|
||||
file-mime "text/x-awk", 60
|
||||
file-magic /^\x23\x21[^\n]{1,15}bin\/(env[[:space:]]+)?(g|n)?awk/
|
||||
}
|
||||
|
||||
signature file-tcl {
|
||||
file-mime "text/x-tcl", 60
|
||||
file-magic /^\x23\x21[^\n]{1,15}bin\/(env[[:space:]]+)?(wish|tcl)/
|
||||
}
|
||||
|
||||
signature file-lua {
|
||||
file-mime "text/x-lua", 49
|
||||
file-magic /^\x23\x21[^\n]{1,15}bin\/(env[[:space:]]+)?lua/
|
||||
}
|
||||
|
||||
signature file-javascript {
|
||||
file-mime "application/javascript", 60
|
||||
file-magic /^\x23\x21[^\n]{1,15}bin\/(env[[:space:]]+)?node(js)?/
|
||||
}
|
||||
|
||||
signature file-javascript2 {
|
||||
file-mime "application/javascript", 60
|
||||
file-magic /^[\x0d\x0a[:blank:]]*<[sS][cC][rR][iI][pP][tT][[:blank:]]+([tT][yY][pP][eE]|[lL][aA][nN][gG][uU][aA][gG][eE])=['"]?([tT][eE][xX][tT]\/)?[jJ][aA][vV][aA][sS][cC][rR][iI][pP][tT]/
|
||||
}
|
||||
|
||||
signature file-javascript3 {
|
||||
file-mime "application/javascript", 60
|
||||
# This seems to be a somewhat common idiom in javascript.
|
||||
file-magic /^[\x0d\x0a[:blank:]]*for \(;;\);/
|
||||
}
|
||||
|
||||
signature file-javascript4 {
|
||||
file-mime "application/javascript", 60
|
||||
file-magic /^[\x0d\x0a[:blank:]]*document\.write(ln)?[:blank:]?\(/
|
||||
}
|
||||
|
||||
signature file-javascript5 {
|
||||
file-mime "application/javascript", 60
|
||||
file-magic /^\(function\(\)[[:blank:]\n]*\{/
|
||||
}
|
||||
|
||||
signature file-javascript6 {
|
||||
file-mime "application/javascript", 60
|
||||
file-magic /^[\x0d\x0a[:blank:]]*<script>[\x0d\x0a[:blank:]]*(var|function) /
|
||||
}
|
||||
|
||||
signature file-php {
|
||||
file-mime "text/x-php", 60
|
||||
file-magic /^\x23\x21[^\n]{1,15}bin\/(env[[:space:]]+)?php/
|
||||
}
|
||||
|
||||
signature file-php2 {
|
||||
file-magic /^.*<\?php/
|
||||
file-mime "text/x-php", 40
|
||||
}
|
||||
|
@ -135,3 +250,23 @@ signature file-skp {
|
|||
file-magic /^\xFF\xFE\xFF\x0E\x53\x00\x6B\x00\x65\x00\x74\x00\x63\x00\x68\x00\x55\x00\x70\x00\x20\x00\x4D\x00\x6F\x00\x64\x00\x65\x00\x6C\x00/
|
||||
file-mime "application/skp", 100
|
||||
}
|
||||
|
||||
signature file-elf-object {
|
||||
file-mime "application/x-object", 50
|
||||
file-magic /\x7fELF[\x01\x02](\x01.{10}\x01\x00|\x02.{10}\x00\x01)/
|
||||
}
|
||||
|
||||
signature file-elf {
|
||||
file-mime "application/x-executable", 50
|
||||
file-magic /\x7fELF[\x01\x02](\x01.{10}\x02\x00|\x02.{10}\x00\x02)/
|
||||
}
|
||||
|
||||
signature file-elf-sharedlib {
|
||||
file-mime "application/x-sharedlib", 50
|
||||
file-magic /\x7fELF[\x01\x02](\x01.{10}\x03\x00|\x02.{10}\x00\x03)/
|
||||
}
|
||||
|
||||
signature file-elf-coredump {
|
||||
file-mime "application/x-coredump", 50
|
||||
file-magic /\x7fELF[\x01\x02](\x01.{10}\x04\x00|\x02.{10}\x00\x04)/
|
||||
}
|
||||
|
|
scripts/base/frameworks/files/magic/image.sig | 166 (new file)
@@ -0,0 +1,166 @@
|
||||
signature file-tiff {
|
||||
file-mime "image/tiff", 70
|
||||
file-magic /^(MM\x00[\x2a\x2b]|II[\x2a\x2b]\x00)/
|
||||
}
|
||||
|
||||
signature file-gif {
|
||||
file-mime "image/gif", 70
|
||||
file-magic /^GIF8/
|
||||
}
|
||||
|
||||
# JPEG image
|
||||
signature file-jpeg {
|
||||
file-mime "image/jpeg", 52
|
||||
file-magic /^\xff\xd8/
|
||||
}
|
||||
|
||||
signature file-bmp {
|
||||
file-mime "image/x-ms-bmp", 50
|
||||
file-magic /BM.{12}[\x0c\x28\x40\x6c\x7c\x80]\x00/
|
||||
}
|
||||
|
||||
signature file-ico {
|
||||
file-magic /^\x00\x00\x01\x00/
|
||||
file-mime "image/x-icon", 70
|
||||
}
|
||||
|
||||
signature file-cur {
|
||||
file-magic /^\x00\x00\x02\x00/
|
||||
file-mime "image/x-cursor", 70
|
||||
}
|
||||
|
||||
signature file-magic-auto289 {
|
||||
file-mime "image/vnd.adobe.photoshop", 70
|
||||
file-magic /^8BPS/
|
||||
}
|
||||
|
||||
signature file-png {
|
||||
file-mime "image/png", 110
|
||||
file-magic /^\x89PNG/
|
||||
}
|
||||
|
||||
# JPEG 2000
|
||||
signature file-jp2 {
|
||||
file-mime "image/jp2", 60
|
||||
file-magic /.{4}ftypjp2/
|
||||
}
|
||||
|
||||
# JPEG 2000
|
||||
signature file-jp22 {
|
||||
file-mime "image/jp2", 70
|
||||
file-magic /\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a.{8}jp2 /
|
||||
}
|
||||
|
||||
# JPEG 2000
|
||||
signature file-jpx {
|
||||
file-mime "image/jpx", 70
|
||||
file-magic /\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a.{8}jpx /
|
||||
}
|
||||
|
||||
# JPEG 2000
|
||||
signature file-jpm {
|
||||
file-mime "image/jpm", 70
|
||||
file-magic /\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a.{8}jpm /
|
||||
}
|
||||
|
||||
# Xcursor image
|
||||
signature file-x-cursor {
|
||||
file-mime "image/x-xcursor", 70
|
||||
file-magic /^Xcur/
|
||||
}
|
||||
|
||||
# NIFF image
|
||||
signature file-niff {
|
||||
file-mime "image/x-niff", 70
|
||||
file-magic /^IIN1/
|
||||
}
|
||||
|
||||
# OpenEXR image
|
||||
signature file-openexr {
|
||||
file-mime "image/x-exr", 70
|
||||
file-magic /^\x76\x2f\x31\x01/
|
||||
}
|
||||
|
||||
# DPX image
|
||||
signature file-dpx {
|
||||
file-mime "image/x-dpx", 70
|
||||
file-magic /^SDPX/
|
||||
}
|
||||
|
||||
# Cartesian Perceptual Compression image
|
||||
signature file-cpi {
|
||||
file-mime "image/x-cpi", 70
|
||||
file-magic /(CPC\xb2)/
|
||||
}
|
||||
|
||||
signature file-orf {
|
||||
file-mime "image/x-olympus-orf", 70
|
||||
file-magic /IIR[OS]|MMOR/
|
||||
}
|
||||
|
||||
# Foveon X3F raw image
|
||||
signature file-x3r {
|
||||
file-mime "image/x-x3f", 70
|
||||
file-magic /^FOVb/
|
||||
}
|
||||
|
||||
# Paint.NET image
|
||||
signature file-paint-net {
|
||||
file-mime "image/x-paintnet", 70
|
||||
file-magic /^PDN3/
|
||||
}
|
||||
|
||||
# Corel Draw Picture
|
||||
signature file-coreldraw {
|
||||
file-mime "image/x-coreldraw", 70
|
||||
file-magic /^RIFF....CDR[A6]/
|
||||
}
|
||||
|
||||
# Netpbm PAM image
|
||||
signature file-netbpm{
|
||||
file-mime "image/x-portable-pixmap", 50
|
||||
file-magic /^P7/
|
||||
}
|
||||
|
||||
# JPEG 2000 image
|
||||
signature file-jpeg-2000 {
|
||||
file-mime "image/jp2", 50
|
||||
file-magic /^....jP/
|
||||
}
|
||||
|
||||
# DjVU Images
|
||||
signature file-djvu {
|
||||
file-mime "image/vnd.djvu", 70
|
||||
file-magic /AT\x26TFORM.{4}(DJV[MUI]|THUM)/
|
||||
}
|
||||
|
||||
# DWG AutoDesk AutoCAD
|
||||
signature file-dwg {
|
||||
file-mime "image/vnd.dwg", 90
|
||||
file-magic /^(AC[12]\.|AC10)/
|
||||
}
|
||||
|
||||
# GIMP XCF image
|
||||
signature file-gimp-xcf {
|
||||
file-mime "image/x-xcf", 110
|
||||
file-magic /^gimp xcf/
|
||||
}
|
||||
|
||||
# Polar Monitor Bitmap text
|
||||
signature file-polar-monitor-bitmap {
|
||||
file-mime "image/x-polar-monitor-bitmap", 160
|
||||
file-magic /^\x5bBitmapInfo2\x5d/
|
||||
}
|
||||
|
||||
# Award BIOS bitmap
|
||||
signature file-award-bitmap {
|
||||
file-mime "image/x-award-bmp", 20
|
||||
file-magic /^AWBM/
|
||||
}
|
||||
|
||||
# Award BIOS Logo, 136 x 84
|
||||
signature file-award-bios-logo {
|
||||
file-mime "image/x-award-bioslogo", 50
|
||||
file-magic /^\x11[\x06\x09]/
|
||||
}
(File diff suppressed because it is too large.)
@ -26,3 +26,9 @@ signature file-pptx {
|
|||
file-magic /^PK\x03\x04.{26}(\[Content_Types\]\.xml|_rels\x2f\.rels|ppt\x2f).*PK\x03\x04.{26}ppt\x2f/
|
||||
file-mime "application/vnd.openxmlformats-officedocument.presentationml.presentation", 80
|
||||
}
|
||||
|
||||
signature file-msaccess {
|
||||
file-mime "application/x-msaccess", 180
|
||||
file-magic /.{4}Standard (Jet|ACE) DB\x00/
|
||||
}
|
||||
|
||||
|
|
scripts/base/frameworks/files/magic/video.sig | 105 (new file)
@@ -0,0 +1,105 @@
|
||||
# Macromedia Flash Video
|
||||
signature file-flv {
|
||||
file-mime "video/x-flv", 60
|
||||
file-magic /^FLV/
|
||||
}
|
||||
|
||||
# FLI animation
|
||||
signature file-fli {
|
||||
file-mime "video/x-fli", 50
|
||||
file-magic /^.{4}\x11\xaf/
|
||||
}
|
||||
|
||||
# FLC animation
|
||||
signature file-flc {
|
||||
file-mime "video/x-flc", 50
|
||||
file-magic /^.{4}\x12\xaf/
|
||||
}
|
||||
|
||||
# Motion JPEG 2000
|
||||
signature file-mj2 {
|
||||
file-mime "video/mj2", 70
|
||||
file-magic /\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a.{8}mjp2/
|
||||
}
|
||||
|
||||
# MNG video
|
||||
signature file-mng {
|
||||
file-mime "video/x-mng", 70
|
||||
file-magic /^\x8aMNG/
|
||||
}
|
||||
|
||||
# JNG video
|
||||
signature file-jng {
|
||||
file-mime "video/x-jng", 70
|
||||
file-magic /^\x8bJNG/
|
||||
}
|
||||
|
||||
# Generic MPEG container
|
||||
signature file-mpeg {
|
||||
file-mime "video/mpeg", 50
|
||||
file-magic /(\x00\x00\x01[\xb0-\xbb])/
|
||||
}
|
||||
|
||||
# MPV
|
||||
signature file-mpv {
|
||||
file-mime "video/mpv", 71
|
||||
file-magic /(\x00\x00\x01\xb3)/
|
||||
}
|
||||
|
||||
# H.264
|
||||
signature file-h264 {
|
||||
file-mime "video/h264", 41
|
||||
file-magic /(\x00\x00\x00\x01)([\x07\x27\x47\x67\x87\xa7\xc7\xe7])/
|
||||
}
|
||||
|
||||
# WebM video
|
||||
signature file-webm {
|
||||
file-mime "video/webm", 70
|
||||
file-magic /(\x1a\x45\xdf\xa3)(.*)(B\x82)(.{1})(webm)/
|
||||
}
|
||||
|
||||
# Matroska video
|
||||
signature file-matroska {
|
||||
file-mime "video/x-matroska", 110
|
||||
file-magic /(\x1a\x45\xdf\xa3)(.*)(B\x82)(.{1})(matroska)/
|
||||
}
|
||||
|
||||
# MP2P
|
||||
signature file-mp2p {
|
||||
file-mime "video/mp2p", 21
|
||||
file-magic /\x00\x00\x01\xba([\x40-\x7f\xc0-\xff])/
|
||||
}
|
||||
|
||||
# MPEG transport stream data. These files typically have the extension "ts".
|
||||
# Note: The 0x47 repeats every 188 bytes. Using four as the number of
|
||||
# occurrences for the test here is arbitrary.
|
||||
signature file-mp2t {
|
||||
file-mime "video/mp2t", 40
|
||||
file-magic /^(\x47.{187}){4}/
|
||||
}
|
||||
|
||||
# Silicon Graphics video
|
||||
signature file-sgi-movie {
|
||||
file-mime "video/x-sgi-movie", 70
|
||||
file-magic /^MOVI/
|
||||
}
|
||||
|
||||
# Apple QuickTime movie
|
||||
signature file-quicktime {
|
||||
file-mime "video/quicktime", 70
|
||||
file-magic /^....(mdat|moov)/
|
||||
}
|
||||
|
||||
# MPEG v4 video
|
||||
signature file-mp4 {
|
||||
file-mime "video/mp4", 70
|
||||
file-magic /^....ftyp(isom|mp4[12])/
|
||||
}
|
||||
|
||||
# 3GPP Video
|
||||
signature file-3gpp {
|
||||
file-mime "video/3gpp", 60
|
||||
file-magic /^....ftyp(3g[egps2]|avc1|mmp4)/
|
||||
}
|
||||
|
|
@@ -129,12 +129,11 @@ export {
     ## files based on the detected mime type of the file.
     const analyze_by_mime_type_automatically = T &redef;

-    ## The default setting for if the file reassembler is enabled for
-    ## each file.
+    ## The default setting for file reassembly.
     const enable_reassembler = T &redef;

     ## The default per-file reassembly buffer size.
-    const reassembly_buffer_size = 1048576 &redef;
+    const reassembly_buffer_size = 524288 &redef;

     ## Allows the file reassembler to be used if it's necessary because the
     ## file is transferred out of order.
@@ -313,7 +312,7 @@ global analyzer_add_callbacks: table[Files::Tag] of function(f: fa_file, args: A

 event bro_init() &priority=5
     {
-    Log::create_stream(Files::LOG, [$columns=Info, $ev=log_files]);
+    Log::create_stream(Files::LOG, [$columns=Info, $ev=log_files, $path="files"]);
     }

 function set_info(f: fa_file)
@@ -484,16 +483,19 @@ event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priori
         add f$info$rx_hosts[f$is_orig ? cid$resp_h : cid$orig_h];
     }

-event file_mime_type(f: fa_file, mime_type: string) &priority=10
+event file_sniff(f: fa_file, meta: fa_metadata) &priority=10
     {
     set_info(f);

-    f$info$mime_type = mime_type;
+    if ( ! meta?$mime_type )
+        return;
+
+    f$info$mime_type = meta$mime_type;

     if ( analyze_by_mime_type_automatically &&
-         mime_type in mime_type_to_analyzers )
+         meta$mime_type in mime_type_to_analyzers )
         {
-        local analyzers = mime_type_to_analyzers[mime_type];
+        local analyzers = mime_type_to_analyzers[meta$mime_type];
         for ( a in analyzers )
             {
             add f$info$analyzers[Files::analyzer_name(a)];
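A hedged example of a user-level handler for the new file_sniff event; hashing Windows executables is only an illustration:

# Illustrative sketch: act on the sniffed MIME type of a file.
event file_sniff(f: fa_file, meta: fa_metadata)
    {
    if ( ! meta?$mime_type )
        return;

    if ( meta$mime_type == "application/x-dosexec" )
        Files::add_analyzer(f, Files::ANALYZER_MD5);
    }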
@@ -1,18 +1,25 @@
 ##! The input framework provides a way to read previously stored data either
-##! as an event stream or into a bro table.
+##! as an event stream or into a Bro table.

 module Input;

 export {
     type Event: enum {
+        ## New data has been imported.
         EVENT_NEW = 0,
+        ## Existing data has been changed.
         EVENT_CHANGED = 1,
+        ## Previously existing data has been removed.
         EVENT_REMOVED = 2,
     };

+    ## Type that defines the input stream read mode.
     type Mode: enum {
+        ## Do not automatically reread the file after it has been read.
         MANUAL = 0,
+        ## Reread the entire file each time a change is found.
         REREAD = 1,
+        ## Read data from end of file each time new data is appended.
         STREAM = 2
     };

@@ -24,20 +31,20 @@ export {

     ## Separator between fields.
     ## Please note that the separator has to be exactly one character long.
-    ## Can be overwritten by individual writers.
+    ## Individual readers can use a different value.
     const separator = "\t" &redef;

     ## Separator between set elements.
     ## Please note that the separator has to be exactly one character long.
-    ## Can be overwritten by individual writers.
+    ## Individual readers can use a different value.
     const set_separator = "," &redef;

     ## String to use for empty fields.
-    ## Can be overwritten by individual writers.
+    ## Individual readers can use a different value.
     const empty_field = "(empty)" &redef;

     ## String to use for an unset &optional field.
-    ## Can be overwritten by individual writers.
+    ## Individual readers can use a different value.
     const unset_field = "-" &redef;

     ## Flag that controls if the input framework accepts records
@@ -47,11 +54,11 @@ export {
     ## abort. Defaults to false (abort).
     const accept_unsupported_types = F &redef;

-    ## TableFilter description type used for the `table` method.
+    ## A table input stream type used to send data to a Bro table.
     type TableDescription: record {
         # Common definitions for tables and events

-        ## String that allows the reader to find the source.
+        ## String that allows the reader to find the source of the data.
         ## For `READER_ASCII`, this is the filename.
         source: string;

@@ -61,7 +68,8 @@ export {
         ## Read mode to use for this stream.
         mode: Mode &default=default_mode;

-        ## Descriptive name. Used to remove a stream at a later time.
+        ## Name of the input stream. This is used by some functions to
+        ## manipulate the stream.
         name: string;

         # Special definitions for tables
@ -73,31 +81,35 @@ export {
|
|||
idx: any;
|
||||
|
||||
## Record that defines the values used as the elements of the table.
|
||||
## If this is undefined, then *destination* has to be a set.
|
||||
## If this is undefined, then *destination* must be a set.
|
||||
val: any &optional;
|
||||
|
||||
## Defines if the value of the table is a record (default), or a single value.
|
||||
## When this is set to false, then *val* can only contain one element.
|
||||
## Defines if the value of the table is a record (default), or a single
|
||||
## value. When this is set to false, then *val* can only contain one
|
||||
## element.
|
||||
want_record: bool &default=T;
|
||||
|
||||
## The event that is raised each time a value is added to, changed in or removed
|
||||
## from the table. The event will receive an Input::Event enum as the first
|
||||
## argument, the *idx* record as the second argument and the value (record) as the
|
||||
## third argument.
|
||||
ev: any &optional; # event containing idx, val as values.
|
||||
## The event that is raised each time a value is added to, changed in,
|
||||
## or removed from the table. The event will receive an
|
||||
## Input::TableDescription as the first argument, an Input::Event
|
||||
## enum as the second argument, the *idx* record as the third argument
|
||||
## and the value (record) as the fourth argument.
|
||||
ev: any &optional;
|
||||
|
||||
## Predicate function that can decide if an insertion, update or removal should
|
||||
## really be executed. Parameters are the same as for the event. If true is
|
||||
## returned, the update is performed. If false is returned, it is skipped.
|
||||
## Predicate function that can decide if an insertion, update or removal
|
||||
## should really be executed. Parameters have the same meaning as for the
|
||||
## event.
|
||||
## If true is returned, the update is performed. If false is returned,
|
||||
## it is skipped.
|
||||
pred: function(typ: Input::Event, left: any, right: any): bool &optional;
|
||||
|
||||
## A key/value table that will be passed on the reader.
|
||||
## Interpretation of the values is left to the writer, but
|
||||
## A key/value table that will be passed to the reader.
|
||||
## Interpretation of the values is left to the reader, but
|
||||
## usually they will be used for configuration purposes.
|
||||
config: table[string] of string &default=table();
|
||||
config: table[string] of string &default=table();
|
||||
};
|
||||
|
||||
## EventFilter description type used for the `event` method.
|
||||
## An event input stream type used to send input data to a Bro event.
|
||||
type EventDescription: record {
|
||||
# Common definitions for tables and events
|
||||
|
||||
|
@ -116,19 +128,26 @@ export {
|
|||
|
||||
# Special definitions for events
|
||||
|
||||
## Record describing the fields to be retrieved from the source input.
|
||||
## Record type describing the fields to be retrieved from the input
|
||||
## source.
|
||||
fields: any;
|
||||
|
||||
## If this is false, the event receives each value in fields as a separate argument.
|
||||
## If this is set to true (default), the event receives all fields in a single record value.
|
||||
## If this is false, the event receives each value in *fields* as a
|
||||
## separate argument.
|
||||
## If this is set to true (default), the event receives all fields in
|
||||
## a single record value.
|
||||
want_record: bool &default=T;
|
||||
|
||||
## The event that is raised each time a new line is received from the reader.
|
||||
## The event will receive an Input::Event enum as the first element, and the fields as the following arguments.
|
||||
## The event that is raised each time a new line is received from the
|
||||
## reader. The event will receive an Input::EventDescription record
|
||||
## as the first argument, an Input::Event enum as the second
|
||||
## argument, and the fields (as specified in *fields*) as the following
|
||||
## arguments (this will either be a single record value containing
|
||||
## all fields, or each field value as a separate argument).
|
||||
ev: any;
|
||||
|
||||
## A key/value table that will be passed on the reader.
|
||||
## Interpretation of the values is left to the writer, but
|
||||
## A key/value table that will be passed to the reader.
|
||||
## Interpretation of the values is left to the reader, but
|
||||
## usually they will be used for configuration purposes.
|
||||
config: table[string] of string &default=table();
|
||||
};
|
||||
|
@ -155,28 +174,29 @@ export {
|
|||
## field will be the same value as the *source* field.
|
||||
name: string;
|
||||
|
||||
## A key/value table that will be passed on the reader.
|
||||
## Interpretation of the values is left to the writer, but
|
||||
## A key/value table that will be passed to the reader.
|
||||
## Interpretation of the values is left to the reader, but
|
||||
## usually they will be used for configuration purposes.
|
||||
config: table[string] of string &default=table();
|
||||
};
|
||||
|
||||
## Create a new table input from a given source.
|
||||
## Create a new table input stream from a given source.
|
||||
##
|
||||
## description: `TableDescription` record describing the source.
|
||||
##
|
||||
## Returns: true on success.
|
||||
global add_table: function(description: Input::TableDescription) : bool;
|
||||
|
||||
## Create a new event input from a given source.
|
||||
## Create a new event input stream from a given source.
|
||||
##
|
||||
## description: `EventDescription` record describing the source.
|
||||
##
|
||||
## Returns: true on success.
|
||||
global add_event: function(description: Input::EventDescription) : bool;
|
||||
|
||||
## Create a new file analysis input from a given source. Data read from
|
||||
## the source is automatically forwarded to the file analysis framework.
|
||||
## Create a new file analysis input stream from a given source. Data read
|
||||
## from the source is automatically forwarded to the file analysis
|
||||
## framework.
|
||||
##
|
||||
## description: A record describing the source.
|
||||
##
|
||||
|
@ -199,7 +219,11 @@ export {
|
|||
|
||||
## Event that is called when the end of a data source has been reached,
|
||||
## including after an update.
|
||||
global end_of_data: event(name: string, source:string);
|
||||
##
|
||||
## name: Name of the input stream.
|
||||
##
|
||||
## source: String that identifies the data source (such as the filename).
|
||||
global end_of_data: event(name: string, source: string);
|
||||
}
|
||||
|
||||
@load base/bif/input.bif
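The table-stream machinery documented above reads an ASCII source into a Bro table keyed by *idx* with values of type *val*. A minimal sketch, assuming a tab-separated input file ``blacklist.file`` whose ``#fields`` header names the columns ``ip`` and ``reason`` (all names are illustrative)::

    module BlacklistDemo;

    type Idx: record { ip: addr; };
    type Val: record { reason: string; };

    global blacklist: table[addr] of Val = table();

    event bro_init()
        {
        Input::add_table([$source="blacklist.file", $name="blacklist",
                          $idx=Idx, $val=Val, $destination=blacklist,
                          $mode=Input::REREAD]);
        }

    event Input::end_of_data(name: string, source: string)
        {
        if ( name == "blacklist" )
            print |blacklist|;
        }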
@ -11,7 +11,9 @@ export {
|
|||
##
|
||||
## name: name of the input stream.
|
||||
## source: source of the input stream.
|
||||
## exit_code: exit code of the program, or number of the signal that forced the program to exit.
|
||||
## signal_exit: false when program exited normally, true when program was forced to exit by a signal.
|
||||
## exit_code: exit code of the program, or number of the signal that forced
|
||||
## the program to exit.
|
||||
## signal_exit: false when program exited normally, true when program was
|
||||
## forced to exit by a signal.
|
||||
global process_finished: event(name: string, source:string, exit_code:count, signal_exit:bool);
|
||||
}
@ -32,6 +32,8 @@ export {
|
|||
FILE_NAME,
|
||||
## Certificate SHA-1 hash.
|
||||
CERT_HASH,
|
||||
## Public key MD5 hash. (SSH server host keys are a good example.)
|
||||
PUBKEY_HASH,
|
||||
};
|
||||
|
||||
## Data about an :bro:type:`Intel::Item`.
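The new ``CERT_HASH`` indicator type plugs into the usual Intel item workflow. A minimal sketch of seeding one such indicator from a script, assuming the standard ``Intel::insert`` API (hash value and source label are illustrative)::

    event bro_init()
        {
        Intel::insert(Intel::Item($indicator="da39a3ee5e6b4b0d3255bfef95601890afd80709",
                                  $indicator_type=Intel::CERT_HASH,
                                  $meta=Intel::MetaData($source="local-blacklist")));
        }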
|
||||
|
@ -174,7 +176,7 @@ global min_data_store: MinDataStore &redef;
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(LOG, [$columns=Info, $ev=log_intel]);
|
||||
Log::create_stream(LOG, [$columns=Info, $ev=log_intel, $path="intel"]);
|
||||
}
|
||||
|
||||
function find(s: Seen): bool
@ -6,9 +6,10 @@
|
|||
module Log;
|
||||
|
||||
export {
|
||||
## Type that defines an ID unique to each log stream. Scripts creating new log
|
||||
## streams need to redef this enum to add their own specific log ID. The log ID
|
||||
## implicitly determines the default name of the generated log file.
|
||||
## Type that defines an ID unique to each log stream. Scripts creating new
|
||||
## log streams need to redef this enum to add their own specific log ID.
|
||||
## The log ID implicitly determines the default name of the generated log
|
||||
## file.
|
||||
type Log::ID: enum {
|
||||
## Dummy place-holder.
|
||||
UNKNOWN
|
||||
|
@ -20,25 +21,24 @@ export {
|
|||
## If true, remote logging is by default enabled for all filters.
|
||||
const enable_remote_logging = T &redef;
|
||||
|
||||
## Default writer to use if a filter does not specify
|
||||
## anything else.
|
||||
## Default writer to use if a filter does not specify anything else.
|
||||
const default_writer = WRITER_ASCII &redef;
|
||||
|
||||
## Default separator between fields for logwriters.
|
||||
## Can be overwritten by individual writers.
|
||||
## Default separator to use between fields.
|
||||
## Individual writers can use a different value.
|
||||
const separator = "\t" &redef;
|
||||
|
||||
## Separator between set elements.
|
||||
## Can be overwritten by individual writers.
|
||||
## Default separator to use between elements of a set.
|
||||
## Individual writers can use a different value.
|
||||
const set_separator = "," &redef;
|
||||
|
||||
## String to use for empty fields. This should be different from
|
||||
## *unset_field* to make the output unambiguous.
|
||||
## Can be overwritten by individual writers.
|
||||
## Default string to use for empty fields. This should be different
|
||||
## from *unset_field* to make the output unambiguous.
|
||||
## Individual writers can use a different value.
|
||||
const empty_field = "(empty)" &redef;
|
||||
|
||||
## String to use for an unset &optional field.
|
||||
## Can be overwritten by individual writers.
|
||||
## Default string to use for an unset &optional field.
|
||||
## Individual writers can use a different value.
|
||||
const unset_field = "-" &redef;
|
||||
|
||||
## Type defining the content of a logging stream.
|
||||
|
@ -50,11 +50,17 @@ export {
|
|||
## The event receives a single parameter, an instance of
|
||||
## type ``columns``.
|
||||
ev: any &optional;
|
||||
|
||||
## A path that will be inherited by any filters added to the
|
||||
## stream which do not already specify their own path.
|
||||
path: string &optional;
|
||||
};
|
||||
|
||||
## Builds the default path values for log filters if not otherwise
|
||||
## specified by a filter. The default implementation uses *id*
|
||||
## to derive a name.
|
||||
## to derive a name. Upon adding a filter to a stream, if neither
|
||||
## ``path`` nor ``path_func`` is explicitly set by them, then
|
||||
## this function is used as the ``path_func``.
|
||||
##
|
||||
## id: The ID associated with the log stream.
|
||||
##
|
||||
|
@ -63,7 +69,7 @@ export {
|
|||
## If no ``path`` is defined for the filter, then the first call
|
||||
## to the function will contain an empty string.
|
||||
##
|
||||
## rec: An instance of the streams's ``columns`` type with its
|
||||
## rec: An instance of the stream's ``columns`` type with its
|
||||
## fields set to the values to be logged.
|
||||
##
|
||||
## Returns: The path to be used for the filter.
|
||||
|
@ -81,7 +87,8 @@ export {
|
|||
terminating: bool; ##< True if rotation occurred due to Bro shutting down.
|
||||
};
|
||||
|
||||
## Default rotation interval. Zero disables rotation.
|
||||
## Default rotation interval to use for filters that do not specify
|
||||
## an interval. Zero disables rotation.
|
||||
##
|
||||
## Note that this is overridden by the BroControl LogRotationInterval
|
||||
## option.
|
||||
|
@ -116,8 +123,8 @@ export {
|
|||
## Indicates whether a log entry should be recorded.
|
||||
## If not given, all entries are recorded.
|
||||
##
|
||||
## rec: An instance of the streams's ``columns`` type with its
|
||||
## fields set to the values to logged.
|
||||
## rec: An instance of the stream's ``columns`` type with its
|
||||
## fields set to the values to be logged.
|
||||
##
|
||||
## Returns: True if the entry is to be recorded.
|
||||
pred: function(rec: any): bool &optional;
|
||||
|
@ -125,10 +132,10 @@ export {
|
|||
## Output path for recording entries matching this
|
||||
## filter.
|
||||
##
|
||||
## The specific interpretation of the string is up to
|
||||
## the used writer, and may for example be the destination
|
||||
## The specific interpretation of the string is up to the
|
||||
## logging writer, and may for example be the destination
|
||||
## file name. Generally, filenames are expected to be given
|
||||
## without any extensions; writers will add appropiate
|
||||
## without any extensions; writers will add appropriate
|
||||
## extensions automatically.
|
||||
##
|
||||
## If this path is found to conflict with another filter's
|
||||
|
@ -143,7 +150,9 @@ export {
|
|||
## to compute the string dynamically. It is ok to return
|
||||
## different strings for separate calls, but be careful: it's
|
||||
## easy to flood the disk by returning a new string for each
|
||||
## connection.
|
||||
## connection. Upon adding a filter to a stream, if neither
|
||||
## ``path`` nor ``path_func`` is explicitly set by them, then
|
||||
## :bro:see:`Log::default_path_func` is used.
|
||||
##
|
||||
## id: The ID associated with the log stream.
|
||||
##
|
||||
|
@ -153,7 +162,7 @@ export {
|
|||
## then the first call to the function will contain an
|
||||
## empty string.
|
||||
##
|
||||
## rec: An instance of the streams's ``columns`` type with its
|
||||
## rec: An instance of the stream's ``columns`` type with its
|
||||
## fields set to the values to be logged.
|
||||
##
|
||||
## Returns: The path to be used for the filter, which will be
|
||||
|
@ -177,7 +186,7 @@ export {
|
|||
## If true, entries are passed on to remote peers.
|
||||
log_remote: bool &default=enable_remote_logging;
|
||||
|
||||
## Rotation interval.
|
||||
## Rotation interval. Zero disables rotation.
|
||||
interv: interval &default=default_rotation_interval;
|
||||
|
||||
## Callback function to trigger for rotated files. If not set, the
|
||||
|
@ -207,9 +216,9 @@ export {
|
|||
|
||||
## Removes a logging stream completely, stopping all the threads.
|
||||
##
|
||||
## id: The ID enum to be associated with the new logging stream.
|
||||
## id: The ID associated with the logging stream.
|
||||
##
|
||||
## Returns: True if a new stream was successfully removed.
|
||||
## Returns: True if the stream was successfully removed.
|
||||
##
|
||||
## .. bro:see:: Log::create_stream
|
||||
global remove_stream: function(id: ID) : bool;
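The new stream-level ``path`` field and :bro:see:`Log::default_path_func` interact as described above: a filter that sets neither ``path`` nor ``path_func`` inherits the stream's path. A minimal sketch under an illustrative module name::

    module PathDemo;

    export {
        redef enum Log::ID += { LOG };

        type Info: record {
            ts: time &log;
            msg: string &log;
        };
    }

    event bro_init()
        {
        # Filters added later without $path or $path_func log to "pathdemo.log".
        Log::create_stream(PathDemo::LOG, [$columns=Info, $path="pathdemo"]);
        }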
|
||||
|
@ -379,6 +388,8 @@ export {
|
|||
global active_streams: table[ID] of Stream = table();
|
||||
}
|
||||
|
||||
global all_streams: table[ID] of Stream = table();
|
||||
|
||||
# We keep a script-level copy of all filters so that we can manipulate them.
|
||||
global filters: table[ID, string] of Filter;
|
||||
|
||||
|
@ -463,6 +474,7 @@ function create_stream(id: ID, stream: Stream) : bool
|
|||
return F;
|
||||
|
||||
active_streams[id] = stream;
|
||||
all_streams[id] = stream;
|
||||
|
||||
return add_default_filter(id);
|
||||
}
|
||||
|
@ -470,6 +482,7 @@ function create_stream(id: ID, stream: Stream) : bool
|
|||
function remove_stream(id: ID) : bool
|
||||
{
|
||||
delete active_streams[id];
|
||||
delete all_streams[id];
|
||||
return __remove_stream(id);
|
||||
}
|
||||
|
||||
|
@ -482,10 +495,12 @@ function disable_stream(id: ID) : bool
|
|||
|
||||
function add_filter(id: ID, filter: Filter) : bool
|
||||
{
|
||||
# This is a work-around for the fact that we can't forward-declare
|
||||
# the default_path_func and then use it as &default in the record
|
||||
# definition.
|
||||
if ( ! filter?$path_func )
|
||||
local stream = all_streams[id];
|
||||
|
||||
if ( stream?$path && ! filter?$path )
|
||||
filter$path = stream$path;
|
||||
|
||||
if ( ! filter?$path && ! filter?$path_func )
|
||||
filter$path_func = default_path_func;
|
||||
|
||||
filters[id, filter$name] = filter;
@ -37,6 +37,8 @@ export {
|
|||
user: string;
|
||||
## The remote host to which to transfer logs.
|
||||
host: string;
|
||||
## The port to connect to. Defaults to 22.
|
||||
host_port: count &default=22;
|
||||
## The path/directory on the remote host to send logs.
|
||||
path: string;
|
||||
};
|
||||
|
@ -63,8 +65,8 @@ function sftp_postprocessor(info: Log::RotationInfo): bool
|
|||
{
|
||||
local dst = fmt("%s/%s.%s.log", d$path, info$path,
|
||||
strftime(Log::sftp_rotation_date_format, info$open));
|
||||
command += fmt("echo put %s %s | sftp -b - %s@%s;", info$fname, dst,
|
||||
d$user, d$host);
|
||||
command += fmt("echo put %s %s | sftp -P %d -b - %s@%s;", info$fname, dst,
|
||||
d$host_port, d$user, d$host);
|
||||
}
|
||||
|
||||
command += fmt("/bin/rm %s", info$fname);
@ -1,15 +1,15 @@
|
|||
##! Interface for the ASCII log writer. Redefinable options are available
|
||||
##! to tweak the output format of ASCII logs.
|
||||
##!
|
||||
##! The ASCII writer supports currently one writer-specific filter option via
|
||||
##! ``config``: setting ``tsv`` to the string ``T`` turns the output into
|
||||
##! The ASCII writer currently supports one writer-specific per-filter config
|
||||
##! option: setting ``tsv`` to the string ``T`` turns the output into
|
||||
##! "tab-separated-value" mode where only a single header row with the column
|
||||
##! names is printed out as meta information, with no "# fields" prepended; no
|
||||
##! other meta data gets included in that mode.
|
||||
##! other meta data gets included in that mode. Example filter using this::
|
||||
##!
|
||||
##! Example filter using this::
|
||||
##!
|
||||
##! local my_filter: Log::Filter = [$name = "my-filter", $writer = Log::WRITER_ASCII, $config = table(["tsv"] = "T")];
|
||||
##! local f: Log::Filter = [$name = "my-filter",
|
||||
##! $writer = Log::WRITER_ASCII,
|
||||
##! $config = table(["tsv"] = "T")];
|
||||
##!
|
||||
|
||||
module LogAscii;
|
||||
|
@ -29,6 +29,8 @@ export {
|
|||
## Format of timestamps when writing out JSON. By default, the JSON
|
||||
## formatter will use double values for timestamps which represent the
|
||||
## number of seconds from the UNIX epoch.
|
||||
##
|
||||
## This option is also available as a per-filter ``$config`` option.
|
||||
const json_timestamps: JSON::TimestampFormat = JSON::TS_EPOCH &redef;
|
||||
|
||||
## If true, include lines with log meta information such as column names
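The ``json_timestamps`` option above only takes effect once the writer emits JSON. A minimal sketch of switching the ASCII writer to JSON output with ISO 8601 timestamps, assuming the writer's ``use_json`` toggle::

    redef LogAscii::use_json = T;
    redef LogAscii::json_timestamps = JSON::TS_ISO8601;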
@ -19,7 +19,7 @@ export {
|
|||
const unset_field = Log::unset_field &redef;
|
||||
|
||||
## String to use for empty fields. This should be different from
|
||||
## *unset_field* to make the output unambiguous.
|
||||
## *unset_field* to make the output unambiguous.
|
||||
const empty_field = Log::empty_field &redef;
|
||||
}
|
15
scripts/base/frameworks/netcontrol/__load__.bro
Normal file
|
@ -0,0 +1,15 @@
|
|||
@load ./types
|
||||
@load ./main
|
||||
@load ./plugins
|
||||
@load ./drop
|
||||
@load ./shunt
|
||||
@load ./catch-and-release
|
||||
|
||||
# The cluster framework must be loaded first.
|
||||
@load base/frameworks/cluster
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
@load ./cluster
|
||||
@else
|
||||
@load ./non-cluster
|
||||
@endif
|
104
scripts/base/frameworks/netcontrol/catch-and-release.bro
Normal file
|
@ -0,0 +1,104 @@
|
|||
##! Implementation of catch-and-release functionality for NetControl.
|
||||
|
||||
module NetControl;
|
||||
|
||||
@load ./main
|
||||
@load ./drop
|
||||
|
||||
export {
|
||||
## Stops all packets involving an IP address from being forwarded. This function
|
||||
## uses catch-and-release functionality, where the IP address is only dropped for
|
||||
## a short amount of time that is incremented steadily when the IP is encountered
|
||||
## again.
|
||||
##
|
||||
## a: The address to be dropped.
|
||||
##
|
||||
## t: How long to drop it, with 0 being indefinitely.
|
||||
##
|
||||
## location: An optional string describing where the drop was triggered.
|
||||
##
|
||||
## Returns: The id of the inserted rule on success and an empty string on failure.
|
||||
global drop_address_catch_release: function(a: addr, location: string &default="") : string;
|
||||
|
||||
## Time intervals for which subsequent drops of the same IP take
|
||||
## effect.
|
||||
const catch_release_intervals: vector of interval = vector(10min, 1hr, 24hrs, 7days) &redef;
|
||||
}
|
||||
|
||||
function per_block_interval(t: table[addr] of count, idx: addr): interval
|
||||
{
|
||||
local ct = t[idx];
|
||||
|
||||
# watch for the time of the next block...
|
||||
local blocktime = catch_release_intervals[ct];
|
||||
if ( (ct+1) in catch_release_intervals )
|
||||
blocktime = catch_release_intervals[ct+1];
|
||||
|
||||
return blocktime;
|
||||
}
|
||||
|
||||
# This is the internally maintained table containing all currently active catch-and-release
|
||||
# blocks.
|
||||
global blocks: table[addr] of count = {}
|
||||
&create_expire=0secs
|
||||
&expire_func=per_block_interval;
|
||||
|
||||
function current_block_interval(s: set[addr], idx: addr): interval
|
||||
{
|
||||
if ( idx !in blocks )
|
||||
{
|
||||
Reporter::error(fmt("Address %s not in blocks while inserting into current_blocks!", idx));
|
||||
return 0sec;
|
||||
}
|
||||
|
||||
return catch_release_intervals[blocks[idx]];
|
||||
}
|
||||
|
||||
global current_blocks: set[addr] = set()
|
||||
&create_expire=0secs
|
||||
&expire_func=current_block_interval;
|
||||
|
||||
function drop_address_catch_release(a: addr, location: string &default=""): string
|
||||
{
|
||||
if ( a in blocks )
|
||||
{
|
||||
Reporter::warning(fmt("Address %s already blocked using catch-and-release - ignoring duplicate", a));
|
||||
return "";
|
||||
}
|
||||
|
||||
local block_interval = catch_release_intervals[0];
|
||||
local ret = drop_address(a, block_interval, location);
|
||||
if ( ret != "" )
|
||||
{
|
||||
blocks[a] = 0;
|
||||
add current_blocks[a];
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
function check_conn(a: addr)
|
||||
{
|
||||
if ( a in blocks )
|
||||
{
|
||||
if ( a in current_blocks )
|
||||
# block has not been applied yet?
|
||||
return;
|
||||
|
||||
# ok, this one returned again while still in the backoff period.
|
||||
local try = blocks[a];
|
||||
if ( (try+1) in catch_release_intervals )
|
||||
++try;
|
||||
|
||||
blocks[a] = try;
|
||||
add current_blocks[a];
|
||||
local block_interval = catch_release_intervals[try];
|
||||
drop_address(a, block_interval, "Re-drop by catch-and-release");
|
||||
}
|
||||
}
|
||||
|
||||
event new_connection(c: connection)
|
||||
{
|
||||
# let's only check originating connections...
|
||||
check_conn(c$id$orig_h);
|
||||
}
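Catch-and-release is driven entirely through ``drop_address_catch_release``; re-drops of hosts that reappear are handled by the ``new_connection`` handler above. A minimal sketch of a caller, with the trigger and location string purely illustrative::

    function punish(a: addr)
        {
        local rid = NetControl::drop_address_catch_release(a, "flagged by local policy");
        if ( rid == "" )
            Reporter::warning(fmt("catch-and-release drop for %s was not installed", a));
        }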
|
99
scripts/base/frameworks/netcontrol/cluster.bro
Normal file
|
@ -0,0 +1,99 @@
|
|||
##! Cluster support for the NetControl framework.
|
||||
|
||||
@load ./main
|
||||
@load base/frameworks/cluster
|
||||
|
||||
module NetControl;
|
||||
|
||||
export {
|
||||
## This is the event used to transport add_rule calls to the manager.
|
||||
global cluster_netcontrol_add_rule: event(r: Rule);
|
||||
|
||||
## This is the event used to transport remove_rule calls to the manager.
|
||||
global cluster_netcontrol_remove_rule: event(id: string);
|
||||
}
|
||||
|
||||
## Workers need the ability to forward commands to the manager.
|
||||
redef Cluster::worker2manager_events += /NetControl::cluster_netcontrol_(add|remove)_rule/;
|
||||
## Workers need to see the result events from the manager.
|
||||
redef Cluster::manager2worker_events += /NetControl::rule_(added|removed|timeout|error)/;
|
||||
|
||||
|
||||
function activate(p: PluginState, priority: int)
|
||||
{
|
||||
# we only run the activate function on the manager.
|
||||
if ( Cluster::local_node_type() != Cluster::MANAGER )
|
||||
return;
|
||||
|
||||
activate_impl(p, priority);
|
||||
}
|
||||
|
||||
global local_rule_count: count = 1;
|
||||
|
||||
function add_rule(r: Rule) : string
|
||||
{
|
||||
if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
return add_rule_impl(r);
|
||||
else
|
||||
{
|
||||
if ( r$id == "" )
|
||||
r$id = cat(Cluster::node, ":", ++local_rule_count);
|
||||
|
||||
event NetControl::cluster_netcontrol_add_rule(r);
|
||||
return r$id;
|
||||
}
|
||||
}
|
||||
|
||||
function remove_rule(id: string) : bool
|
||||
{
|
||||
if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
return remove_rule_impl(id);
|
||||
else
|
||||
{
|
||||
event NetControl::cluster_netcontrol_remove_rule(id);
|
||||
return T; # well, we can't know here. So - just hope...
|
||||
}
|
||||
}
|
||||
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
event NetControl::cluster_netcontrol_add_rule(r: Rule)
|
||||
{
|
||||
add_rule_impl(r);
|
||||
}
|
||||
|
||||
event NetControl::cluster_netcontrol_remove_rule(id: string)
|
||||
{
|
||||
remove_rule_impl(id);
|
||||
}
|
||||
@endif
|
||||
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
event rule_expire(r: Rule, p: PluginState) &priority=-5
|
||||
{
|
||||
rule_expire_impl(r, p);
|
||||
}
|
||||
|
||||
event rule_added(r: Rule, p: PluginState, msg: string &default="") &priority=5
|
||||
{
|
||||
rule_added_impl(r, p, msg);
|
||||
|
||||
if ( r?$expire && r$expire > 0secs && ! p$plugin$can_expire )
|
||||
schedule r$expire { rule_expire(r, p) };
|
||||
}
|
||||
|
||||
event rule_removed(r: Rule, p: PluginState, msg: string &default="") &priority=-5
|
||||
{
|
||||
rule_removed_impl(r, p, msg);
|
||||
}
|
||||
|
||||
event rule_timeout(r: Rule, i: FlowInfo, p: PluginState) &priority=-5
|
||||
{
|
||||
rule_timeout_impl(r, i, p);
|
||||
}
|
||||
|
||||
event rule_error(r: Rule, p: PluginState, msg: string &default="") &priority=-5
|
||||
{
|
||||
rule_error_impl(r, p, msg);
|
||||
}
|
||||
@endif
|
||||
|
98
scripts/base/frameworks/netcontrol/drop.bro
Normal file
|
@ -0,0 +1,98 @@
|
|||
##! Implementation of the drop functionality for NetControl.
|
||||
|
||||
module NetControl;
|
||||
|
||||
@load ./main
|
||||
|
||||
export {
|
||||
redef enum Log::ID += { DROP };
|
||||
|
||||
## Stops all packets involving an IP address from being forwarded.
|
||||
##
|
||||
## a: The address to be dropped.
|
||||
##
|
||||
## t: How long to drop it, with 0 being indefinitely.
|
||||
##
|
||||
## location: An optional string describing where the drop was triggered.
|
||||
##
|
||||
## Returns: The id of the inserted rule on success and an empty string on failure.
|
||||
global drop_address: function(a: addr, t: interval, location: string &default="") : string;
|
||||
|
||||
## Stops all packets involving a connection from being forwarded.
|
||||
##
|
||||
## c: The connection to be dropped.
|
||||
##
|
||||
## t: How long to drop it, with 0 being indefinitely.
|
||||
##
|
||||
## location: An optional string describing where the drop was triggered.
|
||||
##
|
||||
## Returns: The id of the inserted rule on success and an empty string on failure.
|
||||
global drop_connection: function(c: conn_id, t: interval, location: string &default="") : string;
|
||||
|
||||
type DropInfo: record {
|
||||
## Time at which the recorded activity occurred.
|
||||
ts: time &log;
|
||||
## ID of the rule; unique during each Bro run
|
||||
rule_id: string &log;
|
||||
orig_h: addr &log; ##< The originator's IP address.
|
||||
orig_p: port &log &optional; ##< The originator's port number.
|
||||
resp_h: addr &log &optional; ##< The responder's IP address.
|
||||
resp_p: port &log &optional; ##< The responder's port number.
|
||||
## Expiry time of the drop.
|
||||
expire: interval &log;
|
||||
## Location where the underlying action was triggered.
|
||||
location: string &log &optional;
|
||||
};
|
||||
|
||||
## Event that can be handled to access the :bro:type:`NetControl::DropInfo`
|
||||
## record as it is sent on to the logging framework.
|
||||
global log_netcontrol_drop: event(rec: DropInfo);
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(NetControl::DROP, [$columns=DropInfo, $ev=log_netcontrol_drop, $path="netcontrol_drop"]);
|
||||
}
|
||||
|
||||
function drop_connection(c: conn_id, t: interval, location: string &default="") : string
|
||||
{
|
||||
local e: Entity = [$ty=CONNECTION, $conn=c];
|
||||
local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location];
|
||||
|
||||
local id = add_rule(r);
|
||||
|
||||
# Error should already be logged
|
||||
if ( id == "" )
|
||||
return id;
|
||||
|
||||
local log = DropInfo($ts=network_time(), $rule_id=id, $orig_h=c$orig_h, $orig_p=c$orig_p, $resp_h=c$resp_h, $resp_p=c$resp_p, $expire=t);
|
||||
|
||||
if ( location != "" )
|
||||
log$location=location;
|
||||
|
||||
Log::write(DROP, log);
|
||||
|
||||
return id;
|
||||
}
|
||||
|
||||
function drop_address(a: addr, t: interval, location: string &default="") : string
|
||||
{
|
||||
local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)];
|
||||
local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location];
|
||||
|
||||
local id = add_rule(r);
|
||||
|
||||
# Error should already be logged
|
||||
if ( id == "" )
|
||||
return id;
|
||||
|
||||
local log = DropInfo($ts=network_time(), $rule_id=id, $orig_h=a, $expire=t);
|
||||
|
||||
if ( location != "" )
|
||||
log$location=location;
|
||||
|
||||
Log::write(DROP, log);
|
||||
|
||||
return id;
|
||||
}
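``drop_address`` and ``drop_connection`` are the entry points intended for other scripts. A minimal sketch of calling them from an event handler, with the port and duration purely illustrative::

    event connection_established(c: connection)
        {
        if ( c$id$resp_p == 23/tcp )
            NetControl::drop_connection(c$id, 1hr, "telnet is not allowed");
        }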
|
||||
|
935
scripts/base/frameworks/netcontrol/main.bro
Normal file
|
@ -0,0 +1,935 @@
|
|||
##! Bro's packet acquisition and control framework.
|
||||
##!
|
||||
##! This plugin-based framework allows controlling the traffic that Bro monitors
|
||||
##! as well as, given access to the forwarding path, the traffic that the network
|
||||
##! forwards. By default, the framework lets everything through, to both Bro
|
||||
##! itself as well as on the network. Scripts can then add rules to impose
|
||||
##! restrictions on entities, such as specific connections or IP addresses.
|
||||
##!
|
||||
##! This framework has two APIs: a high-level one and a low-level one. The high-level API
|
||||
##! provides convenience functions for a set of common operations. The
|
||||
##! low-level API provides full flexibility.
|
||||
|
||||
module NetControl;
|
||||
|
||||
@load ./plugin
|
||||
@load ./types
|
||||
|
||||
export {
|
||||
## The framework's logging stream identifier.
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
# ###
|
||||
# ### Generic functions and events.
|
||||
# ###
|
||||
|
||||
# Activates a plugin.
|
||||
#
|
||||
# p: The plugin to activate.
|
||||
#
|
||||
# priority: The higher the priority, the earlier this plugin will be checked
|
||||
# whether it supports an operation, relative to other plugins.
|
||||
global activate: function(p: PluginState, priority: int);
|
||||
|
||||
# Event that is used to initialize plugins. Place all plugin initialization
|
||||
# related functionality in this event.
|
||||
global NetControl::init: event();
|
||||
|
||||
# Event that is raised once all plugins activated in ``NetControl::init`` have finished
|
||||
# their initialization.
|
||||
global NetControl::init_done: event();
|
||||
|
||||
# ###
|
||||
# ### High-level API.
|
||||
# ###
|
||||
|
||||
# ### Note - other high level primitives are in catch-and-release.bro, shunt.bro and
|
||||
# ### drop.bro
|
||||
|
||||
## Allows all traffic involving a specific IP address to be forwarded.
|
||||
##
|
||||
## a: The address to be whitelisted.
|
||||
##
|
||||
## t: How long to whitelist it, with 0 being indefinitely.
|
||||
##
|
||||
## location: An optional string describing where the whitelist was triggered.
|
||||
##
|
||||
## Returns: The id of the inserted rule on success and an empty string on failure.
|
||||
global whitelist_address: function(a: addr, t: interval, location: string &default="") : string;
|
||||
|
||||
## Allows all traffic involving a specific IP subnet to be forwarded.
|
||||
##
|
||||
## s: The subnet to be whitelisted.
|
||||
##
|
||||
## t: How long to whitelist it, with 0 being indefinitely.
|
||||
##
|
||||
## location: An optional string describing where the whitelist was triggered.
|
||||
##
|
||||
## Returns: The id of the inserted rule on success and an empty string on failure.
|
||||
global whitelist_subnet: function(s: subnet, t: interval, location: string &default="") : string;
|
||||
|
||||
## Redirects a uni-directional flow to another port.
|
||||
##
|
||||
## f: The flow to redirect.
|
||||
##
|
||||
## out_port: The port to redirect the flow to.
|
||||
##
|
||||
## t: How long to leave the redirect in place, with 0 being indefinitely.
|
||||
##
|
||||
## location: An optional string describing where the redirect was triggered.
|
||||
##
|
||||
## Returns: The id of the inserted rule on success and an empty string on failure.
|
||||
global redirect_flow: function(f: flow_id, out_port: count, t: interval, location: string &default="") : string;
|
||||
|
||||
## Quarantines a host by rewriting DNS queries that the infected host sends to the network's DNS server
|
||||
## so that they are redirected to the quarantine host, which has to answer all queries with its own address. Only HTTP communication
|
||||
## from the infected host to the quarantine host is allowed.
|
||||
##
|
||||
## infected: the host to quarantine
|
||||
##
|
||||
## dns: the network dns server
|
||||
##
|
||||
## quarantine: the quarantine server running a dns and a web server
|
||||
##
|
||||
## t: how long to leave the quarantine in place
|
||||
##
|
||||
## Returns: Vector of inserted rules on success, empty list on failure.
|
||||
global quarantine_host: function(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string;
|
||||
|
||||
## Flushes all state.
|
||||
global clear: function();
|
||||
|
||||
# ###
|
||||
# ### Low-level API.
|
||||
# ###
|
||||
|
||||
###### Manipulation of rules.
|
||||
|
||||
## Installs a rule.
|
||||
##
|
||||
## r: The rule to install.
|
||||
##
|
||||
## Returns: If successful, returns an ID string unique to the rule that can later
|
||||
## be used to refer to it. If unsuccessful, returns an empty string. The ID is also
|
||||
## assigned to ``r$id``. Note that "successful" means "a plugin knew how to handle
|
||||
## the rule", it doesn't necessarily mean that it was indeed successfully put in
|
||||
## place, because that might happen asynchronously and thus fail only later.
|
||||
global add_rule: function(r: Rule) : string;
|
||||
|
||||
## Removes a rule.
|
||||
##
|
||||
## id: The rule to remove, specified as the ID returned by :bro:id:`add_rule`.
|
||||
##
|
||||
## Returns: True if successful, i.e., the relevant plugin indicated that it knew how
|
||||
## to handle the removal. Note that again "success" means the plugin accepted the
|
||||
## removal. They might still fail to put it into effect, as that might happen
|
||||
## asynchronously and thus go wrong at that point.
|
||||
global remove_rule: function(id: string) : bool;
|
||||
|
||||
## Searches all rules affecting a certain IP address.
|
||||
##
|
||||
## ip: The ip address to search for
|
||||
##
|
||||
## Returns: vector of all rules affecting the IP address
|
||||
global find_rules_addr: function(ip: addr) : vector of Rule;
|
||||
|
||||
## Searches all rules affecting a certain subnet.
|
||||
##
|
||||
## sn: The subnet to search for
|
||||
##
|
||||
## Returns: vector of all rules affecting the subnet
|
||||
global find_rules_subnet: function(sn: subnet) : vector of Rule;
|
||||
|
||||
###### Asynchronous feedback on rules.
|
||||
|
||||
## Confirms that a rule was put in place.
|
||||
##
|
||||
## r: The rule now in place.
|
||||
##
|
||||
## p: The state for the plugin that put it into place.
|
||||
##
|
||||
## msg: An optional informational message by the plugin.
|
||||
global rule_added: event(r: Rule, p: PluginState, msg: string &default="");
|
||||
|
||||
## Reports that a rule was removed due to a remove_rule function call.
|
||||
##
|
||||
## r: The rule now removed.
|
||||
##
|
||||
## p: The state for the plugin that had the rule in place and now
|
||||
## removed it.
|
||||
##
|
||||
## msg: An optional informational message by the plugin.
|
||||
global rule_removed: event(r: Rule, p: PluginState, msg: string &default="");
|
||||
|
||||
## Reports that a rule was removed internally due to a timeout.
|
||||
##
|
||||
## r: The rule now removed.
|
||||
##
|
||||
## i: Additional flow information, if supported by the protocol.
|
||||
##
|
||||
## p: The state for the plugin that had the rule in place and now
|
||||
## removed it.
|
||||
##
|
||||
## msg: An optional informational message by the plugin.
|
||||
global rule_timeout: event(r: Rule, i: FlowInfo, p: PluginState);
|
||||
|
||||
## Reports an error when operating on a rule.
|
||||
##
|
||||
## r: The rule that encountered an error.
|
||||
##
|
||||
## p: The state for the plugin that reported the error.
|
||||
##
|
||||
## msg: An optional informational message by the plugin.
|
||||
global rule_error: event(r: Rule, p: PluginState, msg: string &default="");
|
||||
|
||||
## Hook that allows the modification of rules passed to add_rule before they
|
||||
## are passed on to the plugins. If one of the hooks uses break, the rule is
|
||||
## ignored and not passed on to any plugin.
|
||||
##
|
||||
## r: The rule to be added
|
||||
global NetControl::rule_policy: hook(r: Rule);
|
||||
|
||||
##### Plugin functions
|
||||
|
||||
## Function called by plugins once they finished their activation. After all
|
||||
## plugins defined in bro_init finished to activate, rules will start to be sent
|
||||
## to the plugins. Rules that scripts try to set before the backends are ready
|
||||
## will be discarded.
|
||||
global plugin_activated: function(p: PluginState);
|
||||
|
||||
## Type of an entry in the NetControl log.
|
||||
type InfoCategory: enum {
|
||||
## A log entry reflecting a framework message.
|
||||
MESSAGE,
|
||||
## A log entry reflecting a framework error.
|
||||
ERROR,
|
||||
## A log entry about a rule.
|
||||
RULE
|
||||
};
|
||||
|
||||
## State of an entry in the NetControl log.
|
||||
type InfoState: enum {
|
||||
REQUESTED,
|
||||
SUCCEEDED,
|
||||
FAILED,
|
||||
REMOVED,
|
||||
TIMEOUT,
|
||||
};
|
||||
|
||||
## The record type defining the column fields of the NetControl log.
|
||||
type Info: record {
|
||||
## Time at which the recorded activity occurred.
|
||||
ts: time &log;
|
||||
## ID of the rule; unique during each Bro run
|
||||
rule_id: string &log &optional;
|
||||
## Type of the log entry.
|
||||
category: InfoCategory &log &optional;
|
||||
## The command the log entry is about.
|
||||
cmd: string &log &optional;
|
||||
## State the log entry reflects.
|
||||
state: InfoState &log &optional;
|
||||
## String describing an action the entry is about.
|
||||
action: string &log &optional;
|
||||
## The target type of the action.
|
||||
target: TargetType &log &optional;
|
||||
## Type of the entity the log entry is about.
|
||||
entity_type: string &log &optional;
|
||||
## String describing the entity the log entry is about.
|
||||
entity: string &log &optional;
|
||||
## String describing the optional modification of the entry (e.g. redirect)
|
||||
mod: string &log &optional;
|
||||
## String with an additional message.
|
||||
msg: string &log &optional;
|
||||
## Number describing the priority of the log entry
|
||||
priority: int &log &optional;
|
||||
## Expiry time of the log entry
|
||||
expire: interval &log &optional;
|
||||
## Location where the underlying action was triggered.
|
||||
location: string &log &optional;
|
||||
## Plugin triggering the log entry.
|
||||
plugin: string &log &optional;
|
||||
};
|
||||
|
||||
## Event that can be handled to access the :bro:type:`NetControl::Info`
|
||||
## record as it is sent on to the logging framework.
|
||||
global log_netcontrol: event(rec: Info);
|
||||
}
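Backends are registered from ``NetControl::init`` and the framework signals readiness via ``NetControl::init_done``, as described above. A minimal sketch, assuming the debug plugin shipped with the framework provides ``NetControl::create_debug``::

    event NetControl::init()
        {
        # Illustrative backend; substitute your switch/firewall plugin here.
        local pacf = NetControl::create_debug(T);
        NetControl::activate(pacf, 0);
        }

    event NetControl::init_done()
        {
        print "NetControl is ready";
        }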
|
||||
|
||||
redef record Rule += {
|
||||
##< Internally set to the plugins handling the rule.
|
||||
_plugin_ids: set[count] &default=count_set();
|
||||
##< Internally set to the plugins on which the rule is currently active.
|
||||
_active_plugin_ids: set[count] &default=count_set();
|
||||
##< Track if the rule was added successfully by all responsible plugins.
|
||||
_added: bool &default=F;
|
||||
};
|
||||
|
||||
# Variable tracking the state of plugin activation. Once all plugins that
|
||||
# have been added in bro_init are activated, this will switch to T and
|
||||
# the event NetControl::init_done will be raised.
|
||||
global plugins_active: bool = F;
|
||||
|
||||
# Set to true at the end of bro_init (with very low priority).
|
||||
# Used to track when plugin activation could potentially be finished
|
||||
global bro_init_done: bool = F;
|
||||
|
||||
# The counters that are used to generate the rule and plugin IDs
|
||||
global rule_counter: count = 1;
|
||||
global plugin_counter: count = 1;
|
||||
|
||||
# List of the currently active plugins
|
||||
global plugins: vector of PluginState;
|
||||
global plugin_ids: table[count] of PluginState;
|
||||
|
||||
# These tables hold information about rules.
|
||||
global rules: table[string] of Rule; # Rules indexed by id and cid
|
||||
|
||||
# All rules that apply to a certain subnet/IP address.
|
||||
global rules_by_subnets: table[subnet] of set[string];
|
||||
|
||||
# Rules pertaining to a specific entity.
|
||||
# There always only can be one rule of each type for one entity.
|
||||
global rule_entities: table[Entity, RuleType] of Rule;
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(NetControl::LOG, [$columns=Info, $ev=log_netcontrol, $path="netcontrol"]);
|
||||
}
|
||||
|
||||
function entity_to_info(info: Info, e: Entity)
|
||||
{
|
||||
info$entity_type = fmt("%s", e$ty);
|
||||
|
||||
switch ( e$ty ) {
|
||||
case ADDRESS:
|
||||
info$entity = fmt("%s", e$ip);
|
||||
break;
|
||||
|
||||
case CONNECTION:
|
||||
info$entity = fmt("%s/%d<->%s/%d",
|
||||
e$conn$orig_h, e$conn$orig_p,
|
||||
e$conn$resp_h, e$conn$resp_p);
|
||||
break;
|
||||
|
||||
case FLOW:
|
||||
local ffrom_ip = "*";
|
||||
local ffrom_port = "*";
|
||||
local fto_ip = "*";
|
||||
local fto_port = "*";
|
||||
local ffrom_mac = "*";
|
||||
local fto_mac = "*";
|
||||
if ( e$flow?$src_h )
|
||||
ffrom_ip = cat(e$flow$src_h);
|
||||
if ( e$flow?$src_p )
|
||||
ffrom_port = fmt("%d", e$flow$src_p);
|
||||
if ( e$flow?$dst_h )
|
||||
fto_ip = cat(e$flow$dst_h);
|
||||
if ( e$flow?$dst_p )
|
||||
fto_port = fmt("%d", e$flow$dst_p);
|
||||
info$entity = fmt("%s/%s->%s/%s",
|
||||
ffrom_ip, ffrom_port,
|
||||
fto_ip, fto_port);
|
||||
if ( e$flow?$src_m || e$flow?$dst_m )
|
||||
{
|
||||
if ( e$flow?$src_m )
|
||||
ffrom_mac = e$flow$src_m;
|
||||
if ( e$flow?$dst_m )
|
||||
fto_mac = e$flow$dst_m;
|
||||
|
||||
info$entity = fmt("%s (%s->%s)", info$entity, ffrom_mac, fto_mac);
|
||||
}
|
||||
break;
|
||||
|
||||
case MAC:
|
||||
info$entity = e$mac;
|
||||
break;
|
||||
|
||||
default:
|
||||
info$entity = "<unknown entity type>";
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
function rule_to_info(info: Info, r: Rule)
|
||||
{
|
||||
info$action = fmt("%s", r$ty);
|
||||
info$target = r$target;
|
||||
info$rule_id = r$id;
|
||||
info$expire = r$expire;
|
||||
info$priority = r$priority;
|
||||
|
||||
if ( r?$location && r$location != "" )
|
||||
info$location = r$location;
|
||||
|
||||
if ( r$ty == REDIRECT )
|
||||
info$mod = fmt("-> %d", r$out_port);
|
||||
|
||||
if ( r$ty == MODIFY )
|
||||
{
|
||||
local mfrom_ip = "_";
|
||||
local mfrom_port = "_";
|
||||
local mto_ip = "_";
|
||||
local mto_port = "_";
|
||||
local mfrom_mac = "_";
|
||||
local mto_mac = "_";
|
||||
if ( r$mod?$src_h )
|
||||
mfrom_ip = cat(r$mod$src_h);
|
||||
if ( r$mod?$src_p )
|
||||
mfrom_port = fmt("%d", r$mod$src_p);
|
||||
if ( r$mod?$dst_h )
|
||||
mto_ip = cat(r$mod$dst_h);
|
||||
if ( r$mod?$dst_p )
|
||||
mto_port = fmt("%d", r$mod$dst_p);
|
||||
|
||||
if ( r$mod?$src_m )
|
||||
mfrom_mac = r$mod$src_m;
|
||||
if ( r$mod?$dst_m )
|
||||
mto_mac = r$mod$dst_m;
|
||||
|
||||
info$mod = fmt("Src: %s/%s (%s) Dst: %s/%s (%s)",
|
||||
mfrom_ip, mfrom_port, mfrom_mac, mto_ip, mto_port, mto_mac);
|
||||
|
||||
if ( r$mod?$redirect_port )
|
||||
info$mod = fmt("%s -> %d", info$mod, r$mod$redirect_port);
|
||||
|
||||
}
|
||||
|
||||
entity_to_info(info, r$entity);
|
||||
}
|
||||
|
||||
function log_msg(msg: string, p: PluginState)
|
||||
{
|
||||
Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg, $plugin=p$plugin$name(p)]);
|
||||
}
|
||||
|
||||
function log_error(msg: string, p: PluginState)
|
||||
{
|
||||
Log::write(LOG, [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)]);
|
||||
}
|
||||
|
||||
function log_msg_no_plugin(msg: string)
|
||||
{
|
||||
Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg]);
|
||||
}
|
||||
|
||||
function log_rule(r: Rule, cmd: string, state: InfoState, p: PluginState, msg: string &default="")
|
||||
{
|
||||
local info: Info = [$ts=network_time()];
|
||||
info$category = RULE;
|
||||
info$cmd = cmd;
|
||||
info$state = state;
|
||||
info$plugin = p$plugin$name(p);
|
||||
if ( msg != "" )
|
||||
info$msg = msg;
|
||||
|
||||
rule_to_info(info, r);
|
||||
|
||||
Log::write(LOG, info);
|
||||
}
|
||||
|
||||
function log_rule_error(r: Rule, msg: string, p: PluginState)
|
||||
{
|
||||
local info: Info = [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)];
|
||||
rule_to_info(info, r);
|
||||
Log::write(LOG, info);
|
||||
}
|
||||
|
||||
function log_rule_no_plugin(r: Rule, state: InfoState, msg: string)
|
||||
{
|
||||
local info: Info = [$ts=network_time()];
|
||||
info$category = RULE;
|
||||
info$state = state;
|
||||
info$msg = msg;
|
||||
|
||||
rule_to_info(info, r);
|
||||
|
||||
Log::write(LOG, info);
|
||||
}
|
||||
|
||||
function whitelist_address(a: addr, t: interval, location: string &default="") : string
|
||||
{
|
||||
local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)];
|
||||
local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location];
|
||||
|
||||
return add_rule(r);
|
||||
}
|
||||
|
||||
function whitelist_subnet(s: subnet, t: interval, location: string &default="") : string
|
||||
{
|
||||
local e: Entity = [$ty=ADDRESS, $ip=s];
|
||||
local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location];
|
||||
|
||||
return add_rule(r);
|
||||
}
|
||||
|
||||
|
||||
function redirect_flow(f: flow_id, out_port: count, t: interval, location: string &default="") : string
|
||||
{
|
||||
local flow = NetControl::Flow(
|
||||
$src_h=addr_to_subnet(f$src_h),
|
||||
$src_p=f$src_p,
|
||||
$dst_h=addr_to_subnet(f$dst_h),
|
||||
$dst_p=f$dst_p
|
||||
);
|
||||
local e: Entity = [$ty=FLOW, $flow=flow];
|
||||
local r: Rule = [$ty=REDIRECT, $target=FORWARD, $entity=e, $expire=t, $location=location, $out_port=out_port];
|
||||
|
||||
return add_rule(r);
|
||||
}
|
||||
|
||||
function quarantine_host(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string
|
||||
{
|
||||
local orules: vector of string = vector();
|
||||
local edrop: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected))];
|
||||
local rdrop: Rule = [$ty=DROP, $target=FORWARD, $entity=edrop, $expire=t, $location=location];
|
||||
orules[|orules|] = add_rule(rdrop);
|
||||
|
||||
local todnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(dns), $dst_p=53/udp)];
|
||||
local todnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=todnse, $expire=t, $location=location, $mod=FlowMod($dst_h=quarantine), $priority=+5);
|
||||
orules[|orules|] = add_rule(todnsr);
|
||||
|
||||
local fromdnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(dns), $src_p=53/udp, $dst_h=addr_to_subnet(infected))];
|
||||
local fromdnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=fromdnse, $expire=t, $location=location, $mod=FlowMod($src_h=dns), $priority=+5);
|
||||
orules[|orules|] = add_rule(fromdnsr);
|
||||
|
||||
local wle: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(quarantine), $dst_p=80/tcp)];
|
||||
local wlr = Rule($ty=WHITELIST, $target=FORWARD, $entity=wle, $expire=t, $location=location, $priority=+5);
|
||||
orules[|orules|] = add_rule(wlr);
|
||||
|
||||
return orules;
|
||||
}
|
||||
|
||||
function check_plugins()
|
||||
{
|
||||
if ( plugins_active )
|
||||
return;
|
||||
|
||||
local all_active = T;
|
||||
for ( i in plugins )
|
||||
{
|
||||
local p = plugins[i];
|
||||
if ( p$_activated == F )
|
||||
all_active = F;
|
||||
}
|
||||
|
||||
if ( all_active )
|
||||
{
|
||||
plugins_active = T;
|
||||
|
||||
# Skip log message if there are no plugins
|
||||
if ( |plugins| > 0 )
|
||||
log_msg_no_plugin("plugin initialization done");
|
||||
|
||||
event NetControl::init_done();
|
||||
}
|
||||
}
|
||||
|
||||
function plugin_activated(p: PluginState)
|
||||
{
|
||||
local id = p$_id;
|
||||
if ( id !in plugin_ids )
|
||||
{
|
||||
log_error("unknown plugin activated", p);
|
||||
return;
|
||||
}
|
||||
plugin_ids[id]$_activated = T;
|
||||
log_msg("activation finished", p);
|
||||
|
||||
if ( bro_init_done )
|
||||
check_plugins();
|
||||
}
|
||||
|
||||
event bro_init() &priority=-5
|
||||
{
|
||||
event NetControl::init();
|
||||
}
|
||||
|
||||
event NetControl::init() &priority=-20
|
||||
{
|
||||
bro_init_done = T;
|
||||
|
||||
check_plugins();
|
||||
|
||||
if ( plugins_active == F )
|
||||
log_msg_no_plugin("waiting for plugins to initialize");
|
||||
}
|
||||
|
||||
# Low-level functions that only runs on the manager (or standalone) Bro node.
|
||||
|
||||
function activate_impl(p: PluginState, priority: int)
|
||||
{
|
||||
p$_priority = priority;
|
||||
plugins[|plugins|] = p;
|
||||
sort(plugins, function(p1: PluginState, p2: PluginState) : int { return p2$_priority - p1$_priority; });
|
||||
|
||||
plugin_ids[plugin_counter] = p;
|
||||
p$_id = plugin_counter;
|
||||
++plugin_counter;
|
||||
|
||||
# perform one-time initialization
|
||||
if ( p$plugin?$init )
|
||||
{
|
||||
log_msg(fmt("activating plugin with priority %d", priority), p);
|
||||
p$plugin$init(p);
|
||||
}
|
||||
else
|
||||
{
|
||||
# no initialization necessary, mark plugin as active right away
|
||||
plugin_activated(p);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
function add_one_subnet_entry(s: subnet, r: Rule)
|
||||
{
|
||||
if ( ! check_subnet(s, rules_by_subnets) )
|
||||
rules_by_subnets[s] = set(r$id);
|
||||
else
|
||||
add rules_by_subnets[s][r$id];
|
||||
}
|
||||
|
||||
function add_subnet_entry(rule: Rule)
|
||||
{
|
||||
local e = rule$entity;
|
||||
if ( e$ty == ADDRESS )
|
||||
{
|
||||
add_one_subnet_entry(e$ip, rule);
|
||||
}
|
||||
else if ( e$ty == CONNECTION )
|
||||
{
|
||||
add_one_subnet_entry(addr_to_subnet(e$conn$orig_h), rule);
|
||||
add_one_subnet_entry(addr_to_subnet(e$conn$resp_h), rule);
|
||||
}
|
||||
else if ( e$ty == FLOW )
|
||||
{
|
||||
if ( e$flow?$src_h )
|
||||
add_one_subnet_entry(e$flow$src_h, rule);
|
||||
if ( e$flow?$dst_h )
|
||||
add_one_subnet_entry(e$flow$dst_h, rule);
|
||||
}
|
||||
}
|
||||
|
||||
function remove_one_subnet_entry(s: subnet, r: Rule)
|
||||
{
|
||||
if ( ! check_subnet(s, rules_by_subnets) )
|
||||
return;
|
||||
|
||||
if ( r$id !in rules_by_subnets[s] )
|
||||
return;
|
||||
|
||||
delete rules_by_subnets[s][r$id];
|
||||
if ( |rules_by_subnets[s]| == 0 )
|
||||
delete rules_by_subnets[s];
|
||||
}
|
||||
|
||||
function remove_subnet_entry(rule: Rule)
|
||||
{
|
||||
local e = rule$entity;
|
||||
if ( e$ty == ADDRESS )
|
||||
{
|
||||
remove_one_subnet_entry(e$ip, rule);
|
||||
}
|
||||
else if ( e$ty == CONNECTION )
|
||||
{
|
||||
remove_one_subnet_entry(addr_to_subnet(e$conn$orig_h), rule);
|
||||
remove_one_subnet_entry(addr_to_subnet(e$conn$resp_h), rule);
|
||||
}
|
||||
else if ( e$ty == FLOW )
|
||||
{
|
||||
if ( e$flow?$src_h )
|
||||
remove_one_subnet_entry(e$flow$src_h, rule);
|
||||
if ( e$flow?$dst_h )
|
||||
remove_one_subnet_entry(e$flow$dst_h, rule);
|
||||
}
|
||||
}
|
||||
|
||||
function find_rules_subnet(sn: subnet) : vector of Rule
|
||||
{
|
||||
local ret: vector of Rule = vector();
|
||||
|
||||
local matches = matching_subnets(sn, rules_by_subnets);
|
||||
|
||||
for ( m in matches )
|
||||
{
|
||||
local sn_entry = matches[m];
|
||||
local rule_ids = rules_by_subnets[sn_entry];
|
||||
for ( rule_id in rules_by_subnets[sn_entry] )
|
||||
{
|
||||
if ( rule_id in rules )
|
||||
ret[|ret|] = rules[rule_id];
|
||||
else
|
||||
Reporter::error("find_rules_subnet - internal data structure error, missing rule");
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
function find_rules_addr(ip: addr) : vector of Rule
|
||||
{
|
||||
return find_rules_subnet(addr_to_subnet(ip));
|
||||
}
|
||||
|
||||
function add_rule_impl(rule: Rule) : string
|
||||
{
|
||||
if ( ! plugins_active )
|
||||
{
|
||||
log_rule_no_plugin(rule, FAILED, "plugins not initialized yet");
|
||||
return "";
|
||||
}
|
||||
|
||||
rule$cid = ++rule_counter; # numeric id that can be used by plugins for their rules.
|
||||
|
||||
if ( ! rule?$id || rule$id == "" )
|
||||
rule$id = cat(rule$cid);
|
||||
|
||||
if ( ! hook NetControl::rule_policy(rule) )
|
||||
return "";
|
||||
|
||||
if ( [rule$entity, rule$ty] in rule_entities )
|
||||
{
|
||||
log_rule_no_plugin(rule, FAILED, "discarded duplicate insertion");
|
||||
return "";
|
||||
}
|
||||
|
||||
local accepted = F;
|
||||
local priority: int = +0;
|
||||
|
||||
for ( i in plugins )
|
||||
{
|
||||
local p = plugins[i];
|
||||
|
||||
if ( p$_activated == F )
|
||||
next;
|
||||
|
||||
# in this case, rule was accepted by earlier plugin and this plugin has a lower
|
||||
# priority. Abort and do not send there...
|
||||
if ( accepted == T && p$_priority != priority )
|
||||
break;
|
||||
|
||||
if ( p$plugin$add_rule(p, rule) )
|
||||
{
|
||||
accepted = T;
|
||||
priority = p$_priority;
|
||||
log_rule(rule, "ADD", REQUESTED, p);
|
||||
|
||||
add rule$_plugin_ids[p$_id];
|
||||
}
|
||||
}
|
||||
|
||||
if ( accepted )
|
||||
{
|
||||
rules[rule$id] = rule;
|
||||
rule_entities[rule$entity, rule$ty] = rule;
|
||||
|
||||
add_subnet_entry(rule);
|
||||
|
||||
return rule$id;
|
||||
}
|
||||
|
||||
log_rule_no_plugin(rule, FAILED, "not supported");
|
||||
return "";
|
||||
}
|
||||
|
||||
function remove_rule_plugin(r: Rule, p: PluginState): bool
|
||||
{
|
||||
local success = T;
|
||||
|
||||
if ( ! p$plugin$remove_rule(p, r) )
|
||||
{
|
||||
# still continue and send to other plugins
|
||||
log_rule_error(r, "remove failed", p);
|
||||
success = F;
|
||||
}
|
||||
else
|
||||
{
|
||||
log_rule(r, "REMOVE", REQUESTED, p);
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
function remove_rule_impl(id: string) : bool
|
||||
{
|
||||
if ( id !in rules )
|
||||
{
|
||||
Reporter::error(fmt("Rule %s does not exist in NetControl::remove_rule", id));
|
||||
return F;
|
||||
}
|
||||
|
||||
local r = rules[id];
|
||||
|
||||
local success = T;
|
||||
for ( plugin_id in r$_active_plugin_ids )
|
||||
{
|
||||
local p = plugin_ids[plugin_id];
|
||||
success = remove_rule_plugin(r, p);
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
function rule_expire_impl(r: Rule, p: PluginState) &priority=-5
|
||||
{
|
||||
# do not emit timeout events on shutdown
|
||||
if ( bro_is_terminating() )
|
||||
return;
|
||||
|
||||
if ( r$id !in rules )
|
||||
# Removed already.
|
||||
return;
|
||||
|
||||
event NetControl::rule_timeout(r, FlowInfo(), p); # timeout implementation will handle the removal
|
||||
}
|
||||
|
||||
function rule_added_impl(r: Rule, p: PluginState, msg: string &default="")
|
||||
{
|
||||
if ( r$id !in rules )
|
||||
{
|
||||
log_rule_error(r, "Addition of unknown rule", p);
|
||||
return;
|
||||
}
|
||||
|
||||
# use our version to prevent operating on copies.
|
||||
local rule = rules[r$id];
|
||||
if ( p$_id !in rule$_plugin_ids )
|
||||
{
|
||||
log_rule_error(rule, "Rule added to non-responsible plugin", p);
|
||||
return;
|
||||
}
|
||||
|
||||
log_rule(r, "ADD", SUCCEEDED, p, msg);
|
||||
|
||||
add rule$_active_plugin_ids[p$_id];
|
||||
if ( |rule$_plugin_ids| == |rule$_active_plugin_ids| )
|
||||
{
|
||||
# rule was completely added.
|
||||
rule$_added = T;
|
||||
}
|
||||
}
|
||||
|
||||
function rule_cleanup(r: Rule)
|
||||
{
|
||||
if ( |r$_active_plugin_ids| > 0 )
|
||||
return;
|
||||
|
||||
remove_subnet_entry(r);
|
||||
|
||||
delete rule_entities[r$entity, r$ty];
|
||||
delete rules[r$id];
|
||||
}
|
||||
|
||||
function rule_removed_impl(r: Rule, p: PluginState, msg: string &default="")
|
||||
{
|
||||
if ( r$id !in rules )
|
||||
{
|
||||
log_rule_error(r, "Removal of non-existing rule", p);
|
||||
return;
|
||||
}
|
||||
|
||||
# use our version to prevent operating on copies.
|
||||
local rule = rules[r$id];
|
||||
|
||||
if ( p$_id !in rule$_plugin_ids )
|
||||
{
|
||||
log_rule_error(r, "Removed from non-assigned plugin", p);
|
||||
return;
|
||||
}
|
||||
|
||||
if ( p$_id in rule$_active_plugin_ids )
|
||||
{
|
||||
delete rule$_active_plugin_ids[p$_id];
|
||||
}
|
||||
|
||||
log_rule(rule, "REMOVE", SUCCEEDED, p, msg);
|
||||
rule_cleanup(rule);
|
||||
}
|
||||
|
||||
function rule_timeout_impl(r: Rule, i: FlowInfo, p: PluginState)
|
||||
{
|
||||
if ( r$id !in rules )
|
||||
{
|
||||
log_rule_error(r, "Timeout of non-existing rule", p);
|
||||
return;
|
||||
}
|
||||
|
||||
local rule = rules[r$id];
|
||||
|
||||
local msg = "";
|
||||
if ( i?$packet_count )
|
||||
msg = fmt("Packets: %d", i$packet_count);
|
||||
if ( i?$byte_count )
|
||||
{
|
||||
if ( msg != "" )
|
||||
msg = msg + " ";
|
||||
msg = fmt("%sBytes: %s", msg, i$byte_count);
|
||||
}
|
||||
|
||||
log_rule(rule, "EXPIRE", TIMEOUT, p, msg);
|
||||
|
||||
if ( ! p$plugin$can_expire )
|
||||
{
|
||||
# in this case, we actually have to delete the rule and the timeout
|
||||
# call just originated locally
|
||||
remove_rule_plugin(rule, p);
|
||||
return;
|
||||
}
|
||||
|
||||
if ( p$_id !in rule$_plugin_ids )
|
||||
{
|
||||
log_rule_error(r, "Timeout from non-assigned plugin", p);
|
||||
return;
|
||||
}
|
||||
|
||||
if ( p$_id in rule$_active_plugin_ids )
|
||||
{
|
||||
delete rule$_active_plugin_ids[p$_id];
|
||||
}
|
||||
|
||||
rule_cleanup(rule);
|
||||
}
|
||||
|
||||
function rule_error_impl(r: Rule, p: PluginState, msg: string &default="")
|
||||
{
|
||||
if ( r$id !in rules )
|
||||
{
|
||||
log_rule_error(r, "Error of non-existing rule", p);
|
||||
return;
|
||||
}
|
||||
|
||||
local rule = rules[r$id];
|
||||
|
||||
log_rule_error(rule, msg, p);
|
||||
|
||||
# Remove the plugin from both the active and the full plugin set of the rule.
|
||||
# If there are no plugins left afterwards, delete the rule.
|
||||
if ( p$_id !in rule$_plugin_ids )
|
||||
{
|
||||
log_rule_error(r, "Error from non-assigned plugin", p);
|
||||
return;
|
||||
}
|
||||
|
||||
if ( p$_id in rule$_active_plugin_ids )
|
||||
{
|
||||
# error during removal. Let's pretend it worked.
|
||||
delete rule$_plugin_ids[p$_id];
|
||||
delete rule$_active_plugin_ids[p$_id];
|
||||
rule_cleanup(rule);
|
||||
}
|
||||
else
|
||||
{
|
||||
# error during insertion. Meh. If we are the only plugin, remove the rule again.
|
||||
# Otherwise, keep it, minus us.
|
||||
delete rule$_plugin_ids[p$_id];
|
||||
if ( |rule$_plugin_ids| == 0 )
|
||||
{
|
||||
rule_cleanup(rule);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function clear()
|
||||
{
|
||||
for ( id in rules )
|
||||
remove_rule(id);
|
||||
}
|
47
scripts/base/frameworks/netcontrol/non-cluster.bro
Normal file
|
@ -0,0 +1,47 @@
|
|||
module NetControl;
|
||||
|
||||
@load ./main
|
||||
|
||||
function activate(p: PluginState, priority: int)
|
||||
{
|
||||
activate_impl(p, priority);
|
||||
}
|
||||
|
||||
function add_rule(r: Rule) : string
|
||||
{
|
||||
return add_rule_impl(r);
|
||||
}
|
||||
|
||||
function remove_rule(id: string) : bool
|
||||
{
|
||||
return remove_rule_impl(id);
|
||||
}
|
||||
|
||||
event rule_expire(r: Rule, p: PluginState) &priority=-5
|
||||
{
|
||||
rule_expire_impl(r, p);
|
||||
}
|
||||
|
||||
event rule_added(r: Rule, p: PluginState, msg: string &default="") &priority=5
|
||||
{
|
||||
rule_added_impl(r, p, msg);
|
||||
|
||||
if ( r?$expire && r$expire > 0secs && ! p$plugin$can_expire )
|
||||
schedule r$expire { rule_expire(r, p) };
|
||||
}
|
||||
|
||||
event rule_removed(r: Rule, p: PluginState, msg: string &default="") &priority=-5
|
||||
{
|
||||
rule_removed_impl(r, p, msg);
|
||||
}
|
||||
|
||||
event rule_timeout(r: Rule, i: FlowInfo, p: PluginState) &priority=-5
|
||||
{
|
||||
rule_timeout_impl(r, i, p);
|
||||
}
|
||||
|
||||
event rule_error(r: Rule, p: PluginState, msg: string &default="") &priority=-5
|
||||
{
|
||||
rule_error_impl(r, p, msg);
|
||||
}
|
||||
|
89
scripts/base/frameworks/netcontrol/plugin.bro
Normal file
|
@ -0,0 +1,89 @@
|
|||
##! Plugin interface for NetControl backends.
|
||||
|
||||
module NetControl;
|
||||
|
||||
@load ./types
|
||||
|
||||
export {
|
||||
## State for a plugin instance.
|
||||
type PluginState: record {
|
||||
## Table for a plugin to store custom, instance-specific state.
|
||||
config: table[string] of string &default=table();
|
||||
|
||||
## Unique plugin identifier; used to look up plugins from rules. Set internally.
|
||||
_id: count &optional;
|
||||
|
||||
## Set internally.
|
||||
_priority: int &default=+0;
|
||||
|
||||
## Set internally. Signifies whether the plugin has reported that it activated successfully.
|
||||
_activated: bool &default=F;
|
||||
};
|
||||
|
||||
# Definition of a plugin.
|
||||
#
|
||||
# Generally a plugin needs to implement only what it can support. By
|
||||
# returning failure, it indicates that it can't support something; the
|
||||
# framework will then try another plugin, if available, or inform the caller
|
||||
# that the operation failed. If a function isn't implemented by a plugin,
|
||||
# that's considered an implicit failure to support the operation.
|
||||
#
|
||||
# If a plugin accepts a rule operation, it *must* generate one of the reporting
|
||||
# events ``rule_{added,removed,error}`` to signal if it indeed worked out;
|
||||
# this is separate from accepting the operation because often a plugin
|
||||
# will only know later (i.e., asynchronously) if that was an error for
|
||||
# something it thought it could handle.
|
||||
type Plugin: record {
|
||||
# Returns a descriptive name of the plugin instance, suitable for use in logging
|
||||
# messages. Note that this function is not optional.
|
||||
name: function(state: PluginState) : string;
|
||||
|
||||
## If true, plugin can expire rules itself. If false,
|
||||
## framework will manage rule expiration.
|
||||
can_expire: bool;
|
||||
|
||||
# One-time initialization function called when plugin gets registered, and
|
||||
# before any other methods are called.
|
||||
#
|
||||
# If this function is provided, NetControl assumes that the plugin has to
|
||||
# perform, potentially lengthy, initialization before the plugin will become
|
||||
# active. In this case, the plugin has to call ``NetControl::plugin_activated``,
|
||||
# once initialization finishes.
|
||||
init: function(state: PluginState) &optional;
|
||||
|
||||
# One-time finalization function called when a plugin is shut down; no further
|
||||
# functions will be called afterwards.
|
||||
done: function(state: PluginState) &optional;
|
||||
|
||||
# Implements the add_rule() operation. If the plugin accepts the rule,
|
||||
# it returns true, false otherwise. The rule will already have its
|
||||
# ``id`` field set, which the plugin may use for identification
|
||||
# purposes.
|
||||
add_rule: function(state: PluginState, r: Rule) : bool &optional;
|
||||
|
||||
# Implements the remove_rule() operation. This will only be called for
|
||||
# rules that the plugin has previously accepted with add_rule(). The
|
||||
# ``id`` field will match that of the add_rule() call. Generally,
|
||||
# a plugin that accepts an add_rule() should also accept the
|
||||
# remove_rule().
|
||||
remove_rule: function(state: PluginState, r: Rule) : bool &optional;
|
||||
|
||||
# A transaction groups a number of operations. The plugin can add them internally
|
||||
# and postpone putting them into effect until committed. This allows building a
|
||||
# configuration of multiple rules at once, including replaying a previous state.
|
||||
transaction_begin: function(state: PluginState) &optional;
|
||||
transaction_end: function(state: PluginState) &optional;
|
||||
};
|
||||
|
||||
# Table for a plugin to store instance-specific configuration information.
|
||||
#
|
||||
# Note, it would be nicer to pass the Plugin instance to all the below, instead
|
||||
# of this state table. However, Bro's type resolver has trouble with referring to a
|
||||
# record type from inside itself.
|
||||
redef record PluginState += {
|
||||
## The plugin that the state belongs to. (Defined separately
|
||||
## because of cyclic type dependency.)
|
||||
plugin: Plugin &optional;
|
||||
};
|
||||
|
||||
}
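# What follows is a minimal sketch of a backend implementing this interface,
# added here purely for illustration; the "null" plugin below is hypothetical
# and mirrors the debug and packetfilter plugins that ship with the framework.
# It omits init, so (judging by the packetfilter plugin) it does not need to
# call NetControl::plugin_activated itself; rule_added/rule_removed are the
# reporting events declared in main.bro.

function null_name(p: PluginState) : string
	{
	return "Null";
	}

function null_add_rule(p: PluginState, r: Rule) : bool
	{
	# Accept every rule and immediately confirm the insertion.
	event NetControl::rule_added(r, p);
	return T;
	}

function null_remove_rule(p: PluginState, r: Rule) : bool
	{
	event NetControl::rule_removed(r, p);
	return T;
	}

global null_plugin = Plugin(
	$name=null_name,
	$can_expire=F,
	$add_rule=null_add_rule,
	$remove_rule=null_remove_rule
	);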
|
5
scripts/base/frameworks/netcontrol/plugins/__load__.bro
Normal file
|
@ -0,0 +1,5 @@
|
|||
@load ./debug
|
||||
@load ./openflow
|
||||
@load ./packetfilter
|
||||
@load ./broker
|
||||
@load ./acld
|
294
scripts/base/frameworks/netcontrol/plugins/acld.bro
Normal file
|
@ -0,0 +1,294 @@
|
|||
##! Acld plugin for the netcontrol framework.
|
||||
|
||||
module NetControl;
|
||||
|
||||
@load ../main
|
||||
@load ../plugin
|
||||
@load base/frameworks/broker
|
||||
|
||||
export {
|
||||
type AclRule : record {
|
||||
command: string;
|
||||
cookie: count;
|
||||
arg: string;
|
||||
comment: string &optional;
|
||||
};
|
||||
|
||||
type AcldConfig: record {
|
||||
## The acld topic used to send events to
|
||||
acld_topic: string;
|
||||
## Broker host to connect to
|
||||
acld_host: addr;
|
||||
## Broker port to connect to
|
||||
acld_port: port;
|
||||
## Do we accept rules for the monitor path? Default false
|
||||
monitor: bool &default=F;
|
||||
## Do we accept rules for the forward path? Default true
|
||||
forward: bool &default=T;
|
||||
|
||||
## Predicate that is called on rule insertion or removal.
|
||||
##
|
||||
## p: Current plugin state
|
||||
##
|
||||
## r: The rule to be inserted or removed
|
||||
##
|
||||
## Returns: T if the rule can be handled by the current backend, F otherwise.
|
||||
check_pred: function(p: PluginState, r: Rule): bool &optional;
|
||||
};
|
||||
|
||||
## Instantiates the acld plugin.
|
||||
global create_acld: function(config: AcldConfig) : PluginState;
|
||||
|
||||
redef record PluginState += {
|
||||
acld_config: AcldConfig &optional;
|
||||
## The ID of this acld instance - for the mapping to PluginStates
|
||||
acld_id: count &optional;
|
||||
};
|
||||
|
||||
## Hook that is called after a rule is converted to an acld rule.
|
||||
## The hook may modify the rule before it is sent to acld.
|
||||
## Setting the acld command to the empty string will cause the rule to be
|
||||
## rejected by the plugin.
|
||||
##
|
||||
## p: Current plugin state
|
||||
##
|
||||
## r: The rule to be inserted or removed
|
||||
##
|
||||
## ar: The acld rule to be inserted or removed
|
||||
global NetControl::acld_rule_policy: hook(p: PluginState, r: Rule, ar: AclRule);
|
||||
|
||||
## Events that are sent from us to Broker
|
||||
global acld_add_rule: event(id: count, r: Rule, ar: AclRule);
|
||||
global acld_remove_rule: event(id: count, r: Rule, ar: AclRule);
|
||||
|
||||
## Events that are sent from Broker to us
|
||||
global acld_rule_added: event(id: count, r: Rule, msg: string);
|
||||
global acld_rule_removed: event(id: count, r: Rule, msg: string);
|
||||
global acld_rule_error: event(id: count, r: Rule, msg: string);
|
||||
}
|
||||
|
||||
global netcontrol_acld_peers: table[port, string] of PluginState;
|
||||
global netcontrol_acld_topics: set[string] = set();
|
||||
global netcontrol_acld_id: table[count] of PluginState = table();
|
||||
global netcontrol_acld_current_id: count = 0;
|
||||
|
||||
const acld_add_to_remove: table[string] of string = {
|
||||
["drop"] = "restore",
|
||||
["whitelist"] = "remwhitelist",
|
||||
["blockhosthost"] = "restorehosthost",
|
||||
["droptcpport"] = "restoretcpport",
|
||||
["dropudpport"] = "restoreudpport",
|
||||
["droptcpdsthostport"] ="restoretcpdsthostport",
|
||||
["dropudpdsthostport"] ="restoreudpdsthostport",
|
||||
["permittcpdsthostport"] ="unpermittcpdsthostport",
|
||||
["permitudpdsthostport"] ="unpermitudpdsthostport",
|
||||
["nullzero"] ="nonullzero"
|
||||
};
|
||||
|
||||
event NetControl::acld_rule_added(id: count, r: Rule, msg: string)
|
||||
{
|
||||
if ( id !in netcontrol_acld_id )
|
||||
{
|
||||
Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id));
|
||||
return;
|
||||
}
|
||||
|
||||
local p = netcontrol_acld_id[id];
|
||||
|
||||
event NetControl::rule_added(r, p, msg);
|
||||
}
|
||||
|
||||
event NetControl::acld_rule_removed(id: count, r: Rule, msg: string)
|
||||
{
|
||||
if ( id !in netcontrol_acld_id )
|
||||
{
|
||||
Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id));
|
||||
return;
|
||||
}
|
||||
|
||||
local p = netcontrol_acld_id[id];
|
||||
|
||||
event NetControl::rule_removed(r, p, msg);
|
||||
}
|
||||
|
||||
event NetControl::acld_rule_error(id: count, r: Rule, msg: string)
|
||||
{
|
||||
if ( id !in netcontrol_acld_id )
|
||||
{
|
||||
Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id));
|
||||
return;
|
||||
}
|
||||
|
||||
local p = netcontrol_acld_id[id];
|
||||
|
||||
event NetControl::rule_error(r, p, msg);
|
||||
}
|
||||
|
||||
function acld_name(p: PluginState) : string
|
||||
{
|
||||
return fmt("Acld-%s", p$acld_config$acld_topic);
|
||||
}
|
||||
|
||||
# check that subnet specifies an addr
|
||||
function check_sn(sn: subnet) : bool
|
||||
{
|
||||
if ( is_v4_subnet(sn) && subnet_width(sn) == 32 )
|
||||
return T;
|
||||
if ( is_v6_subnet(sn) && subnet_width(sn) == 128 )
|
||||
return T;
|
||||
|
||||
Reporter::error(fmt("Acld: rule_to_acl_rule was given a subnet that does not specify a distinct address where needed - %s", sn));
|
||||
return F;
|
||||
}
|
||||
|
||||
function rule_to_acl_rule(p: PluginState, r: Rule) : AclRule
|
||||
{
|
||||
local e = r$entity;
|
||||
|
||||
local command: string = "";
|
||||
local arg: string = "";
|
||||
|
||||
if ( e$ty == ADDRESS )
|
||||
{
|
||||
if ( r$ty == DROP )
|
||||
command = "drop";
|
||||
else if ( r$ty == WHITELIST )
|
||||
command = "whitelist";
|
||||
arg = cat(e$ip);
|
||||
}
|
||||
else if ( e$ty == FLOW )
|
||||
{
|
||||
local f = e$flow;
|
||||
if ( ( ! f?$src_h ) && ( ! f?$src_p ) && f?$dst_h && f?$dst_p && ( ! f?$src_m ) && ( ! f?$dst_m ) )
|
||||
{
|
||||
if ( !check_sn(f$dst_h) )
|
||||
command = ""; # invalid addr, do nothing
|
||||
else if ( is_tcp_port(f$dst_p) && r$ty == DROP )
|
||||
command = "droptcpdsthostport";
|
||||
else if ( is_tcp_port(f$dst_p) && r$ty == WHITELIST )
|
||||
command = "permittcpdsthostport";
|
||||
else if ( is_udp_port(f$dst_p) && r$ty == DROP)
|
||||
command = "dropucpdsthostport";
|
||||
else if ( is_udp_port(f$dst_p) && r$ty == WHITELIST)
|
||||
command = "permitucpdsthostport";
|
||||
|
||||
arg = fmt("%s %d", subnet_to_addr(f$dst_h), f$dst_p);
|
||||
}
|
||||
else if ( f?$src_h && ( ! f?$src_p ) && f?$dst_h && ( ! f?$dst_p ) && ( ! f?$src_m ) && ( ! f?$dst_m ) )
|
||||
{
|
||||
if ( !check_sn(f$src_h) || !check_sn(f$dst_h) )
|
||||
command = "";
|
||||
else if ( r$ty == DROP )
|
||||
command = "blockhosthost";
|
||||
arg = fmt("%s %s", subnet_to_addr(f$src_h), subnet_to_addr(f$dst_h));
|
||||
}
|
||||
else if ( ( ! f?$src_h ) && ( ! f?$src_p ) && ( ! f?$dst_h ) && f?$dst_p && ( ! f?$src_m ) && ( ! f?$dst_m ) )
|
||||
{
|
||||
if ( is_tcp_port(f$dst_p) && r$ty == DROP )
|
||||
command = "droptcpport";
|
||||
else if ( is_udp_port(f$dst_p) && r$ty == DROP )
|
||||
command = "dropudpport";
|
||||
arg = fmt("%d", f$dst_p);
|
||||
}
|
||||
}
|
||||
|
||||
local ar = AclRule($command=command, $cookie=r$cid, $arg=arg);
|
||||
if ( r?$location )
|
||||
ar$comment = r$location;
|
||||
|
||||
hook NetControl::acld_rule_policy(p, r, ar);
|
||||
|
||||
return ar;
|
||||
}
|
||||
|
||||
function acld_check_rule(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
local c = p$acld_config;
|
||||
|
||||
if ( p$acld_config?$check_pred )
|
||||
return p$acld_config$check_pred(p, r);
|
||||
|
||||
if ( r$target == MONITOR && c$monitor )
|
||||
return T;
|
||||
|
||||
if ( r$target == FORWARD && c$forward )
|
||||
return T;
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
function acld_add_rule_fun(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
if ( ! acld_check_rule(p, r) )
|
||||
return F;
|
||||
|
||||
local ar = rule_to_acl_rule(p, r);
|
||||
|
||||
if ( ar$command == "" )
|
||||
return F;
|
||||
|
||||
BrokerComm::event(p$acld_config$acld_topic, BrokerComm::event_args(acld_add_rule, p$acld_id, r, ar));
|
||||
return T;
|
||||
}
|
||||
|
||||
function acld_remove_rule_fun(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
if ( ! acld_check_rule(p, r) )
|
||||
return F;
|
||||
|
||||
local ar = rule_to_acl_rule(p, r);
|
||||
if ( ar$command in acld_add_to_remove )
|
||||
ar$command = acld_add_to_remove[ar$command];
|
||||
else
|
||||
return F;
|
||||
|
||||
BrokerComm::event(p$acld_config$acld_topic, BrokerComm::event_args(acld_remove_rule, p$acld_id, r, ar));
|
||||
return T;
|
||||
}
|
||||
|
||||
function acld_init(p: PluginState)
|
||||
{
|
||||
BrokerComm::enable();
|
||||
BrokerComm::connect(cat(p$acld_config$acld_host), p$acld_config$acld_port, 1sec);
|
||||
BrokerComm::subscribe_to_events(p$acld_config$acld_topic);
|
||||
}
|
||||
|
||||
event BrokerComm::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string)
|
||||
{
|
||||
if ( [peer_port, peer_address] !in netcontrol_acld_peers )
|
||||
# ok, this one was none of ours...
|
||||
return;
|
||||
|
||||
local p = netcontrol_acld_peers[peer_port, peer_address];
|
||||
plugin_activated(p);
|
||||
}
|
||||
|
||||
global acld_plugin = Plugin(
|
||||
$name=acld_name,
|
||||
$can_expire = F,
|
||||
$add_rule = acld_add_rule_fun,
|
||||
$remove_rule = acld_remove_rule_fun,
|
||||
$init = acld_init
|
||||
);
|
||||
|
||||
function create_acld(config: AcldConfig) : PluginState
|
||||
{
|
||||
if ( config$acld_topic in netcontrol_acld_topics )
|
||||
Reporter::warning(fmt("Topic %s was added to NetControl acld plugin twice. Possible duplication of commands", config$acld_topic));
|
||||
else
|
||||
add netcontrol_acld_topics[config$acld_topic];
|
||||
|
||||
local host = cat(config$acld_host);
|
||||
local p: PluginState = [$acld_config=config, $plugin=acld_plugin, $acld_id=netcontrol_acld_current_id];
|
||||
|
||||
if ( [config$acld_port, host] in netcontrol_acld_peers )
|
||||
Reporter::warning(fmt("Peer %s:%s was added to NetControl acld plugin twice.", host, config$acld_port));
|
||||
else
|
||||
netcontrol_acld_peers[config$acld_port, host] = p;
|
||||
|
||||
netcontrol_acld_id[netcontrol_acld_current_id] = p;
|
||||
++netcontrol_acld_current_id;
|
||||
|
||||
return p;
|
||||
}
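# A hypothetical usage sketch (topic, host and port below are placeholders,
# not values from this commit): instantiate the acld backend in bro_init and
# register it with the framework via NetControl::activate.

event bro_init()
	{
	local acld = NetControl::create_acld([$acld_topic="bro/event/netcontrol-acld",
	                                      $acld_host=127.0.0.1,
	                                      $acld_port=9999/tcp]);
	NetControl::activate(acld, 0);
	}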
|
||||
|
163
scripts/base/frameworks/netcontrol/plugins/broker.bro
Normal file
|
@ -0,0 +1,163 @@
|
|||
##! Broker plugin for the netcontrol framework. Sends the raw data structures
|
||||
##! used in NetControl on to Broker to allow for easy handling, e.g., by
|
||||
##! external command-line scripts.
|
||||
|
||||
module NetControl;
|
||||
|
||||
@load ../main
|
||||
@load ../plugin
|
||||
@load base/frameworks/broker
|
||||
|
||||
export {
|
||||
## Instantiates the broker plugin.
|
||||
global create_broker: function(host: addr, host_port: port, topic: string, can_expire: bool &default=F) : PluginState;
|
||||
|
||||
redef record PluginState += {
|
||||
## The broker topic used to send events to
|
||||
broker_topic: string &optional;
|
||||
## The ID of this broker instance - for the mapping to PluginStates
|
||||
broker_id: count &optional;
|
||||
## Broker host to connect to
|
||||
broker_host: addr &optional;
|
||||
## Broker port to connect to
|
||||
broker_port: port &optional;
|
||||
};
|
||||
|
||||
global broker_add_rule: event(id: count, r: Rule);
|
||||
global broker_remove_rule: event(id: count, r: Rule);
|
||||
|
||||
global broker_rule_added: event(id: count, r: Rule, msg: string);
|
||||
global broker_rule_removed: event(id: count, r: Rule, msg: string);
|
||||
global broker_rule_error: event(id: count, r: Rule, msg: string);
|
||||
global broker_rule_timeout: event(id: count, r: Rule, i: FlowInfo);
|
||||
}
|
||||
|
||||
global netcontrol_broker_peers: table[port, string] of PluginState;
|
||||
global netcontrol_broker_topics: set[string] = set();
|
||||
global netcontrol_broker_id: table[count] of PluginState = table();
|
||||
global netcontrol_broker_current_id: count = 0;
|
||||
|
||||
event NetControl::broker_rule_added(id: count, r: Rule, msg: string)
|
||||
{
|
||||
if ( id !in netcontrol_broker_id )
|
||||
{
|
||||
Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id));
|
||||
return;
|
||||
}
|
||||
|
||||
local p = netcontrol_broker_id[id];
|
||||
|
||||
event NetControl::rule_added(r, p, msg);
|
||||
}
|
||||
|
||||
event NetControl::broker_rule_removed(id: count, r: Rule, msg: string)
|
||||
{
|
||||
if ( id !in netcontrol_broker_id )
|
||||
{
|
||||
Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id));
|
||||
return;
|
||||
}
|
||||
|
||||
local p = netcontrol_broker_id[id];
|
||||
|
||||
event NetControl::rule_removed(r, p, msg);
|
||||
}
|
||||
|
||||
event NetControl::broker_rule_error(id: count, r: Rule, msg: string)
|
||||
{
|
||||
if ( id !in netcontrol_broker_id )
|
||||
{
|
||||
Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id));
|
||||
return;
|
||||
}
|
||||
|
||||
local p = netcontrol_broker_id[id];
|
||||
|
||||
event NetControl::rule_error(r, p, msg);
|
||||
}
|
||||
|
||||
event NetControl::broker_rule_timeout(id: count, r: Rule, i: FlowInfo)
|
||||
{
|
||||
if ( id !in netcontrol_broker_id )
|
||||
{
|
||||
Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id));
|
||||
return;
|
||||
}
|
||||
|
||||
local p = netcontrol_broker_id[id];
|
||||
|
||||
event NetControl::rule_timeout(r, i, p);
|
||||
}
|
||||
|
||||
function broker_name(p: PluginState) : string
|
||||
{
|
||||
return fmt("Broker-%s", p$broker_topic);
|
||||
}
|
||||
|
||||
function broker_add_rule_fun(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
BrokerComm::event(p$broker_topic, BrokerComm::event_args(broker_add_rule, p$broker_id, r));
|
||||
return T;
|
||||
}
|
||||
|
||||
function broker_remove_rule_fun(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
BrokerComm::event(p$broker_topic, BrokerComm::event_args(broker_remove_rule, p$broker_id, r));
|
||||
return T;
|
||||
}
|
||||
|
||||
function broker_init(p: PluginState)
|
||||
{
|
||||
BrokerComm::enable();
|
||||
BrokerComm::connect(cat(p$broker_host), p$broker_port, 1sec);
|
||||
BrokerComm::subscribe_to_events(p$broker_topic);
|
||||
}
|
||||
|
||||
event BrokerComm::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string)
|
||||
{
|
||||
if ( [peer_port, peer_address] !in netcontrol_broker_peers )
|
||||
return;
|
||||
|
||||
local p = netcontrol_broker_peers[peer_port, peer_address];
|
||||
plugin_activated(p);
|
||||
}
|
||||
|
||||
global broker_plugin = Plugin(
|
||||
$name=broker_name,
|
||||
$can_expire = F,
|
||||
$add_rule = broker_add_rule_fun,
|
||||
$remove_rule = broker_remove_rule_fun,
|
||||
$init = broker_init
|
||||
);
|
||||
|
||||
global broker_plugin_can_expire = Plugin(
|
||||
$name=broker_name,
|
||||
$can_expire = T,
|
||||
$add_rule = broker_add_rule_fun,
|
||||
$remove_rule = broker_remove_rule_fun,
|
||||
$init = broker_init
|
||||
);
|
||||
|
||||
function create_broker(host: addr, host_port: port, topic: string, can_expire: bool &default=F) : PluginState
|
||||
{
|
||||
if ( topic in netcontrol_broker_topics )
|
||||
Reporter::warning(fmt("Topic %s was added to NetControl broker plugin twice. Possible duplication of commands", topic));
|
||||
else
|
||||
add netcontrol_broker_topics[topic];
|
||||
|
||||
local plugin = broker_plugin;
|
||||
if ( can_expire )
|
||||
plugin = broker_plugin_can_expire;
|
||||
|
||||
local p: PluginState = [$broker_host=host, $broker_port=host_port, $plugin=plugin, $broker_topic=topic, $broker_id=netcontrol_broker_current_id];
|
||||
|
||||
if ( [host_port, cat(host)] in netcontrol_broker_peers )
|
||||
Reporter::warning(fmt("Peer %s:%s was added to NetControl broker plugin twice.", host, host_port));
|
||||
else
|
||||
netcontrol_broker_peers[host_port, cat(host)] = p;
|
||||
|
||||
netcontrol_broker_id[netcontrol_broker_current_id] = p;
|
||||
++netcontrol_broker_current_id;
|
||||
|
||||
return p;
|
||||
}
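# A hypothetical usage sketch (address, port and topic are placeholders): the
# remote side is expected to subscribe to the topic and raise the
# broker_rule_added/removed/error events defined above in response.

event bro_init()
	{
	local b = NetControl::create_broker(127.0.0.1, 9999/tcp, "bro/event/netcontrol-example", F);
	NetControl::activate(b, 0);
	}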
|
99
scripts/base/frameworks/netcontrol/plugins/debug.bro
Normal file
|
@ -0,0 +1,99 @@
|
|||
##! Debugging plugin for the NetControl framework, providing insight into
|
||||
##! executed operations.
|
||||
|
||||
@load ../plugin
|
||||
@load ../main
|
||||
|
||||
module NetControl;
|
||||
|
||||
export {
|
||||
## Instantiates a debug plugin for the NetControl framework. The debug
|
||||
## plugin simply logs the operations it receives.
|
||||
##
|
||||
## do_something: If true, the plugin will claim it supports all operations; if
|
||||
## false, it will indicate it doesn't support any.
|
||||
global create_debug: function(do_something: bool) : PluginState;
|
||||
}
|
||||
|
||||
function do_something(p: PluginState) : bool
|
||||
{
|
||||
return p$config["all"] == "1";
|
||||
}
|
||||
|
||||
function debug_name(p: PluginState) : string
|
||||
{
|
||||
return fmt("Debug-%s", (do_something(p) ? "All" : "None"));
|
||||
}
|
||||
|
||||
function debug_log(p: PluginState, msg: string)
|
||||
{
|
||||
print fmt("netcontrol debug (%s): %s", debug_name(p), msg);
|
||||
}
|
||||
|
||||
function debug_init(p: PluginState)
|
||||
{
|
||||
debug_log(p, "init");
|
||||
plugin_activated(p);
|
||||
}
|
||||
|
||||
function debug_done(p: PluginState)
|
||||
{
|
||||
debug_log(p, "init");
|
||||
}
|
||||
|
||||
function debug_add_rule(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
local s = fmt("add_rule: %s", r);
|
||||
debug_log(p, s);
|
||||
|
||||
if ( do_something(p) )
|
||||
{
|
||||
event NetControl::rule_added(r, p);
|
||||
return T;
|
||||
}
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
function debug_remove_rule(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
local s = fmt("remove_rule: %s", r);
|
||||
debug_log(p, s);
|
||||
|
||||
event NetControl::rule_removed(r, p);
|
||||
return T;
|
||||
}
|
||||
|
||||
function debug_transaction_begin(p: PluginState)
|
||||
{
|
||||
debug_log(p, "transaction_begin");
|
||||
}
|
||||
|
||||
function debug_transaction_end(p: PluginState)
|
||||
{
|
||||
debug_log(p, "transaction_end");
|
||||
}
|
||||
|
||||
global debug_plugin = Plugin(
|
||||
$name=debug_name,
|
||||
$can_expire = F,
|
||||
$init = debug_init,
|
||||
$done = debug_done,
|
||||
$add_rule = debug_add_rule,
|
||||
$remove_rule = debug_remove_rule,
|
||||
$transaction_begin = debug_transaction_begin,
|
||||
$transaction_end = debug_transaction_end
|
||||
);
|
||||
|
||||
function create_debug(do_something: bool) : PluginState
|
||||
{
|
||||
local p: PluginState = [$plugin=debug_plugin];
|
||||
|
||||
# FIXME: Why's the default not working?
|
||||
p$config = table();
|
||||
p$config["all"] = (do_something ? "1" : "0");
|
||||
|
||||
return p;
|
||||
}
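# Typical wiring when testing, shown as a hedged sketch (not part of the
# original file): create the debug plugin so that it accepts everything and
# activate it at priority 0.

event bro_init()
	{
	local d = NetControl::create_debug(T);
	NetControl::activate(d, 0);
	}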
|
||||
|
||||
|
432
scripts/base/frameworks/netcontrol/plugins/openflow.bro
Normal file
|
@ -0,0 +1,432 @@
|
|||
##! OpenFlow plugin for the NetControl framework.
|
||||
|
||||
@load ../main
|
||||
@load ../plugin
|
||||
@load base/frameworks/openflow
|
||||
|
||||
module NetControl;
|
||||
|
||||
export {
|
||||
type OfConfig: record {
|
||||
monitor: bool &default=T;
|
||||
forward: bool &default=T;
|
||||
idle_timeout: count &default=0;
|
||||
table_id: count &optional;
|
||||
priority_offset: int &default=+0; ##< Add this to all rule priorities. Useful if you want the OpenFlow priorities to be offset from the NetControl priorities without having to write a filter function.
|
||||
|
||||
## Predicate that is called on rule insertion or removal.
|
||||
##
|
||||
## p: Current plugin state
|
||||
##
|
||||
## r: The rule to be inserted or removed
|
||||
##
|
||||
## Returns: T if the rule can be handled by the current backend, F otherwise.
|
||||
check_pred: function(p: PluginState, r: Rule): bool &optional;
|
||||
match_pred: function(p: PluginState, e: Entity, m: vector of OpenFlow::ofp_match): vector of OpenFlow::ofp_match &optional;
|
||||
flow_mod_pred: function(p: PluginState, r: Rule, m: OpenFlow::ofp_flow_mod): OpenFlow::ofp_flow_mod &optional;
|
||||
};
|
||||
|
||||
redef record PluginState += {
|
||||
## OpenFlow controller for NetControl OpenFlow plugin
|
||||
of_controller: OpenFlow::Controller &optional;
|
||||
## OpenFlow configuration record that is passed on initialization
|
||||
of_config: OfConfig &optional;
|
||||
};
|
||||
|
||||
type OfTable: record {
|
||||
p: PluginState;
|
||||
r: Rule;
|
||||
c: count &default=0; # how many replies did we see so far? needed for ids where we have multiple rules...
|
||||
packet_count: count &default=0;
|
||||
byte_count: count &default=0;
|
||||
duration_sec: double &default=0.0;
|
||||
};
|
||||
|
||||
## the time interval after which an openflow message is considered to be timed out
|
||||
## and we delete it from our internal tracking.
|
||||
const openflow_message_timeout = 20secs &redef;
|
||||
|
||||
## The time interval after which we consider a flow timed out. This should be fairly high (or
|
||||
## even disabled) if you expect a lot of long-lived flows. However, one will also have state
|
||||
## build up for quite a while if keeping this around...
|
||||
const openflow_flow_timeout = 24hrs &redef;
|
||||
|
||||
## Instantiates an openflow plugin for the NetControl framework.
|
||||
global create_openflow: function(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState;
|
||||
}
|
||||
|
||||
global of_messages: table[count, OpenFlow::ofp_flow_mod_command] of OfTable &create_expire=openflow_message_timeout
|
||||
&expire_func=function(t: table[count, OpenFlow::ofp_flow_mod_command] of OfTable, idx: any): interval
|
||||
{
|
||||
local rid: count;
|
||||
local command: OpenFlow::ofp_flow_mod_command;
|
||||
[rid, command] = idx;
|
||||
|
||||
local p = t[rid, command]$p;
|
||||
local r = t[rid, command]$r;
|
||||
event NetControl::rule_error(r, p, "Timeout during rule insertion/removal");
|
||||
return 0secs;
|
||||
};
|
||||
|
||||
global of_flows: table[count] of OfTable &create_expire=openflow_flow_timeout;
|
||||
global of_instances: table[string] of PluginState;
|
||||
|
||||
function openflow_name(p: PluginState) : string
|
||||
{
|
||||
return fmt("Openflow-%s", p$of_controller$describe(p$of_controller$state));
|
||||
}
|
||||
|
||||
function openflow_check_rule(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
local c = p$of_config;
|
||||
|
||||
if ( p$of_config?$check_pred )
|
||||
return p$of_config$check_pred(p, r);
|
||||
|
||||
if ( r$target == MONITOR && c$monitor )
|
||||
return T;
|
||||
|
||||
if ( r$target == FORWARD && c$forward )
|
||||
return T;
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
function openflow_match_pred(p: PluginState, e: Entity, m: vector of OpenFlow::ofp_match) : vector of OpenFlow::ofp_match
|
||||
{
|
||||
if ( p$of_config?$match_pred )
|
||||
return p$of_config$match_pred(p, e, m);
|
||||
|
||||
return m;
|
||||
}
|
||||
|
||||
function openflow_flow_mod_pred(p: PluginState, r: Rule, m: OpenFlow::ofp_flow_mod): OpenFlow::ofp_flow_mod
|
||||
{
|
||||
if ( p$of_config?$flow_mod_pred )
|
||||
return p$of_config$flow_mod_pred(p, r, m);
|
||||
|
||||
return m;
|
||||
}
|
||||
|
||||
function determine_dl_type(s: subnet): count
|
||||
{
|
||||
local pdl = OpenFlow::ETH_IPv4;
|
||||
if ( is_v6_subnet(s) )
|
||||
pdl = OpenFlow::ETH_IPv6;
|
||||
|
||||
return pdl;
|
||||
}
|
||||
|
||||
function determine_proto(p: port): count
|
||||
{
|
||||
local proto = OpenFlow::IP_TCP;
|
||||
if ( is_udp_port(p) )
|
||||
proto = OpenFlow::IP_UDP;
|
||||
else if ( is_icmp_port(p) )
|
||||
proto = OpenFlow::IP_ICMP;
|
||||
|
||||
return proto;
|
||||
}
|
||||
|
||||
function entity_to_match(p: PluginState, e: Entity): vector of OpenFlow::ofp_match
|
||||
{
|
||||
local v : vector of OpenFlow::ofp_match = vector();
|
||||
|
||||
if ( e$ty == CONNECTION )
|
||||
{
|
||||
v[|v|] = OpenFlow::match_conn(e$conn); # forward and...
|
||||
v[|v|] = OpenFlow::match_conn(e$conn, T); # reverse
|
||||
return openflow_match_pred(p, e, v);
|
||||
}
|
||||
|
||||
if ( e$ty == MAC )
|
||||
{
|
||||
v[|v|] = OpenFlow::ofp_match(
|
||||
$dl_src=e$mac
|
||||
);
|
||||
v[|v|] = OpenFlow::ofp_match(
|
||||
$dl_dst=e$mac
|
||||
);
|
||||
|
||||
return openflow_match_pred(p, e, v);
|
||||
}
|
||||
|
||||
local dl_type = OpenFlow::ETH_IPv4;
|
||||
|
||||
if ( e$ty == ADDRESS )
|
||||
{
|
||||
if ( is_v6_subnet(e$ip) )
|
||||
dl_type = OpenFlow::ETH_IPv6;
|
||||
|
||||
v[|v|] = OpenFlow::ofp_match(
|
||||
$dl_type=dl_type,
|
||||
$nw_src=e$ip
|
||||
);
|
||||
|
||||
v[|v|] = OpenFlow::ofp_match(
|
||||
$dl_type=dl_type,
|
||||
$nw_dst=e$ip
|
||||
);
|
||||
|
||||
return openflow_match_pred(p, e, v);
|
||||
}
|
||||
|
||||
local proto = OpenFlow::IP_TCP;
|
||||
|
||||
if ( e$ty == FLOW )
|
||||
{
|
||||
local m = OpenFlow::ofp_match();
|
||||
local f = e$flow;
|
||||
|
||||
if ( f?$src_m )
|
||||
m$dl_src=f$src_m;
|
||||
if ( f?$dst_m )
|
||||
m$dl_dst=f$dst_m;
|
||||
|
||||
if ( f?$src_h )
|
||||
{
|
||||
m$dl_type = determine_dl_type(f$src_h);
|
||||
m$nw_src = f$src_h;
|
||||
}
|
||||
|
||||
if ( f?$dst_h )
|
||||
{
|
||||
m$dl_type = determine_dl_type(f$dst_h);
|
||||
m$nw_dst = f$dst_h;
|
||||
}
|
||||
|
||||
if ( f?$src_p )
|
||||
{
|
||||
m$nw_proto = determine_proto(f$src_p);
|
||||
m$tp_src = port_to_count(f$src_p);
|
||||
}
|
||||
|
||||
if ( f?$dst_p )
|
||||
{
|
||||
m$nw_proto = determine_proto(f$dst_p);
|
||||
m$tp_dst = port_to_count(f$dst_p);
|
||||
}
|
||||
|
||||
v[|v|] = m;
|
||||
|
||||
return openflow_match_pred(p, e, v);
|
||||
}
|
||||
|
||||
Reporter::error(fmt("Entity type %s not supported for openflow yet", cat(e$ty)));
|
||||
return openflow_match_pred(p, e, v);
|
||||
}
|
||||
|
||||
function openflow_rule_to_flow_mod(p: PluginState, r: Rule) : OpenFlow::ofp_flow_mod
|
||||
{
|
||||
local c = p$of_config;
|
||||
|
||||
local flow_mod = OpenFlow::ofp_flow_mod(
|
||||
$cookie=OpenFlow::generate_cookie(r$cid*2), # leave one space for the cases in which we need two rules.
|
||||
$command=OpenFlow::OFPFC_ADD,
|
||||
$idle_timeout=c$idle_timeout,
|
||||
$priority=int_to_count(r$priority + c$priority_offset),
|
||||
$flags=OpenFlow::OFPFF_SEND_FLOW_REM # please notify us when flows are removed
|
||||
);
|
||||
|
||||
if ( r?$expire )
|
||||
flow_mod$hard_timeout = double_to_count(interval_to_double(r$expire));
|
||||
if ( c?$table_id )
|
||||
flow_mod$table_id = c$table_id;
|
||||
|
||||
if ( r$ty == DROP )
|
||||
{
|
||||
# default, nothing to do. We simply do not add an output port to the rule...
|
||||
}
|
||||
else if ( r$ty == WHITELIST )
|
||||
{
|
||||
# At the moment our interpretation of whitelist is to hand this off to the switch's L2/L3 routing.
|
||||
flow_mod$actions$out_ports = vector(OpenFlow::OFPP_NORMAL);
|
||||
}
|
||||
else if ( r$ty == MODIFY )
|
||||
{
|
||||
# if no ports are given, just assume normal pipeline...
|
||||
flow_mod$actions$out_ports = vector(OpenFlow::OFPP_NORMAL);
|
||||
|
||||
local mod = r$mod;
|
||||
if ( mod?$redirect_port )
|
||||
flow_mod$actions$out_ports = vector(mod$redirect_port);
|
||||
|
||||
if ( mod?$src_h )
|
||||
flow_mod$actions$nw_src = mod$src_h;
|
||||
if ( mod?$dst_h )
|
||||
flow_mod$actions$nw_dst = mod$dst_h;
|
||||
if ( mod?$src_m )
|
||||
flow_mod$actions$dl_src = mod$src_m;
|
||||
if ( mod?$dst_m )
|
||||
flow_mod$actions$dl_dst = mod$dst_m;
|
||||
if ( mod?$src_p )
|
||||
flow_mod$actions$tp_src = mod$src_p;
|
||||
if ( mod?$dst_p )
|
||||
flow_mod$actions$tp_dst = mod$dst_p;
|
||||
}
|
||||
else if ( r$ty == REDIRECT )
|
||||
{
|
||||
# redirect to port c
|
||||
flow_mod$actions$out_ports = vector(r$out_port);
|
||||
}
|
||||
else
|
||||
{
|
||||
Reporter::error(fmt("Rule type %s not supported for openflow yet", cat(r$ty)));
|
||||
}
|
||||
|
||||
return openflow_flow_mod_pred(p, r, flow_mod);
|
||||
}
|
||||
|
||||
function openflow_add_rule(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
if ( ! openflow_check_rule(p, r) )
|
||||
return F;
|
||||
|
||||
local flow_mod = openflow_rule_to_flow_mod(p, r);
|
||||
local matches = entity_to_match(p, r$entity);
|
||||
|
||||
for ( i in matches )
|
||||
{
|
||||
if ( OpenFlow::flow_mod(p$of_controller, matches[i], flow_mod) )
|
||||
{
|
||||
of_messages[r$cid, flow_mod$command] = OfTable($p=p, $r=r);
|
||||
flow_mod = copy(flow_mod);
|
||||
++flow_mod$cookie;
|
||||
}
|
||||
else
|
||||
event rule_error(r, p, "Error while executing OpenFlow::flow_mod");
|
||||
}
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
function openflow_remove_rule(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
if ( ! openflow_check_rule(p, r) )
|
||||
return F;
|
||||
|
||||
local flow_mod: OpenFlow::ofp_flow_mod = [
|
||||
$cookie=OpenFlow::generate_cookie(r$cid*2),
|
||||
$command=OpenFlow::OFPFC_DELETE
|
||||
];
|
||||
|
||||
if ( OpenFlow::flow_mod(p$of_controller, [], flow_mod) )
|
||||
of_messages[r$cid, flow_mod$command] = OfTable($p=p, $r=r);
|
||||
else
|
||||
{
|
||||
event rule_error(r, p, "Error while executing OpenFlow::flow_mod");
|
||||
return F;
|
||||
}
|
||||
|
||||
# if this was an address or mac match, we also need to remove the reverse
|
||||
if ( r$entity$ty == ADDRESS || r$entity$ty == MAC )
|
||||
{
|
||||
local flow_mod_2 = copy(flow_mod);
|
||||
++flow_mod_2$cookie;
|
||||
OpenFlow::flow_mod(p$of_controller, [], flow_mod_2);
|
||||
}
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
event OpenFlow::flow_mod_success(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) &priority=3
|
||||
{
|
||||
local id = OpenFlow::get_cookie_uid(flow_mod$cookie)/2;
|
||||
if ( [id, flow_mod$command] !in of_messages )
|
||||
return;
|
||||
|
||||
local r = of_messages[id,flow_mod$command]$r;
|
||||
local p = of_messages[id,flow_mod$command]$p;
|
||||
local c = of_messages[id,flow_mod$command]$c;
|
||||
|
||||
if ( r$entity$ty == ADDRESS || r$entity$ty == MAC )
|
||||
{
|
||||
++of_messages[id,flow_mod$command]$c;
|
||||
if ( of_messages[id,flow_mod$command]$c < 2 )
|
||||
return; # will do stuff once the second part arrives...
|
||||
}
|
||||
|
||||
delete of_messages[id,flow_mod$command];
|
||||
|
||||
if ( p$of_controller$supports_flow_removed )
|
||||
of_flows[id] = OfTable($p=p, $r=r);
|
||||
|
||||
if ( flow_mod$command == OpenFlow::OFPFC_ADD )
|
||||
event NetControl::rule_added(r, p, msg);
|
||||
else if ( flow_mod$command == OpenFlow::OFPFC_DELETE || flow_mod$command == OpenFlow::OFPFC_DELETE_STRICT )
|
||||
event NetControl::rule_removed(r, p, msg);
|
||||
}
|
||||
|
||||
event OpenFlow::flow_mod_failure(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) &priority=3
|
||||
{
|
||||
local id = OpenFlow::get_cookie_uid(flow_mod$cookie)/2;
|
||||
if ( [id, flow_mod$command] !in of_messages )
|
||||
return;
|
||||
|
||||
local r = of_messages[id,flow_mod$command]$r;
|
||||
local p = of_messages[id,flow_mod$command]$p;
|
||||
delete of_messages[id,flow_mod$command];
|
||||
|
||||
event NetControl::rule_error(r, p, msg);
|
||||
}
|
||||
|
||||
event OpenFlow::flow_removed(name: string, match: OpenFlow::ofp_match, cookie: count, priority: count, reason: count, duration_sec: count, idle_timeout: count, packet_count: count, byte_count: count)
|
||||
{
|
||||
local id = OpenFlow::get_cookie_uid(cookie)/2;
|
||||
if ( id !in of_flows )
|
||||
return;
|
||||
|
||||
local rec = of_flows[id];
|
||||
local r = rec$r;
|
||||
local p = rec$p;
|
||||
|
||||
if ( r$entity$ty == ADDRESS || r$entity$ty == MAC )
|
||||
{
|
||||
++of_flows[id]$c;
|
||||
if ( of_flows[id]$c < 2 )
|
||||
return; # will do stuff once the second part arrives...
|
||||
else
|
||||
event NetControl::rule_timeout(r, FlowInfo($duration=double_to_interval((rec$duration_sec+duration_sec)/2), $packet_count=packet_count+rec$packet_count, $byte_count=byte_count+rec$byte_count), p);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
event NetControl::rule_timeout(r, FlowInfo($duration=double_to_interval(duration_sec+0.0), $packet_count=packet_count, $byte_count=byte_count), p);
|
||||
}
|
||||
|
||||
function openflow_init(p: PluginState)
|
||||
{
|
||||
local name = p$of_controller$state$_name;
|
||||
if ( name in of_instances )
|
||||
Reporter::error(fmt("OpenFlow instance %s added to NetControl twice.", name));
|
||||
|
||||
of_instances[name] = p;
|
||||
|
||||
# let's check, if our OpenFlow controller is already active. If not, we have to wait for it to become active.
|
||||
if ( p$of_controller$state$_activated )
|
||||
plugin_activated(p);
|
||||
}
|
||||
|
||||
event OpenFlow::controller_activated(name: string, controller: OpenFlow::Controller)
|
||||
{
|
||||
if ( name in of_instances )
|
||||
plugin_activated(of_instances[name]);
|
||||
}
|
||||
|
||||
global openflow_plugin = Plugin(
|
||||
$name=openflow_name,
|
||||
$can_expire = T,
|
||||
$init = openflow_init,
|
||||
# $done = openflow_done,
|
||||
$add_rule = openflow_add_rule,
|
||||
$remove_rule = openflow_remove_rule
|
||||
# $transaction_begin = openflow_transaction_begin,
|
||||
# $transaction_end = openflow_transaction_end
|
||||
);
|
||||
|
||||
function create_openflow(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState
|
||||
{
|
||||
local p: PluginState = [$plugin=openflow_plugin, $of_controller=controller, $of_config=config];
|
||||
|
||||
return p;
|
||||
}
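# A hypothetical usage sketch: pair the plugin with an OpenFlow controller
# instance and offset all rule priorities by +100. OpenFlow::log_new and its
# dpid argument are assumptions for illustration only; substitute whatever
# controller constructor your OpenFlow setup provides.

event bro_init()
	{
	local controller = OpenFlow::log_new(42);
	local of_plugin_state = NetControl::create_openflow(controller, [$priority_offset=+100]);
	NetControl::activate(of_plugin_state, 0);
	}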
|
113
scripts/base/frameworks/netcontrol/plugins/packetfilter.bro
Normal file
|
@ -0,0 +1,113 @@
|
|||
##! NetControl plugin for the process-level PacketFilter that comes with
|
||||
##! Bro. Since the PacketFilter in Bro can only add and remove filters
|
||||
##! for addresses, this plugin is likewise quite limited in scope at
|
||||
##! the moment.
|
||||
|
||||
module NetControl;
|
||||
|
||||
@load ../plugin
|
||||
|
||||
export {
|
||||
## Instantiates the packetfilter plugin.
|
||||
global create_packetfilter: function() : PluginState;
|
||||
}
|
||||
|
||||
# Check if we can handle this rule. If it specifies ports or
|
||||
# anything Bro cannot handle, simply ignore it for now.
|
||||
function packetfilter_check_rule(r: Rule) : bool
|
||||
{
|
||||
if ( r$ty != DROP )
|
||||
return F;
|
||||
|
||||
if ( r$target != MONITOR )
|
||||
return F;
|
||||
|
||||
local e = r$entity;
|
||||
if ( e$ty == ADDRESS )
|
||||
return T;
|
||||
|
||||
if ( e$ty != FLOW ) # everything else requires ports or MAC stuff
|
||||
return F;
|
||||
|
||||
if ( e$flow?$src_p || e$flow?$dst_p || e$flow?$src_m || e$flow?$dst_m )
|
||||
return F;
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
|
||||
function packetfilter_add_rule(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
if ( ! packetfilter_check_rule(r) )
|
||||
return F;
|
||||
|
||||
local e = r$entity;
|
||||
if ( e$ty == ADDRESS )
|
||||
{
|
||||
install_src_net_filter(e$ip, 0, 1.0);
|
||||
install_dst_net_filter(e$ip, 0, 1.0);
|
||||
return T;
|
||||
}
|
||||
|
||||
if ( e$ty == FLOW )
|
||||
{
|
||||
local f = e$flow;
|
||||
if ( f?$src_h )
|
||||
install_src_net_filter(f$src_h, 0, 1.0);
|
||||
if ( f?$dst_h )
|
||||
install_dst_net_filter(f$dst_h, 0, 1.0);
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
function packetfilter_remove_rule(p: PluginState, r: Rule) : bool
|
||||
{
|
||||
if ( ! packetfilter_check_rule(r) )
|
||||
return F;
|
||||
|
||||
local e = r$entity;
|
||||
if ( e$ty == ADDRESS )
|
||||
{
|
||||
uninstall_src_net_filter(e$ip);
|
||||
uninstall_dst_net_filter(e$ip);
|
||||
return T;
|
||||
}
|
||||
|
||||
if ( e$ty == FLOW )
|
||||
{
|
||||
local f = e$flow;
|
||||
if ( f?$src_h )
|
||||
uninstall_src_net_filter(f$src_h);
|
||||
if ( f?$dst_h )
|
||||
uninstall_dst_net_filter(f$dst_h);
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
function packetfilter_name(p: PluginState) : string
|
||||
{
|
||||
return "Packetfilter";
|
||||
}
|
||||
|
||||
global packetfilter_plugin = Plugin(
|
||||
$name=packetfilter_name,
|
||||
$can_expire = F,
|
||||
# $init = packetfilter_init,
|
||||
# $done = packetfilter_done,
|
||||
$add_rule = packetfilter_add_rule,
|
||||
$remove_rule = packetfilter_remove_rule
|
||||
);
|
||||
|
||||
function create_packetfilter() : PluginState
|
||||
{
|
||||
local p: PluginState = [$plugin=packetfilter_plugin];
|
||||
|
||||
return p;
|
||||
}
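# Usage sketch (illustrative only): the packetfilter backend needs no
# configuration, so wiring it up is a one-liner in bro_init.

event bro_init()
	{
	NetControl::activate(NetControl::create_packetfilter(), 0);
	}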
|
||||
|
69
scripts/base/frameworks/netcontrol/shunt.bro
Normal file
|
@ -0,0 +1,69 @@
|
|||
##! Implementation of the shunt functionality for NetControl.
|
||||
|
||||
module NetControl;
|
||||
|
||||
@load ./main
|
||||
|
||||
export {
|
||||
redef enum Log::ID += { SHUNT };
|
||||
|
||||
## Stops forwarding a uni-directional flow's packets to Bro.
|
||||
##
|
||||
## f: The flow to shunt.
|
||||
##
|
||||
## t: How long to leave the shunt in place, with 0 being indefinitely.
|
||||
##
|
||||
## location: An optional string describing where the shunt was triggered.
|
||||
##
|
||||
## Returns: The ID of the inserted rule on success, or an empty string on failure.
|
||||
global shunt_flow: function(f: flow_id, t: interval, location: string &default="") : string;
|
||||
|
||||
type ShuntInfo: record {
|
||||
## Time at which the recorded activity occurred.
|
||||
ts: time &log;
|
||||
## ID of the rule; unique during each Bro run
|
||||
rule_id: string &log;
|
||||
## Flow ID of the shunted flow
|
||||
f: flow_id &log;
|
||||
## Expiry time of the shunt
|
||||
expire: interval &log;
|
||||
## Location where the underlying action was triggered.
|
||||
location: string &log &optional;
|
||||
};
|
||||
|
||||
## Event that can be handled to access the :bro:type:`NetControl::ShuntInfo`
|
||||
## record as it is sent on to the logging framework.
|
||||
global log_netcontrol_shunt: event(rec: ShuntInfo);
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(NetControl::SHUNT, [$columns=ShuntInfo, $ev=log_netcontrol_shunt, $path="netcontrol_shunt"]);
|
||||
}
|
||||
|
||||
function shunt_flow(f: flow_id, t: interval, location: string &default="") : string
|
||||
{
|
||||
local flow = NetControl::Flow(
|
||||
$src_h=addr_to_subnet(f$src_h),
|
||||
$src_p=f$src_p,
|
||||
$dst_h=addr_to_subnet(f$dst_h),
|
||||
$dst_p=f$dst_p
|
||||
);
|
||||
local e: Entity = [$ty=FLOW, $flow=flow];
|
||||
local r: Rule = [$ty=DROP, $target=MONITOR, $entity=e, $expire=t, $location=location];
|
||||
|
||||
local id = add_rule(r);
|
||||
|
||||
# Error should already be logged
|
||||
if ( id == "" )
|
||||
return id;
|
||||
|
||||
local log = ShuntInfo($ts=network_time(), $rule_id=id, $f=f, $expire=t);
|
||||
if ( location != "" )
|
||||
log$location=location;
|
||||
|
||||
Log::write(SHUNT, log);
|
||||
|
||||
return id;
|
||||
}
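# A hypothetical caller sketch: shunt the rest of a connection once local
# policy decides it is uninteresting. The one-hour timeout and the location
# string are made up for illustration.

event connection_established(c: connection)
	{
	# Assume some local policy decided this connection is uninteresting.
	NetControl::shunt_flow([$src_h=c$id$orig_h, $src_p=c$id$orig_p,
	                        $dst_h=c$id$resp_h, $dst_p=c$id$resp_p],
	                       1hr, "example shunt");
	}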
|
||||
|
109
scripts/base/frameworks/netcontrol/types.bro
Normal file
|
@ -0,0 +1,109 @@
|
|||
##! Types used by the NetControl framework.
|
||||
|
||||
module NetControl;
|
||||
|
||||
export {
|
||||
const default_priority: int = +0 &redef;
|
||||
const whitelist_priority: int = +5 &redef;
|
||||
|
||||
## Type of a :bro:id:`Entity` for defining an action.
|
||||
type EntityType: enum {
|
||||
ADDRESS, ##< Activity involving a specific IP address.
|
||||
CONNECTION, ##< All of a bi-directional connection's activity.
|
||||
FLOW, ##< All of a uni-directional flow's activity. Can contain wildcards.
|
||||
MAC, ##< Activity involving a MAC address.
|
||||
};
|
||||
|
||||
## Type of a :bro:id:`Flow` for defining a flow.
|
||||
type Flow: record {
|
||||
src_h: subnet &optional; ##< The source IP address/subnet.
|
||||
src_p: port &optional; ##< The source port number.
|
||||
dst_h: subnet &optional; ##< The destination IP address/subnet.
|
||||
dst_p: port &optional; ##< The destination port number.
|
||||
src_m: string &optional; ##< The source MAC address.
|
||||
dst_m: string &optional; ##< The destination MAC address.
|
||||
};
|
||||
|
||||
## Type defining the entity that a :bro:id:`Rule` is operating on.
|
||||
type Entity: record {
|
||||
ty: EntityType; ##< Type of entity.
|
||||
conn: conn_id &optional; ##< Used with :bro:id:`CONNECTION` .
|
||||
flow: Flow &optional; ##< Used with :bro:id:`FLOW` .
|
||||
ip: subnet &optional; ##< Used with :bro:id:`ADDRESS`; can specify a CIDR subnet.
|
||||
mac: string &optional; ##< Used with :bro:id:`MAC`.
|
||||
};
|
||||
|
||||
## Target of :bro:id:`Rule` action.
|
||||
type TargetType: enum {
|
||||
FORWARD, ##< Apply rule actively to traffic on forwarding path.
|
||||
MONITOR, ##< Apply rule passively to traffic sent to Bro for monitoring.
|
||||
};
|
||||
|
||||
## Type of rules that the framework supports. Each type lists the
|
||||
## :bro:id:`Rule` argument(s) it uses, if any.
|
||||
##
|
||||
## Plugins may extend this type to define their own.
|
||||
type RuleType: enum {
|
||||
## Stop forwarding all packets matching entity.
|
||||
##
|
||||
## No arguments.
|
||||
DROP,
|
||||
|
||||
## Begin modifying all packets matching entity.
|
||||
##
|
||||
## .. todo::
|
||||
## Define arguments.
|
||||
MODIFY,
|
||||
|
||||
## Begin redirecting all packets matching entity.
|
||||
##
|
||||
## .. todo::
|
||||
## c: output port to redirect traffic to.
|
||||
REDIRECT,
|
||||
|
||||
## Whitelists all packets of an entity, meaning no restrictions will be applied.
|
||||
## While whitelisting is the default if no rule matches, this type can be
|
||||
## used to override lower-priority rules that would otherwise take effect for the
|
||||
## entity.
|
||||
WHITELIST,
|
||||
};
|
||||
|
||||
## Type of a :bro:id:`FlowMod` for defining a flow modification action.
|
||||
type FlowMod: record {
|
||||
src_h: addr &optional; ##< The source IP address.
|
||||
src_p: count &optional; ##< The source port number.
|
||||
dst_h: addr &optional; ##< The destination IP address.
|
||||
dst_p: count &optional; ##< The destination port number.
|
||||
src_m: string &optional; ##< The source MAC address.
|
||||
dst_m: string &optional; ##< The destination MAC address.
|
||||
redirect_port: count &optional;
|
||||
};
|
||||
|
||||
## A rule for the framework to put in place. Of all rules currently in
|
||||
## place, the first match will be taken, sorted by priority. All
|
||||
## further rules will be ignored.
|
||||
type Rule: record {
|
||||
ty: RuleType; ##< Type of rule.
|
||||
target: TargetType; ##< Where to apply rule.
|
||||
entity: Entity; ##< Entity to apply rule to.
|
||||
expire: interval &optional; ##< Timeout after which to expire the rule.
|
||||
priority: int &default=default_priority; ##< Priority if multiple rules match an entity (larger value is higher priority).
|
||||
location: string &optional; ##< Optional string describing where/what installed the rule.
|
||||
|
||||
out_port: count &optional; ##< Argument for :bro:id:`REDIRECT` rules.
|
||||
mod: FlowMod &optional; ##< Argument for :bro:id:`MODIFY` rules.
|
||||
|
||||
id: string &default=""; ##< Internally determined unique ID for this rule. Will be set when added.
|
||||
cid: count &default=0; ##< Internally determined unique numeric ID for this rule. Set when added.
|
||||
};
|
||||
|
||||
## Information of a flow that can be provided by switches when the flow times out.
|
||||
## Currently this is heavily influenced by the data that OpenFlow returns by default.
|
||||
## That being said - their design makes sense and this is probably the data one
|
||||
## can expect to be available.
|
||||
type FlowInfo: record {
|
||||
duration: interval &optional; ##< Total duration of the rule.
|
||||
packet_count: count &optional; ##< Number of packets exchanged over connections matched by the rule.
|
||||
byte_count: count &optional; ##< Total bytes exchanged over connections matched by the rule.
|
||||
};
|
||||
}
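# For illustration only (the helper name is hypothetical and not part of this
# commit): a rule built from these types that drops all traffic from one
# address on the forwarding path. The returned record would then be handed to
# NetControl::add_rule, which is declared in main.bro.

function example_drop_address_rule(a: addr, t: interval) : Rule
	{
	local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a));
	return Rule($ty=DROP, $target=FORWARD, $entity=e, $expire=t);
	}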
|
|
@ -19,9 +19,9 @@ export {
|
|||
## the :bro:id:`NOTICE` function. The convention is to give a general
|
||||
## category along with the specific notice separating words with
|
||||
## underscores and using leading capitals on each word except for
|
||||
## abbreviations which are kept in all capitals. For example,
|
||||
## abbreviations which are kept in all capitals. For example,
|
||||
## SSH::Password_Guessing is for hosts that have crossed a threshold of
|
||||
## heuristically determined failed SSH logins.
|
||||
## failed SSH logins.
|
||||
type Type: enum {
|
||||
## Notice reporting a count of how often a notice occurred.
|
||||
Tally,
|
||||
|
@ -349,9 +349,9 @@ function log_mailing_postprocessor(info: Log::RotationInfo): bool
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(Notice::LOG, [$columns=Info, $ev=log_notice]);
|
||||
Log::create_stream(Notice::LOG, [$columns=Info, $ev=log_notice, $path="notice"]);
|
||||
|
||||
Log::create_stream(Notice::ALARM_LOG, [$columns=Notice::Info]);
|
||||
Log::create_stream(Notice::ALARM_LOG, [$columns=Notice::Info, $path="notice_alarm"]);
|
||||
# If Bro is configured for mailing notices, set up mailing for alarms.
|
||||
# Make sure that this alarm log is also output as text so that it can
|
||||
# be packaged up and emailed later.
|
||||
|
|
|
@ -294,7 +294,7 @@ global current_conn: connection;
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(Weird::LOG, [$columns=Info, $ev=log_weird]);
|
||||
Log::create_stream(Weird::LOG, [$columns=Info, $ev=log_weird, $path="weird"]);
|
||||
}
|
||||
|
||||
function flow_id_string(src: addr, dst: addr): string
|
||||
|
|
13
scripts/base/frameworks/openflow/__load__.bro
Normal file
|
@ -0,0 +1,13 @@
|
|||
@load ./consts
|
||||
@load ./types
|
||||
@load ./main
|
||||
@load ./plugins
|
||||
|
||||
# The cluster framework must be loaded first.
|
||||
@load base/frameworks/cluster
|
||||
|
||||
@if ( Cluster::is_enabled() )
|
||||
@load ./cluster
|
||||
@else
|
||||
@load ./non-cluster
|
||||
@endif
|
120
scripts/base/frameworks/openflow/cluster.bro
Normal file
|
@ -0,0 +1,120 @@
|
|||
##! Cluster support for the OpenFlow framework.
|
||||
|
||||
@load ./main
|
||||
@load base/frameworks/cluster
|
||||
|
||||
module OpenFlow;
|
||||
|
||||
export {
|
||||
## This is the event used to transport flow_mod messages to the manager.
|
||||
global cluster_flow_mod: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod);
|
||||
|
||||
## This is the event used to transport flow_clear messages to the manager.
|
||||
global cluster_flow_clear: event(name: string);
|
||||
}
|
||||
|
||||
## Workers need the ability to forward commands to the manager.
|
||||
redef Cluster::worker2manager_events += /OpenFlow::cluster_flow_(mod|clear)/;
|
||||
|
||||
# the flow_mod function wrapper
|
||||
function flow_mod(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool
|
||||
{
|
||||
if ( ! controller?$flow_mod )
|
||||
return F;
|
||||
|
||||
if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
return controller$flow_mod(controller$state, match, flow_mod);
|
||||
else
|
||||
event OpenFlow::cluster_flow_mod(controller$state$_name, match, flow_mod);
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
function flow_clear(controller: Controller): bool
|
||||
{
|
||||
if ( ! controller?$flow_clear )
|
||||
return F;
|
||||
|
||||
if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
return controller$flow_clear(controller$state);
|
||||
else
|
||||
event OpenFlow::cluster_flow_clear(controller$state$_name);
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
@if ( Cluster::local_node_type() == Cluster::MANAGER )
|
||||
event OpenFlow::cluster_flow_mod(name: string, match: ofp_match, flow_mod: ofp_flow_mod)
|
||||
{
|
||||
if ( name !in name_to_controller )
|
||||
{
|
||||
Reporter::error(fmt("OpenFlow controller %s not found in mapping on master", name));
|
||||
return;
|
||||
}
|
||||
|
||||
local c = name_to_controller[name];
|
||||
|
||||
if ( ! c$state$_activated )
|
||||
return;
|
||||
|
||||
if ( c?$flow_mod )
|
||||
c$flow_mod(c$state, match, flow_mod);
|
||||
}
|
||||
|
||||
event OpenFlow::cluster_flow_clear(name: string)
|
||||
{
|
||||
if ( name !in name_to_controller )
|
||||
{
|
||||
Reporter::error(fmt("OpenFlow controller %s not found in mapping on master", name));
|
||||
return;
|
||||
}
|
||||
|
||||
local c = name_to_controller[name];
|
||||
|
||||
if ( ! c$state$_activated )
|
||||
return;
|
||||
|
||||
if ( c?$flow_clear )
|
||||
c$flow_clear(c$state);
|
||||
}
|
||||
@endif
|
||||
|
||||
function register_controller(tpe: OpenFlow::Plugin, name: string, controller: Controller)
|
||||
{
|
||||
controller$state$_name = cat(tpe, name);
|
||||
controller$state$_plugin = tpe;
|
||||
|
||||
# we only run the init functions on the manager.
|
||||
if ( Cluster::local_node_type() != Cluster::MANAGER )
|
||||
return;
|
||||
|
||||
register_controller_impl(tpe, name, controller);
|
||||
}
|
||||
|
||||
function unregister_controller(controller: Controller)
|
||||
{
|
||||
# we only run the on the manager.
|
||||
if ( Cluster::local_node_type() != Cluster::MANAGER )
|
||||
return;
|
||||
|
||||
unregister_controller_impl(controller);
|
||||
}
|
||||
|
||||
function lookup_controller(name: string): vector of Controller
|
||||
{
|
||||
# we only run the on the manager. Otherwhise we don't have a mapping or state -> return empty
|
||||
if ( Cluster::local_node_type() != Cluster::MANAGER )
|
||||
return vector();
|
||||
|
||||
# I am not quite sure if we can actually get away with this - in the
|
||||
# current state, this means that the individual nodes cannot lookup
|
||||
# a controller by name.
|
||||
#
|
||||
# This means that there can be no reactions to things on the actual
|
||||
# worker nodes - because they cannot look up a name. On the other hand -
|
||||
# currently we also do not even send the events to the worker nodes (at least
|
||||
# not if we are using broker). Because of that I am not really feeling that
|
||||
# badly about it...
|
||||
|
||||
return lookup_controller_impl(name);
|
||||
}
|
scripts/base/frameworks/openflow/consts.bro (new file, 229 lines)
@@ -0,0 +1,229 @@
##! Constants used by the OpenFlow framework.

# All types/constants not specific to OpenFlow will be defined here
# until they somehow get into Bro.

module OpenFlow;

# Some cookie specific constants.
# first 24 bits
const COOKIE_BID_SIZE = 16777216;
# start at bit 40 (1 << 40)
const COOKIE_BID_START = 1099511627776;
# bro specific cookie ID shall have the 42 bit set (1 << 42)
const BRO_COOKIE_ID = 4;
# 8 bits group identifier
const COOKIE_GID_SIZE = 256;
# start at bit 32 (1 << 32)
const COOKIE_GID_START = 4294967296;
# 32 bits unique identifier
const COOKIE_UID_SIZE = 4294967296;
# start at bit 0 (1 << 0)
const COOKIE_UID_START = 0;

export {
	# All ethertypes can be found at
	# http://standards.ieee.org/develop/regauth/ethertype/eth.txt
	# but are not interesting for us at this point
	#type ethertype: enum {
	# Internet protocol version 4
	const ETH_IPv4 = 0x0800;
	# Address resolution protocol
	const ETH_ARP = 0x0806;
	# Wake on LAN
	const ETH_WOL = 0x0842;
	# Reverse address resolution protocol
	const ETH_RARP = 0x8035;
	# Appletalk
	const ETH_APPLETALK = 0x809B;
	# Appletalk address resolution protocol
	const ETH_APPLETALK_ARP = 0x80F3;
	# IEEE 802.1q & IEEE 802.1aq
	const ETH_VLAN = 0x8100;
	# Novell IPX old
	const ETH_IPX_OLD = 0x8137;
	# Novell IPX
	const ETH_IPX = 0x8138;
	# Internet protocol version 6
	const ETH_IPv6 = 0x86DD;
	# IEEE 802.3x
	const ETH_ETHER_FLOW_CONTROL = 0x8808;
	# Multiprotocol Label Switching unicast
	const ETH_MPLS_UNICAST = 0x8847;
	# Multiprotocol Label Switching multicast
	const ETH_MPLS_MULTICAST = 0x8848;
	# Point-to-point protocol over Ethernet discovery phase (rfc2516)
	const ETH_PPPOE_DISCOVERY = 0x8863;
	# Point-to-point protocol over Ethernet session phase (rfc2516)
	const ETH_PPPOE_SESSION = 0x8864;
	# Jumbo frames
	const ETH_JUMBO_FRAMES = 0x8870;
	# IEEE 802.1X
	const ETH_EAP_OVER_LAN = 0x888E;
	# IEEE 802.1ad & IEEE 802.1aq
	const ETH_PROVIDER_BRIDING = 0x88A8;
	# IEEE 802.1ae
	const ETH_MAC_SECURITY = 0x88E5;
	# IEEE 802.1ad (QinQ)
	const ETH_QINQ = 0x9100;
	#};

	# A list of ip protocol numbers can be found at
	# http://en.wikipedia.org/wiki/List_of_IP_protocol_numbers
	#type iptype: enum {
	# IPv6 Hop-by-Hop Option (RFC2460)
	const IP_HOPOPT = 0x00;
	# Internet Control Message Protocol (RFC792)
	const IP_ICMP = 0x01;
	# Internet Group Management Protocol (RFC1112)
	const IP_IGMP = 0x02;
	# Gateway-to-Gateway Protocol (RFC823)
	const IP_GGP = 0x03;
	# IP-Within-IP (encapsulation) (RFC2003)
	const IP_IPIP = 0x04;
	# Internet Stream Protocol (RFC1190;RFC1819)
	const IP_ST = 0x05;
	# Transmission Control Protocol (RFC793)
	const IP_TCP = 0x06;
	# Core-based trees (RFC2189)
	const IP_CBT = 0x07;
	# Exterior Gateway Protocol (RFC888)
	const IP_EGP = 0x08;
	# Interior Gateway Protocol (any private interior
	# gateway (used by Cisco for their IGRP))
	const IP_IGP = 0x09;
	# User Datagram Protocol (RFC768)
	const IP_UDP = 0x11;
	# Reliable Datagram Protocol (RFC908)
	const IP_RDP = 0x1B;
	# IPv6 Encapsulation (RFC2473)
	const IP_IPv6 = 0x29;
	# Resource Reservation Protocol (RFC2205)
	const IP_RSVP = 0x2E;
	# Generic Routing Encapsulation (RFC2784;RFC2890)
	const IP_GRE = 0x2F;
	# Open Shortest Path First (RFC1583)
	const IP_OSPF = 0x59;
	# Multicast Transport Protocol
	const IP_MTP = 0x5C;
	# IP-within-IP Encapsulation Protocol (RFC2003)
	### error 0x5E;
	# Ethernet-within-IP Encapsulation Protocol (RFC3378)
	const IP_ETHERIP = 0x61;
	# Layer Two Tunneling Protocol Version 3 (RFC3931)
	const IP_L2TP = 0x73;
	# Intermediate System to Intermediate System (IS-IS) Protocol over IPv4 (RFC1142;RFC1195)
	const IP_ISIS = 0x7C;
	# Fibre Channel
	const IP_FC = 0x85;
	# Multiprotocol Label Switching Encapsulated in IP (RFC4023)
	const IP_MPLS = 0x89;
	#};

	## Return value for a cookie from a flow
	## which is not added, modified or deleted
	## from the bro openflow framework
	const INVALID_COOKIE = 0xffffffffffffffff;
	# Openflow physical port definitions
	## Send the packet out the input port. This
	## virtual port must be explicitly used in
	## order to send back out of the input port.
	const OFPP_IN_PORT = 0xfffffff8;
	## Perform actions in flow table.
	## NB: This can only be the destination port
	## for packet-out messages.
	const OFPP_TABLE = 0xfffffff9;
	## Process with normal L2/L3 switching.
	const OFPP_NORMAL = 0xfffffffa;
	## All physical ports except input port and
	## those disabled by STP.
	const OFPP_FLOOD = 0xfffffffb;
	## All physical ports except input port.
	const OFPP_ALL = 0xfffffffc;
	## Send to controller.
	const OFPP_CONTROLLER = 0xfffffffd;
	## Local openflow "port".
	const OFPP_LOCAL = 0xfffffffe;
	## Wildcard port used only for flow mod (delete) and flow stats requests.
	const OFPP_ANY = 0xffffffff;
	# Openflow no buffer constant.
	const OFP_NO_BUFFER = 0xffffffff;
	## Send flow removed message when flow
	## expires or is deleted.
	const OFPFF_SEND_FLOW_REM = 0x1;
	## Check for overlapping entries first.
	const OFPFF_CHECK_OVERLAP = 0x2;
	## Remark this is for emergency.
	## Flows added with this are only used
	## when the controller is disconnected.
	const OFPFF_EMERG = 0x4;

	# Wildcard table used for table config,
	# flow stats and flow deletes.
	const OFPTT_ALL = 0xff;

	## Openflow action_type definitions
	##
	## The openflow action type defines
	## what actions openflow can take
	## to modify a packet
	type ofp_action_type: enum {
		## Output to switch port.
		OFPAT_OUTPUT = 0x0000,
		## Set the 802.1q VLAN id.
		OFPAT_SET_VLAN_VID = 0x0001,
		## Set the 802.1q priority.
		OFPAT_SET_VLAN_PCP = 0x0002,
		## Strip the 802.1q header.
		OFPAT_STRIP_VLAN = 0x0003,
		## Ethernet source address.
		OFPAT_SET_DL_SRC = 0x0004,
		## Ethernet destination address.
		OFPAT_SET_DL_DST = 0x0005,
		## IP source address.
		OFPAT_SET_NW_SRC = 0x0006,
		## IP destination address.
		OFPAT_SET_NW_DST = 0x0007,
		## IP ToS (DSCP field, 6 bits).
		OFPAT_SET_NW_TOS = 0x0008,
		## TCP/UDP source port.
		OFPAT_SET_TP_SRC = 0x0009,
		## TCP/UDP destination port.
		OFPAT_SET_TP_DST = 0x000a,
		## Output to queue.
		OFPAT_ENQUEUE = 0x000b,
		## Vendor specific.
		OFPAT_VENDOR = 0xffff,
	};

	## Openflow flow_mod_command definitions
	##
	## The openflow flow_mod_command describes
	## what kind of action is to be performed.
	type ofp_flow_mod_command: enum {
		## New flow.
		OFPFC_ADD = 0x0,
		## Modify all matching flows.
		OFPFC_MODIFY = 0x1,
		## Modify entry strictly matching wildcards.
		OFPFC_MODIFY_STRICT = 0x2,
		## Delete all matching flows.
		OFPFC_DELETE = 0x3,
		## Strictly matching wildcards and priority.
		OFPFC_DELETE_STRICT = 0x4,
	};

	## Openflow config flag definitions
	##
	## TODO: describe
	type ofp_config_flags: enum {
		## No special handling for fragments.
		OFPC_FRAG_NORMAL = 0,
		## Drop fragments.
		OFPC_FRAG_DROP = 1,
		## Reassemble (only if OFPC_IP_REASM set).
		OFPC_FRAG_REASM = 2,
		OFPC_FRAG_MASK = 3,
	};

}
scripts/base/frameworks/openflow/main.bro (new file, 289 lines)
@@ -0,0 +1,289 @@
##! Bro's OpenFlow control framework
##!
##! This plugin-based framework allows controlling OpenFlow capable
##! switches by implementing communication to an OpenFlow controller
##! via plugins. The framework has to be instantiated via the new function
##! in one of the plugins. This framework only offers very low-level
##! functionality; if you want to use OpenFlow capable switches, e.g.,
##! for shunting, please look at the PACF framework, which provides higher
##! level functions and can use the OpenFlow framework as a backend.

module OpenFlow;

@load ./consts
@load ./types

export {
	## Global flow_mod function.
	##
	## controller: The controller which should execute the flow modification.
	##
	## match: The ofp_match record which describes the flow to match.
	##
	## flow_mod: The openflow flow_mod record which describes the action to take.
	##
	## Returns: F on error or if the plugin does not support the operation, T when the operation was queued.
	global flow_mod: function(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool;

	## Clear the current flow table of the controller.
	##
	## controller: The controller which should execute the flow modification.
	##
	## Returns: F on error or if the plugin does not support the operation, T when the operation was queued.
	global flow_clear: function(controller: Controller): bool;

	## Event confirming successful modification of a flow rule.
	##
	## name: The unique name of the OpenFlow controller from which this event originated.
	##
	## match: The ofp_match record which describes the flow to match.
	##
	## flow_mod: The openflow flow_mod record which describes the action to take.
	##
	## msg: An optional informational message by the plugin.
	global flow_mod_success: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod, msg: string &default="");

	## Reports an error while installing a flow rule.
	##
	## name: The unique name of the OpenFlow controller from which this event originated.
	##
	## match: The ofp_match record which describes the flow to match.
	##
	## flow_mod: The openflow flow_mod record which describes the action to take.
	##
	## msg: Message to describe the event.
	global flow_mod_failure: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod, msg: string &default="");

	## Reports that a flow was removed by the switch because of either the hard or the idle timeout.
	## This message is only generated by controllers that indicate that they support flow removal
	## in supports_flow_removed.
	##
	## name: The unique name of the OpenFlow controller from which this event originated.
	##
	## match: The ofp_match record which was used to create the flow.
	##
	## cookie: The cookie that was specified when creating the flow.
	##
	## priority: The priority that was specified when creating the flow.
	##
	## reason: The reason for flow removal (OFPRR_*).
	##
	## duration_sec: Duration of the flow in seconds.
	##
	## packet_count: Packet count of the flow.
	##
	## byte_count: Byte count of the flow.
	global flow_removed: event(name: string, match: ofp_match, cookie: count, priority: count, reason: count, duration_sec: count, idle_timeout: count, packet_count: count, byte_count: count);

	## Convert a conn_id record into an ofp_match record that can be used to
	## create match objects for OpenFlow.
	##
	## id: The conn_id record that describes the connection.
	##
	## reverse: Reverse the sources and destinations when creating the match record (default F).
	##
	## Returns: ofp_match object for the conn_id record.
	global match_conn: function(id: conn_id, reverse: bool &default=F): ofp_match;

	# ###
	# ### Low-level functions for cookie handling and plugin registration.
	# ###

	## Function to get the unique id out of a given cookie.
	##
	## cookie: The openflow match cookie.
	##
	## Returns: The cookie unique id.
	global get_cookie_uid: function(cookie: count): count;

	## Function to get the group id out of a given cookie.
	##
	## cookie: The openflow match cookie.
	##
	## Returns: The cookie group id.
	global get_cookie_gid: function(cookie: count): count;

	## Function to generate a new cookie using our group id.
	##
	## cookie: The unique identifier to encode in the lower 32 bits of the cookie.
	##
	## Returns: The generated cookie.
	global generate_cookie: function(cookie: count &default=0): count;

	## Function to register a controller instance. This function
	## is called automatically by the plugin _new functions.
	##
	## tpe: Type of this plugin.
	##
	## name: Unique name of this controller instance.
	##
	## controller: The controller to register.
	global register_controller: function(tpe: OpenFlow::Plugin, name: string, controller: Controller);

	## Function to unregister a controller instance. This function
	## should be called when a specific controller should no longer
	## be used.
	##
	## controller: The controller to unregister.
	global unregister_controller: function(controller: Controller);

	## Function to signal that a controller finished activation and is
	## ready to use. Will throw the ``OpenFlow::controller_activated``
	## event.
	global controller_init_done: function(controller: Controller);

	## Event that is raised once a controller finishes initialization
	## and is completely activated.
	##
	## name: Unique name of this controller instance.
	##
	## controller: The controller that finished activation.
	global OpenFlow::controller_activated: event(name: string, controller: Controller);

	## Function to look up a controller instance by name.
	##
	## name: Unique name of the controller to look up.
	##
	## Returns: One-element vector with the controller, if found. Empty vector otherwise.
	global lookup_controller: function(name: string): vector of Controller;
}

global name_to_controller: table[string] of Controller;

function match_conn(id: conn_id, reverse: bool &default=F): ofp_match
	{
	local dl_type = ETH_IPv4;
	local proto = IP_TCP;

	local orig_h: addr;
	local orig_p: port;
	local resp_h: addr;
	local resp_p: port;

	if ( reverse == F )
		{
		orig_h = id$orig_h;
		orig_p = id$orig_p;
		resp_h = id$resp_h;
		resp_p = id$resp_p;
		}
	else
		{
		orig_h = id$resp_h;
		orig_p = id$resp_p;
		resp_h = id$orig_h;
		resp_p = id$orig_p;
		}

	if ( is_v6_addr(orig_h) )
		dl_type = ETH_IPv6;

	if ( is_udp_port(orig_p) )
		proto = IP_UDP;
	else if ( is_icmp_port(orig_p) )
		proto = IP_ICMP;

	return ofp_match(
		$dl_type=dl_type,
		$nw_proto=proto,
		$nw_src=addr_to_subnet(orig_h),
		$tp_src=port_to_count(orig_p),
		$nw_dst=addr_to_subnet(resp_h),
		$tp_dst=port_to_count(resp_p)
	);
	}

# Local function to forge a flow_mod cookie for this framework.
# All flow entries from the openflow framework should have
# bit 42 of the cookie set.
function generate_cookie(cookie: count &default=0): count
	{
	local c = BRO_COOKIE_ID * COOKIE_BID_START;

	if ( cookie >= COOKIE_UID_SIZE )
		Reporter::warning(fmt("The given cookie uid '%d' is > 32bit and will be discarded", cookie));
	else
		c += cookie;

	return c;
	}

# Local function to check if a given flow_mod cookie is forged from this framework.
function is_valid_cookie(cookie: count): bool
	{
	if ( cookie / COOKIE_BID_START == BRO_COOKIE_ID )
		return T;

	Reporter::warning(fmt("The given Openflow cookie '%d' is not valid", cookie));

	return F;
	}

function get_cookie_uid(cookie: count): count
	{
	if ( is_valid_cookie(cookie) )
		return (cookie - ((cookie / COOKIE_GID_START) * COOKIE_GID_START));

	return INVALID_COOKIE;
	}

function get_cookie_gid(cookie: count): count
	{
	if ( is_valid_cookie(cookie) )
		return (
			(cookie - (COOKIE_BID_START * BRO_COOKIE_ID) -
			(cookie - ((cookie / COOKIE_GID_START) * COOKIE_GID_START))) /
			COOKIE_GID_START
		);

	return INVALID_COOKIE;
	}

function controller_init_done(controller: Controller)
	{
	if ( controller$state$_name !in name_to_controller )
		{
		Reporter::error(fmt("Openflow initialized unknown plugin %s successfully?", controller$state$_name));
		return;
		}

	controller$state$_activated = T;
	event OpenFlow::controller_activated(controller$state$_name, controller);
	}

# Functions that are called from cluster.bro and non-cluster.bro

function register_controller_impl(tpe: OpenFlow::Plugin, name: string, controller: Controller)
	{
	if ( controller$state$_name in name_to_controller )
		{
		Reporter::error(fmt("OpenFlow Controller %s was already registered. Ignored duplicate registration", controller$state$_name));
		return;
		}

	name_to_controller[controller$state$_name] = controller;

	if ( controller?$init )
		controller$init(controller$state);
	else
		controller_init_done(controller);
	}

function unregister_controller_impl(controller: Controller)
	{
	if ( controller$state$_name in name_to_controller )
		delete name_to_controller[controller$state$_name];
	else
		Reporter::error(fmt("OpenFlow Controller %s was not registered in unregister.", controller$state$_name));

	if ( controller?$destroy )
		controller$destroy(controller$state);
	}

function lookup_controller_impl(name: string): vector of Controller
	{
	if ( name in name_to_controller )
		return vector(name_to_controller[name]);
	else
		return vector();
	}
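The cookie helpers above pack the framework's identifier into the upper bits of the 64-bit OpenFlow cookie and the caller-supplied value into the lower 32 bits. A minimal sketch of how the exported functions behave (not part of the patch; the printed values follow from the constants in consts.bro):

@load base/frameworks/openflow

event bro_init()
	{
	# generate_cookie() sets BRO_COOKIE_ID (4) at bit 40 and stores the
	# given value in the lower 32 bits; get_cookie_uid() recovers it.
	local c = OpenFlow::generate_cookie(42);
	print c;                            # 4398046511146 == (4 * 2^40) + 42
	print OpenFlow::get_cookie_uid(c);  # 42
	}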
scripts/base/frameworks/openflow/non-cluster.bro (new file, 44 lines)
@@ -0,0 +1,44 @@
@load ./main

module OpenFlow;

# The flow_mod function wrapper.
function flow_mod(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool
	{
	if ( ! controller$state$_activated )
		return F;

	if ( controller?$flow_mod )
		return controller$flow_mod(controller$state, match, flow_mod);
	else
		return F;
	}

function flow_clear(controller: Controller): bool
	{
	if ( ! controller$state$_activated )
		return F;

	if ( controller?$flow_clear )
		return controller$flow_clear(controller$state);
	else
		return F;
	}

function register_controller(tpe: OpenFlow::Plugin, name: string, controller: Controller)
	{
	controller$state$_name = cat(tpe, name);
	controller$state$_plugin = tpe;

	register_controller_impl(tpe, name, controller);
	}

function unregister_controller(controller: Controller)
	{
	unregister_controller_impl(controller);
	}

function lookup_controller(name: string): vector of Controller
	{
	return lookup_controller_impl(name);
	}
scripts/base/frameworks/openflow/plugins/__load__.bro (new file, 3 lines)
@@ -0,0 +1,3 @@
@load ./ryu
@load ./log
@load ./broker
scripts/base/frameworks/openflow/plugins/broker.bro (new file, 95 lines)
@@ -0,0 +1,95 @@
##! OpenFlow plugin for interfacing to controllers via Broker.

@load base/frameworks/openflow
@load base/frameworks/broker

module OpenFlow;

export {
	redef enum Plugin += {
		BROKER,
	};

	## Broker controller constructor.
	##
	## host: Controller ip.
	##
	## host_port: Controller listen port.
	##
	## topic: Broker topic to send messages to.
	##
	## dpid: OpenFlow switch datapath id.
	##
	## Returns: OpenFlow::Controller record.
	global broker_new: function(name: string, host: addr, host_port: port, topic: string, dpid: count): OpenFlow::Controller;

	redef record ControllerState += {
		## Controller ip.
		broker_host: addr &optional;
		## Controller listen port.
		broker_port: port &optional;
		## OpenFlow switch datapath id.
		broker_dpid: count &optional;
		## Topic to send events for this controller to.
		broker_topic: string &optional;
	};

	global broker_flow_mod: event(name: string, dpid: count, match: ofp_match, flow_mod: ofp_flow_mod);
	global broker_flow_clear: event(name: string, dpid: count);
}

global broker_peers: table[port, string] of Controller;

function broker_describe(state: ControllerState): string
	{
	return fmt("Broker-%s:%d-%d", state$broker_host, state$broker_port, state$broker_dpid);
	}

function broker_flow_mod_fun(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
	{
	BrokerComm::event(state$broker_topic, BrokerComm::event_args(broker_flow_mod, state$_name, state$broker_dpid, match, flow_mod));

	return T;
	}

function broker_flow_clear_fun(state: OpenFlow::ControllerState): bool
	{
	BrokerComm::event(state$broker_topic, BrokerComm::event_args(broker_flow_clear, state$_name, state$broker_dpid));

	return T;
	}

function broker_init(state: OpenFlow::ControllerState)
	{
	BrokerComm::enable();
	BrokerComm::connect(cat(state$broker_host), state$broker_port, 1sec);
	BrokerComm::subscribe_to_events(state$broker_topic); # openflow success and failure events are directly sent back via the other plugin via broker.
	}

event BrokerComm::outgoing_connection_established(peer_address: string, peer_port: port, peer_name: string)
	{
	if ( [peer_port, peer_address] !in broker_peers )
		# ok, this one was none of ours...
		return;

	local p = broker_peers[peer_port, peer_address];
	controller_init_done(p);
	delete broker_peers[peer_port, peer_address];
	}

# Broker controller constructor.
function broker_new(name: string, host: addr, host_port: port, topic: string, dpid: count): OpenFlow::Controller
	{
	local c = OpenFlow::Controller($state=OpenFlow::ControllerState($broker_host=host, $broker_port=host_port, $broker_dpid=dpid, $broker_topic=topic),
		$flow_mod=broker_flow_mod_fun, $flow_clear=broker_flow_clear_fun, $describe=broker_describe, $supports_flow_removed=T, $init=broker_init);

	register_controller(OpenFlow::BROKER, name, c);

	if ( [host_port, cat(host)] in broker_peers )
		Reporter::warning(fmt("Peer %s:%s was added to the OpenFlow broker plugin twice.", host, host_port));
	else
		broker_peers[host_port, cat(host)] = c;

	return c;
	}
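For reference, a hypothetical instantiation of this plugin (host, port, topic and dpid below are placeholder values, not from the patch):

@load base/frameworks/openflow

event bro_init()
	{
	# Sketch only: relay flow_mods to a Broker endpoint listening on
	# 127.0.0.1:9999 under the topic "bro/openflow" for switch dpid 42.
	local of_broker = OpenFlow::broker_new("of-broker", 127.0.0.1, 9999/tcp, "bro/openflow", 42);
	}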
scripts/base/frameworks/openflow/plugins/log.bro (new file, 76 lines)
@@ -0,0 +1,76 @@
##! OpenFlow plugin that outputs flow-modification commands
##! to a Bro log file.

@load base/frameworks/openflow
@load base/frameworks/logging

module OpenFlow;

export {
	redef enum Plugin += {
		OFLOG,
	};

	redef enum Log::ID += { LOG };

	## Log controller constructor.
	##
	## dpid: OpenFlow switch datapath id.
	##
	## success_event: If true, flow_mod_success is raised for each logged line.
	##
	## Returns: OpenFlow::Controller record.
	global log_new: function(dpid: count, success_event: bool &default=T): OpenFlow::Controller;

	redef record ControllerState += {
		## OpenFlow switch datapath id.
		log_dpid: count &optional;
		## Raise or do not raise the success event.
		log_success_event: bool &optional;
	};

	## The record type which contains column fields of the OpenFlow log.
	type Info: record {
		## Network time.
		ts: time &log;
		## OpenFlow switch datapath id.
		dpid: count &log;
		## OpenFlow match fields.
		match: ofp_match &log;
		## OpenFlow modify flow entry message.
		flow_mod: ofp_flow_mod &log;
	};

	## Event that can be handled to access the :bro:type:`OpenFlow::Info`
	## record as it is sent on to the logging framework.
	global log_openflow: event(rec: Info);
}

event bro_init() &priority=5
	{
	Log::create_stream(OpenFlow::LOG, [$columns=Info, $ev=log_openflow, $path="openflow"]);
	}

function log_flow_mod(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
	{
	Log::write(OpenFlow::LOG, [$ts=network_time(), $dpid=state$log_dpid, $match=match, $flow_mod=flow_mod]);
	if ( state$log_success_event )
		event OpenFlow::flow_mod_success(state$_name, match, flow_mod);

	return T;
	}

function log_describe(state: ControllerState): string
	{
	return fmt("Log-%d", state$log_dpid);
	}

function log_new(dpid: count, success_event: bool &default=T): OpenFlow::Controller
	{
	local c = OpenFlow::Controller($state=OpenFlow::ControllerState($log_dpid=dpid, $log_success_event=success_event),
		$flow_mod=log_flow_mod, $describe=log_describe, $supports_flow_removed=F);

	register_controller(OpenFlow::OFLOG, cat(dpid), c);

	return c;
	}
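A short usage sketch (all values are illustrative, not from the patch): the log plugin makes a convenient dry-run backend, since flow_mod() calls simply end up in openflow.log.

@load base/frameworks/openflow

global of_controller: OpenFlow::Controller;

event bro_init()
	{
	of_controller = OpenFlow::log_new(42);
	}

event connection_established(c: connection)
	{
	# Log a hypothetical "add flow" entry for every established connection.
	local m = OpenFlow::match_conn(c$id);
	local fm = OpenFlow::ofp_flow_mod($cookie=OpenFlow::generate_cookie(1),
	                                  $command=OpenFlow::OFPFC_ADD,
	                                  $idle_timeout=30);
	OpenFlow::flow_mod(of_controller, m, fm);
	}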
scripts/base/frameworks/openflow/plugins/ryu.bro (new file, 190 lines)
@@ -0,0 +1,190 @@
##! OpenFlow plugin for the Ryu controller.

@load base/frameworks/openflow
@load base/utils/active-http
@load base/utils/exec
@load base/utils/json

module OpenFlow;

export {
	redef enum Plugin += {
		RYU,
	};

	## Ryu controller constructor.
	##
	## host: Controller ip.
	##
	## host_port: Controller listen port.
	##
	## dpid: OpenFlow switch datapath id.
	##
	## Returns: OpenFlow::Controller record.
	global ryu_new: function(host: addr, host_port: count, dpid: count): OpenFlow::Controller;

	redef record ControllerState += {
		## Controller ip.
		ryu_host: addr &optional;
		## Controller listen port.
		ryu_port: count &optional;
		## OpenFlow switch datapath id.
		ryu_dpid: count &optional;
		## Enable debug mode - output JSON to stdout; do not perform actions.
		ryu_debug: bool &default=F;
	};
}

# Ryu ReST API flow_mod URL-path
const RYU_FLOWENTRY_PATH = "/stats/flowentry/";
# Ryu ReST API flow_stats URL-path
#const RYU_FLOWSTATS_PATH = "/stats/flow/";

# Ryu ReST API action_output type.
type ryu_flow_action: record {
	# Ryu uses strings as its ReST API output action.
	_type: string;
	# The output port for type OUTPUT.
	_port: count &optional;
};

# The ReST API documentation can be found at
# https://media.readthedocs.org/pdf/ryu/latest/ryu.pdf
# Ryu ReST API flow_mod type.
type ryu_ofp_flow_mod: record {
	dpid: count;
	cookie: count &optional;
	cookie_mask: count &optional;
	table_id: count &optional;
	idle_timeout: count &optional;
	hard_timeout: count &optional;
	priority: count &optional;
	flags: count &optional;
	match: OpenFlow::ofp_match;
	actions: vector of ryu_flow_action;
	out_port: count &optional;
	out_group: count &optional;
};

# Mapping between ofp flow mod commands and ryu urls
const ryu_url: table[ofp_flow_mod_command] of string = {
	[OFPFC_ADD] = "add",
	[OFPFC_MODIFY] = "modify",
	[OFPFC_MODIFY_STRICT] = "modify_strict",
	[OFPFC_DELETE] = "delete",
	[OFPFC_DELETE_STRICT] = "delete_strict",
};

# Ryu flow_mod function
function ryu_flow_mod(state: OpenFlow::ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool
	{
	if ( state$_plugin != RYU )
		{
		Reporter::error("Ryu openflow plugin was called with state of non-ryu plugin");
		return F;
		}

	# Generate ryu_flow_actions because their type differs (using strings as type).
	local flow_actions: vector of ryu_flow_action = vector();

	for ( i in flow_mod$actions$out_ports )
		flow_actions[|flow_actions|] = ryu_flow_action($_type="OUTPUT", $_port=flow_mod$actions$out_ports[i]);

	# Generate our ryu_flow_mod record for the ReST API call.
	local mod: ryu_ofp_flow_mod = ryu_ofp_flow_mod(
		$dpid=state$ryu_dpid,
		$cookie=flow_mod$cookie,
		$idle_timeout=flow_mod$idle_timeout,
		$hard_timeout=flow_mod$hard_timeout,
		$priority=flow_mod$priority,
		$flags=flow_mod$flags,
		$match=match,
		$actions=flow_actions
	);

	if ( flow_mod?$out_port )
		mod$out_port = flow_mod$out_port;
	if ( flow_mod?$out_group )
		mod$out_group = flow_mod$out_group;

	# Type of the command
	local command_type: string;

	if ( flow_mod$command in ryu_url )
		command_type = ryu_url[flow_mod$command];
	else
		{
		Reporter::warning(fmt("The given OpenFlow command type '%s' is not available", cat(flow_mod$command)));
		return F;
		}

	local url = cat("http://", cat(state$ryu_host), ":", cat(state$ryu_port), RYU_FLOWENTRY_PATH, command_type);

	if ( state$ryu_debug )
		{
		print url;
		print to_json(mod);
		event OpenFlow::flow_mod_success(state$_name, match, flow_mod);
		return T;
		}

	# Create the ActiveHTTP request and convert the record to a Ryu ReST API JSON string
	local request: ActiveHTTP::Request = ActiveHTTP::Request(
		$url=url,
		$method="POST",
		$client_data=to_json(mod)
	);

	# Execute call to Ryu's ReST API
	when ( local result = ActiveHTTP::request(request) )
		{
		if ( result$code == 200 )
			event OpenFlow::flow_mod_success(state$_name, match, flow_mod, result$body);
		else
			{
			Reporter::warning(fmt("Flow modification failed with error: %s", result$body));
			event OpenFlow::flow_mod_failure(state$_name, match, flow_mod, result$body);
			return F;
			}
		}

	return T;
	}

function ryu_flow_clear(state: OpenFlow::ControllerState): bool
	{
	local url = cat("http://", cat(state$ryu_host), ":", cat(state$ryu_port), RYU_FLOWENTRY_PATH, "clear", "/", state$ryu_dpid);

	if ( state$ryu_debug )
		{
		print url;
		return T;
		}

	local request: ActiveHTTP::Request = ActiveHTTP::Request(
		$url=url,
		$method="DELETE"
	);

	when ( local result = ActiveHTTP::request(request) )
		{
		}

	return T;
	}

function ryu_describe(state: ControllerState): string
	{
	return fmt("Ryu-%d-http://%s:%d", state$ryu_dpid, state$ryu_host, state$ryu_port);
	}

# Ryu controller constructor
function ryu_new(host: addr, host_port: count, dpid: count): OpenFlow::Controller
	{
	local c = OpenFlow::Controller($state=OpenFlow::ControllerState($ryu_host=host, $ryu_port=host_port, $ryu_dpid=dpid),
		$flow_mod=ryu_flow_mod, $flow_clear=ryu_flow_clear, $describe=ryu_describe, $supports_flow_removed=F);

	register_controller(OpenFlow::RYU, cat(host,host_port,dpid), c);

	return c;
	}
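A hypothetical instantiation of the Ryu backend (address, port and dpid are placeholders, not from the patch); with ryu_debug set, the plugin only prints the URL and JSON it would send:

@load base/frameworks/openflow

event bro_init()
	{
	local of_ryu = OpenFlow::ryu_new(127.0.0.1, 8080, 42);
	of_ryu$state$ryu_debug = T;
	}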
scripts/base/frameworks/openflow/types.bro (new file, 132 lines)
@@ -0,0 +1,132 @@
##! Types used by the OpenFlow framework.

module OpenFlow;

@load ./consts

export {
	## Available openflow plugins.
	type Plugin: enum {
		## Internal placeholder plugin.
		INVALID,
	};

	## Controller related state.
	## Can be redefined by plugins to
	## add state.
	type ControllerState: record {
		## Internally set to the type of plugin used.
		_plugin: Plugin &optional;
		## Internally set to the unique name of the controller.
		_name: string &optional;
		## Internally set to true once the controller is activated.
		_activated: bool &default=F;
	} &redef;

	## Openflow match definition.
	##
	## The openflow match record describes
	## which packets match to a specific
	## rule in a flow table.
	type ofp_match: record {
		# Input switch port.
		in_port: count &optional;
		# Ethernet source address.
		dl_src: string &optional;
		# Ethernet destination address.
		dl_dst: string &optional;
		# Input VLAN id.
		dl_vlan: count &optional;
		# Input VLAN priority.
		dl_vlan_pcp: count &optional;
		# Ethernet frame type.
		dl_type: count &optional;
		# IP ToS (actually DSCP field, 6 bits).
		nw_tos: count &optional;
		# IP protocol or lower 8 bits of ARP opcode.
		nw_proto: count &optional;
		# At the moment, we store both v4 and v6 in the same fields.
		# This is not how OpenFlow does it, we might want to change that...
		# IP source address.
		nw_src: subnet &optional;
		# IP destination address.
		nw_dst: subnet &optional;
		# TCP/UDP source port.
		tp_src: count &optional;
		# TCP/UDP destination port.
		tp_dst: count &optional;
	} &log;

	## The actions that can be taken in a flow.
	## (Separate record to make ofp_flow_mod less crowded.)
	type ofp_flow_action: record {
		## Output ports to send data to.
		out_ports: vector of count &default=vector();
		## Set VLAN vid to this value.
		vlan_vid: count &optional;
		## Set VLAN priority to this value.
		vlan_pcp: count &optional;
		## Strip the VLAN tag.
		vlan_strip: bool &default=F;
		## Set the Ethernet source address.
		dl_src: string &optional;
		## Set the Ethernet destination address.
		dl_dst: string &optional;
		## Set the IP ToS to this value.
		nw_tos: count &optional;
		## Set the source to this IP.
		nw_src: addr &optional;
		## Set the destination to this IP.
		nw_dst: addr &optional;
		## Set the TCP/UDP source port.
		tp_src: count &optional;
		## Set the TCP/UDP destination port.
		tp_dst: count &optional;
	} &log;

	## Openflow flow_mod definition, describing the action to perform.
	type ofp_flow_mod: record {
		## Opaque controller-issued identifier.
		# This is optional in the specification - but let's force
		# it so we always can identify our flows...
		cookie: count; # &default=BRO_COOKIE_ID * COOKIE_BID_START;
		# Flow actions
		## Table to put the flow in. OFPTT_ALL can be used for delete,
		## to delete flows from all matching tables.
		table_id: count &optional;
		## One of OFPFC_*.
		command: ofp_flow_mod_command; # &default=OFPFC_ADD;
		## Idle time before discarding (seconds).
		idle_timeout: count &default=0;
		## Max time before discarding (seconds).
		hard_timeout: count &default=0;
		## Priority level of flow entry.
		priority: count &default=0;
		## For OFPFC_DELETE* commands, require matching entries to include
		## this as an output port/group. OFPP_ANY/OFPG_ANY means no restrictions.
		out_port: count &optional;
		out_group: count &optional;
		## Bitmap of the OFPFF_* flags.
		flags: count &default=0;
		## Actions to take on match.
		actions: ofp_flow_action &default=ofp_flow_action();
	} &log;

	## Controller record representing an openflow controller.
	type Controller: record {
		## Controller related state.
		state: ControllerState;
		## Does the controller support the flow_removed event?
		supports_flow_removed: bool;
		## Function that describes the controller. Has to be implemented.
		describe: function(state: ControllerState): string;
		## One-time initialization function. If defined, controller_init_done has to be called once initialization finishes.
		init: function (state: ControllerState) &optional;
		## One-time destruction function.
		destroy: function (state: ControllerState) &optional;
		## flow_mod function.
		flow_mod: function(state: ControllerState, match: ofp_match, flow_mod: ofp_flow_mod): bool &optional;
		## flow_clear function.
		flow_clear: function(state: ControllerState): bool &optional;
	};
}
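Besides match_conn(), an ofp_match can also be built by hand. A sketch with made-up values that matches TCP traffic to port 80 on a single host:

@load base/frameworks/openflow

# Illustrative only: dl_type/nw_proto use the exported ethertype and
# IP protocol constants from consts.bro.
global http_match = OpenFlow::ofp_match(
	$dl_type=OpenFlow::ETH_IPv4,
	$nw_proto=OpenFlow::IP_TCP,
	$nw_dst=192.168.0.1/32,
	$tp_dst=80);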
@@ -138,7 +138,7 @@ redef enum PcapFilterID += {
function test_filter(filter: string): bool
	{
-	if ( ! precompile_pcap_filter(FilterTester, filter) )
+	if ( ! Pcap::precompile_pcap_filter(FilterTester, filter) )
		{
		# The given filter was invalid
		# TODO: generate a notice.

@@ -159,7 +159,7 @@ event filter_change_tracking()
event bro_init() &priority=5
	{
-	Log::create_stream(PacketFilter::LOG, [$columns=Info]);
+	Log::create_stream(PacketFilter::LOG, [$columns=Info, $path="packet_filter"]);

	# Preverify the capture and restrict filters to give more granular failure messages.
	for ( id in capture_filters )

@@ -273,7 +273,7 @@ function install(): bool
		return F;

	local ts = current_time();
-	if ( ! precompile_pcap_filter(DefaultPcapFilter, tmp_filter) )
+	if ( ! Pcap::precompile_pcap_filter(DefaultPcapFilter, tmp_filter) )
		{
		NOTICE([$note=Compile_Failure,
			$msg=fmt("Compiling packet filter failed"),

@@ -303,7 +303,7 @@ function install(): bool
		}
	info$filter = current_filter;

-	if ( ! install_pcap_filter(DefaultPcapFilter) )
+	if ( ! Pcap::install_pcap_filter(DefaultPcapFilter) )
		{
		# Installing the filter failed for some reason.
		info$success = F;
@@ -45,7 +45,7 @@ export {
event bro_init() &priority=5
	{
-	Log::create_stream(Reporter::LOG, [$columns=Info]);
+	Log::create_stream(Reporter::LOG, [$columns=Info, $path="reporter"]);
	}

event reporter_info(t: time, msg: string, location: string) &priority=-5
@@ -142,7 +142,7 @@ global did_sig_log: set[string] &read_expire = 1 hr;
event bro_init()
	{
-	Log::create_stream(Signatures::LOG, [$columns=Info, $ev=log_signature]);
+	Log::create_stream(Signatures::LOG, [$columns=Info, $ev=log_signature, $path="signatures"]);
	}

# Returns true if the given signature has already been triggered for the given

@@ -277,7 +277,7 @@ event signature_match(state: signature_state, msg: string, data: string)
			orig, sig_id, hcount);

		Log::write(Signatures::LOG,
-			[$note=Multiple_Sig_Responders,
+			[$ts=network_time(), $note=Multiple_Sig_Responders,
			 $src_addr=orig, $sig_id=sig_id, $event_msg=msg,
			 $host_count=hcount, $sub_msg=horz_scan_msg]);

@@ -105,7 +105,7 @@ export {
event bro_init() &priority=5
	{
-	Log::create_stream(Software::LOG, [$columns=Info, $ev=log_software]);
+	Log::create_stream(Software::LOG, [$columns=Info, $ev=log_software, $path="software"]);
	}

type Description: record {

@@ -280,6 +280,13 @@ function parse_mozilla(unparsed_version: string): Description
			v = parse(parts[1])$version;
			}
		}
	else if ( /AdobeAIR\/[0-9\.]*/ in unparsed_version )
		{
		software_name = "AdobeAIR";
		parts = split_string_all(unparsed_version, /AdobeAIR\/[0-9\.]*/);
		if ( 1 in parts )
			v = parse(parts[1])$version;
		}
	else if ( /AppleWebKit\/[0-9\.]*/ in unparsed_version )
		{
		software_name = "Unspecified WebKit";
@@ -89,7 +89,7 @@ redef likely_server_ports += { ayiya_ports, teredo_ports, gtpv1_ports };
event bro_init() &priority=5
	{
-	Log::create_stream(Tunnel::LOG, [$columns=Info]);
+	Log::create_stream(Tunnel::LOG, [$columns=Info, $path="tunnel"]);

	Analyzer::register_for_ports(Analyzer::ANALYZER_AYIYA, ayiya_ports);
	Analyzer::register_for_ports(Analyzer::ANALYZER_TEREDO, teredo_ports);
@@ -39,6 +39,13 @@ type count_set: set[count];
## directly and then remove this alias.
type index_vec: vector of count;

## A vector of subnets.
##
## .. todo:: We need this type definition only for declaring builtin functions
##    via ``bifcl``. We should extend ``bifcl`` to understand composite types
##    directly and then remove this alias.
type subnet_vec: vector of subnet;

## A vector of any, used by some builtin functions to store a list of varying
## types.
##

@@ -120,6 +127,18 @@ type conn_id: record {
	resp_p: port;	##< The responder's port number.
} &log;

## The identifying 4-tuple of a uni-directional flow.
##
## .. note:: It's actually a 5-tuple: the transport-layer protocol is stored as
##    part of the port values, `src_p` and `dst_p`, and can be extracted from
##    them with :bro:id:`get_port_transport_proto`.
type flow_id : record {
	src_h: addr;	##< The source IP address.
	src_p: port;	##< The source port number.
	dst_h: addr;	##< The destination IP address.
	dst_p: port;	##< The destination port number.
} &log;

## Specifics about an ICMP conversation. ICMP events typically pass this in
## addition to :bro:type:`conn_id`.
##
@@ -333,8 +352,6 @@ type connection: record {
	## to parse the same data. If so, all will be recorded. Also note that
	## the recorded services are independent of any transport-level protocols.
	service: set[string];
-	addl: string;	##< Deprecated.
-	hot: count;	##< Deprecated.
	history: string;	##< State history of connections. See *history* in :bro:see:`Conn::Info`.
	## A globally unique connection identifier. For each connection, Bro
	## creates an ID that is very likely unique across independent Bro runs.

@@ -347,6 +364,12 @@ type connection: record {
	## for the connection unless the :bro:id:`tunnel_changed` event is
	## handled and reassigns this field to the new encapsulation.
	tunnel: EncapsulatingConnVector &optional;

	## The outer VLAN, if applicable, for this connection.
	vlan: int &optional;

	## The inner VLAN, if applicable, for this connection.
	inner_vlan: int &optional;
};

## Default amount of time a file can be inactive before the file analysis
@@ -414,6 +437,14 @@ type fa_file: record {
	bof_buffer: string &optional;
} &redef;

## Metadata that's been inferred about a particular file.
type fa_metadata: record {
	## The strongest matching mime type if one was discovered.
	mime_type: string &optional;
	## All matching mime types if any were discovered.
	mime_types: mime_matches &optional;
};

## Fields of a SYN packet.
##
## .. bro:see:: connection_SYN_packet

@@ -440,6 +471,7 @@ type NetStats: record {
	## packet capture system, this value may not be available and will then
	## be always set to zero.
	pkts_link: count &default=0;
	bytes_recvd: count &default=0;	##< Bytes received by Bro.
};

## Statistics about Bro's resource consumption.
@@ -733,6 +765,7 @@ type pcap_packet: record {
	caplen: count;	##< The number of bytes captured (<= *len*).
	len: count;	##< The length of the packet in bytes, including link-level header.
	data: string;	##< The payload of the packet, including link-level header.
	link_type: link_encap;	##< Layer 2 link encapsulation type.
};

## GeoIP location information.

@@ -928,7 +961,7 @@ const tcp_storm_interarrival_thresh = 1 sec &redef;
## seeing our peer's ACKs. Set to zero to turn off this determination.
##
## .. bro:see:: tcp_max_above_hole_without_any_acks tcp_excessive_data_without_further_acks
-const tcp_max_initial_window = 4096 &redef;
+const tcp_max_initial_window = 16384 &redef;

## If we're not seeing our peer's ACKs, the maximum volume of data above a
## sequence hole that we'll tolerate before assuming that there's been a packet

@@ -936,7 +969,7 @@ const tcp_max_initial_window = 4096 &redef;
## don't ever give up.
##
## .. bro:see:: tcp_max_initial_window tcp_excessive_data_without_further_acks
-const tcp_max_above_hole_without_any_acks = 4096 &redef;
+const tcp_max_above_hole_without_any_acks = 16384 &redef;

## If we've seen this much data without any of it being acked, we give up
## on that connection to avoid memory exhaustion due to buffering all that

@@ -947,6 +980,11 @@ const tcp_max_above_hole_without_any_acks = 4096 &redef;
## .. bro:see:: tcp_max_initial_window tcp_max_above_hole_without_any_acks
const tcp_excessive_data_without_further_acks = 10 * 1024 * 1024 &redef;

## Number of TCP segments to buffer beyond what's been acknowledged already
## to detect retransmission inconsistencies. Zero disables any additional
## buffering.
const tcp_max_old_segments = 0 &redef;

## For services without a handler, these sets define originator-side ports
## that still trigger reassembly.
##

@@ -1080,27 +1118,6 @@ const ENDIAN_LITTLE = 1;	##< Little endian.
const ENDIAN_BIG = 2;	##< Big endian.
const ENDIAN_CONFUSED = 3;	##< Tried to determine endian, but failed.

-## Deprecated.
-function append_addl(c: connection, addl: string)
-	{
-	if ( c$addl == "" )
-		c$addl= addl;
-
-	else if ( addl !in c$addl )
-		c$addl = fmt("%s %s", c$addl, addl);
-	}
-
-## Deprecated.
-function append_addl_marker(c: connection, addl: string, marker: string)
-	{
-	if ( c$addl == "" )
-		c$addl= addl;
-
-	else if ( addl !in c$addl )
-		c$addl = fmt("%s%s%s", c$addl, marker, addl);
-	}
-

# Values for :bro:see:`set_contents_file` *direction* argument.
# todo:: these should go into an enum to make them autodoc'able
const CONTENTS_NONE = 0;	##< Turn off recording of contents.
@@ -1509,6 +1526,34 @@ type pkt_hdr: record {
	icmp: icmp_hdr &optional;	##< The ICMP header if an ICMP packet.
};

## Values extracted from the layer 2 header.
##
## .. bro:see:: pkt_hdr
type l2_hdr: record {
	encap: link_encap;	##< L2 link encapsulation.
	len: count;	##< Total frame length on wire.
	cap_len: count;	##< Captured length.
	src: string &optional;	##< L2 source (if Ethernet).
	dst: string &optional;	##< L2 destination (if Ethernet).
	vlan: count &optional;	##< Outermost VLAN tag if any (and Ethernet).
	inner_vlan: count &optional;	##< Innermost VLAN tag if any (and Ethernet).
	eth_type: count &optional;	##< Innermost Ethertype (if Ethernet).
	proto: layer3_proto;	##< L3 protocol.
};

## A raw packet header, consisting of L2 header and everything in
## :bro:id:`pkt_hdr`.
##
## .. bro:see:: raw_packet pkt_hdr
type raw_pkt_hdr: record {
	l2: l2_hdr;	##< The layer 2 header.
	ip: ip4_hdr &optional;	##< The IPv4 header if an IPv4 packet.
	ip6: ip6_hdr &optional;	##< The IPv6 header if an IPv6 packet.
	tcp: tcp_hdr &optional;	##< The TCP header if a TCP packet.
	udp: udp_hdr &optional;	##< The UDP header if a UDP packet.
	icmp: icmp_hdr &optional;	##< The ICMP header if an ICMP packet.
};

## A Teredo origin indication header. See :rfc:`4380` for more information
## about the Teredo protocol.
##
@@ -2215,6 +2260,41 @@ export {
	const heartbeat_interval = 1.0 secs &redef;
}

module SSH;

export {
	## The client and server each have some preferences for the algorithms used
	## in each direction.
	type Algorithm_Prefs: record {
		## The algorithm preferences for client to server communication
		client_to_server: vector of string &optional;
		## The algorithm preferences for server to client communication
		server_to_client: vector of string &optional;
	};

	## This record lists the preferences of an SSH endpoint for
	## algorithm selection. During the initial :abbr:`SSH (Secure Shell)`
	## key exchange, each endpoint lists the algorithms
	## that it supports, in order of preference. See
	## :rfc:`4253#section-7.1` for details.
	type Capabilities: record {
		## Key exchange algorithms
		kex_algorithms: string_vec;
		## The algorithms supported for the server host key
		server_host_key_algorithms: string_vec;
		## Symmetric encryption algorithm preferences
		encryption_algorithms: Algorithm_Prefs;
		## Symmetric MAC algorithm preferences
		mac_algorithms: Algorithm_Prefs;
		## Compression algorithm preferences
		compression_algorithms: Algorithm_Prefs;
		## Language preferences
		languages: Algorithm_Prefs &optional;
		## Are these the capabilities of the server?
		is_server: bool;
	};
}

module GLOBAL;

## An NTP message.

@@ -2448,7 +2528,7 @@ global dns_skip_all_addl = T &redef;
## If a DNS request includes more than this many queries, assume it's non-DNS
## traffic and do not process it. Set to 0 to turn off this functionality.
-global dns_max_queries = 5;
+global dns_max_queries = 25 &redef;

## HTTP session statistics.
##
@ -2511,6 +2591,145 @@ type irc_join_info: record {
|
|||
## .. bro:see:: irc_join_message
|
||||
type irc_join_list: set[irc_join_info];
|
||||
|
||||
module PE;
|
||||
export {
|
||||
type PE::DOSHeader: record {
|
||||
## The magic number of a portable executable file ("MZ").
|
||||
signature : string;
|
||||
## The number of bytes in the last page that are used.
|
||||
used_bytes_in_last_page : count;
|
||||
## The number of pages in the file that are part of the PE file itself.
|
||||
file_in_pages : count;
|
||||
## Number of relocation entries stored after the header.
|
||||
num_reloc_items : count;
|
||||
## Number of paragraphs in the header.
|
||||
header_in_paragraphs : count;
|
||||
## Number of paragraps of additional memory that the program will need.
|
||||
min_extra_paragraphs : count;
|
||||
## Maximum number of paragraphs of additional memory.
|
||||
max_extra_paragraphs : count;
|
||||
## Relative value of the stack segment.
|
||||
init_relative_ss : count;
|
||||
## Initial value of the SP register.
|
||||
init_sp : count;
|
||||
## Checksum. The 16-bit sum of all words in the file should be 0. Normally not set.
|
||||
checksum : count;
|
||||
## Initial value of the IP register.
|
||||
init_ip : count;
|
||||
## Initial value of the CS register (relative to the initial segment).
|
||||
init_relative_cs : count;
|
||||
## Offset of the first relocation table.
|
||||
addr_of_reloc_table : count;
|
||||
## Overlays allow you to append data to the end of the file. If this is the main program,
|
||||
## this will be 0.
|
||||
overlay_num : count;
|
||||
## OEM identifier.
|
||||
oem_id : count;
|
||||
## Additional OEM info, specific to oem_id.
|
||||
oem_info : count;
|
||||
## Address of the new EXE header.
|
||||
addr_of_new_exe_header : count;
|
||||
};
|
||||
|
||||
type PE::FileHeader: record {
|
||||
## The target machine that the file was compiled for.
|
||||
machine : count;
|
||||
## The time that the file was created at.
|
||||
ts : time;
|
||||
## Pointer to the symbol table.
|
||||
sym_table_ptr : count;
|
||||
## Number of symbols.
|
||||
num_syms : count;
|
||||
## The size of the optional header.
|
||||
optional_header_size : count;
|
||||
## Bit flags that determine if this file is executable, non-relocatable, and/or a DLL.
|
||||
characteristics : set[count];
|
||||
};
|
||||
|
||||
type PE::OptionalHeader: record {
|
||||
## PE32 or PE32+ indicator.
|
||||
magic : count;
|
||||
## The major version of the linker used to create the PE.
|
||||
major_linker_version : count;
|
||||
## The minor version of the linker used to create the PE.
|
||||
minor_linker_version : count;
|
||||
## Size of the .text section.
|
||||
size_of_code : count;
|
||||
## Size of the .data section.
|
||||
size_of_init_data : count;
|
||||
## Size of the .bss section.
|
||||
size_of_uninit_data : count;
|
||||
## The relative virtual address (RVA) of the entry point.
|
||||
addr_of_entry_point : count;
|
||||
## The relative virtual address (RVA) of the .text section.
|
||||
base_of_code : count;
|
||||
## The relative virtual address (RVA) of the .data section.
|
||||
base_of_data : count &optional;
|
||||
## Preferred memory location for the image to be based at.
|
||||
image_base : count;
|
||||
## The alignment (in bytes) of sections when they're loaded in memory.
|
||||
section_alignment : count;
|
||||
## The alignment (in bytes) of the raw data of sections.
|
||||
file_alignment : count;
|
||||
## The major version of the required OS.
|
||||
os_version_major : count;
|
||||
## The minor version of the required OS.
|
||||
os_version_minor : count;
|
||||
## The major version of this image.
|
||||
major_image_version : count;
|
||||
## The minor version of this image.
|
||||
minor_image_version : count;
|
||||
## The major version of the subsystem required to run this file.
|
||||
major_subsys_version : count;
|
||||
## The minor version of the subsystem required to run this file.
|
||||
minor_subsys_version : count;
|
||||
## The size (in bytes) of the image as the image is loaded in memory.
|
||||
size_of_image : count;
|
||||
## The size (in bytes) of the headers, rounded up to file_alignment.
|
||||
size_of_headers : count;
|
||||
## The image file checksum.
|
||||
checksum : count;
|
||||
## The subsystem that's required to run this image.
|
||||
subsystem : count;
|
||||
## Bit flags that determine how to execute or load this file.
|
||||
dll_characteristics : set[count];
|
||||
## A vector with the sizes of various tables and strings that are
|
||||
## defined in the optional header data directories. Examples include
|
||||
## the import table, the resource table, and debug information.
|
||||
table_sizes : vector of count;
|
||||
|
||||
};
|
||||
|
||||
## Record for Portable Executable (PE) section headers.
|
||||
type PE::SectionHeader: record {
|
||||
## The name of the section.
|
||||
name : string;
|
||||
## The total size of the section when loaded into memory.
|
||||
virtual_size : count;
|
||||
## The relative virtual address (RVA) of the section.
|
||||
virtual_addr : count;
|
||||
## The size of the initialized data for the section, as it is
|
||||
## in the file on disk.
|
||||
size_of_raw_data : count;
|
||||
## The file pointer to the initialized data for the section,
|
||||
## as it is in the file on disk.
|
||||
ptr_to_raw_data : count;
|
||||
## The file pointer to the beginning of relocation entries for
|
||||
## the section.
|
||||
ptr_to_relocs : count;
|
||||
## The file pointer to the beginning of line-number entries for
|
||||
## the section.
|
||||
ptr_to_line_nums : count;
|
||||
## The number of relocation entries for the section.
|
||||
num_of_relocs : count;
|
||||
## The number of line-number entries for the section.
|
||||
num_of_line_nums : count;
|
||||
## Bit-flags that describe the characteristics of the section.
|
||||
characteristics : set[count];
|
||||
};
|
||||
}
|
||||
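A minimal sketch of reading a few of these fields from script land; it assumes the PE analyzer raises a pe_file_header event with the signature shown below (assumed for illustration, not shown in this diff):

# Sketch only: event name and signature assumed.
event pe_file_header(f: fa_file, h: PE::FileHeader)
	{
	# 0x2000 is the DLL bit documented among the characteristics flags.
	print fmt("PE %s: machine=%d, linked at %s, DLL=%s",
	          f$id, h$machine, h$ts, 0x2000 in h$characteristics);
	}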
module GLOBAL;
|
||||
|
||||
## Deprecated.
|
||||
##
|
||||
## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere
|
||||
|
@ -2635,60 +2854,6 @@ global generate_OS_version_event: set[subnet] &redef;
|
|||
# number>``), which were seen during the sample.
|
||||
type load_sample_info: set[string];
|
||||
|
||||
## ID for NetFlow header. This is primarily a means to sort together NetFlow
|
||||
## headers and flow records at the script level.
|
||||
type nfheader_id: record {
|
||||
## Name of the NetFlow file (e.g., ``netflow.dat``) or the receiving
|
||||
## socket address (e.g., ``127.0.0.1:5555``), or an explicit name if
|
||||
## specified to ``-y`` or ``-Y``.
|
||||
rcvr_id: string;
|
||||
## A serial number, ignoring any overflows.
|
||||
pdu_id: count;
|
||||
};
|
||||
|
||||
## A NetFlow v5 header.
|
||||
##
|
||||
## .. bro:see:: netflow_v5_header
|
||||
type nf_v5_header: record {
|
||||
h_id: nfheader_id; ##< ID for sorting.
|
||||
cnt: count; ##< TODO.
|
||||
sysuptime: interval; ##< Router's uptime.
|
||||
exporttime: time; ##< When the data was exported.
|
||||
flow_seq: count; ##< Sequence number.
|
||||
eng_type: count; ##< Engine type.
|
||||
eng_id: count; ##< Engine ID.
|
||||
sample_int: count; ##< Sampling interval.
|
||||
exporter: addr; ##< Exporter address.
|
||||
};
|
||||
|
||||
## A NetFlow v5 record.
|
||||
##
|
||||
## .. bro:see:: netflow_v5_record
|
||||
type nf_v5_record: record {
|
||||
h_id: nfheader_id; ##< ID for sorting.
|
||||
id: conn_id; ##< Connection ID.
|
||||
nexthop: addr; ##< Address of next hop.
|
||||
input: count; ##< Input interface.
|
||||
output: count; ##< Output interface.
|
||||
pkts: count; ##< Number of packets.
|
||||
octets: count; ##< Number of bytes.
|
||||
first: time; ##< Timestamp of first packet.
|
||||
last: time; ##< Timestamp of last packet.
|
||||
tcpflag_fin: bool; ##< FIN flag for TCP flows.
|
||||
tcpflag_syn: bool; ##< SYN flag for TCP flows.
|
||||
tcpflag_rst: bool; ##< RST flag for TCP flows.
|
||||
tcpflag_psh: bool; ##< PSH flag for TCP flows.
|
||||
tcpflag_ack: bool; ##< ACK flag for TCP flows.
|
||||
tcpflag_urg: bool; ##< URG flag for TCP flows.
|
||||
proto: count; ##< IP protocol.
|
||||
tos: count; ##< Type of service.
|
||||
src_as: count; ##< Source AS.
|
||||
dst_as: count; ##< Destination AS.
|
||||
src_mask: count; ##< Source mask.
|
||||
dst_mask: count; ##< Destination mask.
|
||||
};
|
||||
|
||||
|
||||
## A BitTorrent peer.
|
||||
##
|
||||
## .. bro:see:: bittorrent_peer_set
|
||||
|
@ -2774,19 +2939,20 @@ export {
|
|||
module X509;
|
||||
export {
|
||||
type Certificate: record {
|
||||
version: count; ##< Version number.
|
||||
serial: string; ##< Serial number.
|
||||
subject: string; ##< Subject.
|
||||
issuer: string; ##< Issuer.
|
||||
not_valid_before: time; ##< Timestamp before when certificate is not valid.
|
||||
not_valid_after: time; ##< Timestamp after when certificate is not valid.
|
||||
key_alg: string; ##< Name of the key algorithm
|
||||
sig_alg: string; ##< Name of the signature algorithm
|
||||
key_type: string &optional; ##< Key type, if key parseable by openssl (either rsa, dsa or ec)
|
||||
key_length: count &optional; ##< Key length in bits
|
||||
exponent: string &optional; ##< Exponent, if RSA-certificate
|
||||
curve: string &optional; ##< Curve, if EC-certificate
|
||||
} &log;
|
||||
version: count &log; ##< Version number.
|
||||
serial: string &log; ##< Serial number.
|
||||
subject: string &log; ##< Subject.
|
||||
issuer: string &log; ##< Issuer.
|
||||
cn: string &optional; ##< Last (most specific) common name.
|
||||
not_valid_before: time &log; ##< Timestamp before when certificate is not valid.
|
||||
not_valid_after: time &log; ##< Timestamp after when certificate is not valid.
|
||||
key_alg: string &log; ##< Name of the key algorithm
|
||||
sig_alg: string &log; ##< Name of the signature algorithm
|
||||
key_type: string &optional &log; ##< Key type, if key parseable by openssl (either rsa, dsa or ec)
|
||||
key_length: count &optional &log; ##< Key length in bits
|
||||
exponent: string &optional &log; ##< Exponent, if RSA-certificate
|
||||
curve: string &optional &log; ##< Curve, if EC-certificate
|
||||
};
|
||||
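Since key_type and key_length are optional, handlers should test for them before use; a minimal sketch, assuming the x509_certificate event carries this record with the signature shown (an assumption for illustration):

# Sketch only: event name and signature assumed.
event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate)
	{
	if ( cert?$key_type && cert?$key_length )
		print fmt("%s: %d-bit %s key", cert$subject, cert$key_length, cert$key_type);
	}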
|
||||
type Extension: record {
|
||||
name: string; ##< Long name of extension. oid if name not known
|
||||
|
@ -2847,7 +3013,44 @@ export {
|
|||
attributes : RADIUS::Attributes &optional;
|
||||
};
|
||||
}
|
||||
module GLOBAL;
|
||||
|
||||
module RDP;
|
||||
export {
|
||||
type RDP::EarlyCapabilityFlags: record {
|
||||
support_err_info_pdu: bool;
|
||||
want_32bpp_session: bool;
|
||||
support_statusinfo_pdu: bool;
|
||||
strong_asymmetric_keys: bool;
|
||||
support_monitor_layout_pdu: bool;
|
||||
support_netchar_autodetect: bool;
|
||||
support_dynvc_gfx_protocol: bool;
|
||||
support_dynamic_time_zone: bool;
|
||||
support_heartbeat_pdu: bool;
|
||||
};
|
||||
|
||||
type RDP::ClientCoreData: record {
|
||||
version_major: count;
|
||||
version_minor: count;
|
||||
desktop_width: count;
|
||||
desktop_height: count;
|
||||
color_depth: count;
|
||||
sas_sequence: count;
|
||||
keyboard_layout: count;
|
||||
client_build: count;
|
||||
client_name: string;
|
||||
keyboard_type: count;
|
||||
keyboard_sub: count;
|
||||
keyboard_function_key: count;
|
||||
ime_file_name: string;
|
||||
post_beta2_color_depth: count &optional;
|
||||
client_product_id: string &optional;
|
||||
serial_number: count &optional;
|
||||
high_color_depth: count &optional;
|
||||
supported_color_depths: count &optional;
|
||||
ec_flags: RDP::EarlyCapabilityFlags &optional;
|
||||
dig_product_id: string &optional;
|
||||
};
|
||||
}
|
||||
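A minimal sketch of consuming the client core data, assuming the analyzer delivers it via an rdp_client_core_data event with the signature shown (an assumption for illustration):

# Sketch only: event name and signature assumed.
event rdp_client_core_data(c: connection, data: RDP::ClientCoreData)
	{
	print fmt("RDP client %s requests %dx%d (build %d)",
	          data$client_name, data$desktop_width, data$desktop_height, data$client_build);
	}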
|
||||
@load base/bif/plugins/Bro_SNMP.types.bif
|
||||
|
||||
|
@ -2971,6 +3174,186 @@ export {
|
|||
};
|
||||
}
|
||||
|
||||
@load base/bif/plugins/Bro_KRB.types.bif
|
||||
|
||||
module KRB;
|
||||
export {
|
||||
## KDC Options. See :rfc:`4120`
|
||||
type KRB::KDC_Options: record {
|
||||
## The ticket to be issued should have its forwardable flag set.
|
||||
forwardable : bool;
|
||||
## A request for forwarding of a TGT.
|
||||
forwarded : bool;
|
||||
## The ticket to be issued should have its proxiable flag set.
|
||||
proxiable : bool;
|
||||
## A request for a proxy.
|
||||
proxy : bool;
|
||||
## The ticket to be issued should have its may-postdate flag set.
|
||||
allow_postdate : bool;
|
||||
## A request for a postdated ticket.
|
||||
postdated : bool;
|
||||
## The ticket to be issued should have its renewable flag set.
|
||||
renewable : bool;
|
||||
## Reserved for opt_hardware_auth
|
||||
opt_hardware_auth : bool;
|
||||
## Request that the KDC not check the transited field of a TGT against
|
||||
## the policy of the local realm before it will issue derivative tickets
|
||||
## based on the TGT.
|
||||
disable_transited_check : bool;
|
||||
## If a ticket with the requested lifetime cannot be issued, a renewable
|
||||
## ticket is acceptable
|
||||
renewable_ok : bool;
|
||||
## The ticket for the end server is to be encrypted in the session key
|
||||
## from the additional TGT provided
|
||||
enc_tkt_in_skey : bool;
|
||||
## The request is for a renewal
|
||||
renew : bool;
|
||||
## The request is to validate a postdated ticket.
|
||||
validate : bool;
|
||||
};
|
||||
|
||||
## AP Options. See :rfc:`4120`
|
||||
type KRB::AP_Options: record {
|
||||
## Indicates that user-to-user authentication is in use
|
||||
use_session_key : bool;
|
||||
## Mutual authentication is required
|
||||
mutual_required : bool;
|
||||
};
|
||||
|
||||
## Used in a few places in the Kerberos analyzer for elements
|
||||
## that have a type and a string value.
|
||||
type KRB::Type_Value: record {
|
||||
## The data type
|
||||
data_type : count;
|
||||
## The data value
|
||||
val : string;
|
||||
};
|
||||
|
||||
type KRB::Type_Value_Vector: vector of KRB::Type_Value;
|
||||
|
||||
## A Kerberos host address. See :rfc:`4120`.
|
||||
type KRB::Host_Address: record {
|
||||
## IPv4 or IPv6 address
|
||||
ip : addr &log &optional;
|
||||
## NetBIOS address
|
||||
netbios : string &log &optional;
|
||||
## Some other type that we don't support yet
|
||||
unknown : KRB::Type_Value &optional;
|
||||
};
|
||||
|
||||
type KRB::Host_Address_Vector: vector of KRB::Host_Address;
|
||||
|
||||
## The data from the SAFE message. See :rfc:`4120`.
|
||||
type KRB::SAFE_Msg: record {
|
||||
## Protocol version number (5 for KRB5)
|
||||
pvno : count;
|
||||
## The message type (20 for SAFE_MSG)
|
||||
msg_type : count;
|
||||
## The application-specific data that is being passed
|
||||
## from the sender to the receiver
|
||||
data : string;
|
||||
## Current time from the sender of the message
|
||||
timestamp : time &optional;
|
||||
## Sequence number used to detect replays
|
||||
seq : count &optional;
|
||||
## Sender address
|
||||
sender : Host_Address &optional;
|
||||
## Recipient address
|
||||
recipient : Host_Address &optional;
|
||||
};
|
||||
|
||||
## The data from the ERROR_MSG message. See :rfc:`4120`.
|
||||
type KRB::Error_Msg: record {
|
||||
## Protocol version number (5 for KRB5)
|
||||
pvno : count;
|
||||
## The message type (30 for ERROR_MSG)
|
||||
msg_type : count;
|
||||
## Current time on the client
|
||||
client_time : time &optional;
|
||||
## Current time on the server
|
||||
server_time : time;
|
||||
## The specific error code
|
||||
error_code : count;
|
||||
## Realm of the ticket
|
||||
client_realm : string &optional;
|
||||
## Name on the ticket
|
||||
client_name : string &optional;
|
||||
## Realm of the service
|
||||
service_realm : string;
|
||||
## Name of the service
|
||||
service_name : string;
|
||||
## Additional text to explain the error
|
||||
error_text : string &optional;
|
||||
## Optional pre-authentication data
|
||||
pa_data : vector of KRB::Type_Value &optional;
|
||||
};
|
||||
|
||||
## A Kerberos ticket. See :rfc:`4120`.
|
||||
type KRB::Ticket: record {
|
||||
## Protocol version number (5 for KRB5)
|
||||
pvno : count;
|
||||
## Realm
|
||||
realm : string;
|
||||
## Name of the service
|
||||
service_name : string;
|
||||
## Cipher the ticket was encrypted with
|
||||
cipher : count;
|
||||
};
|
||||
|
||||
type KRB::Ticket_Vector: vector of KRB::Ticket;
|
||||
|
||||
## The data from the AS_REQ and TGS_REQ messages. See :rfc:`4120`.
|
||||
type KRB::KDC_Request: record {
|
||||
## Protocol version number (5 for KRB5)
|
||||
pvno : count;
|
||||
## The message type (10 for AS_REQ, 12 for TGS_REQ)
|
||||
msg_type : count;
|
||||
## Optional pre-authentication data
|
||||
pa_data : vector of KRB::Type_Value &optional;
|
||||
## Options specified in the request
|
||||
kdc_options : KRB::KDC_Options;
|
||||
## Name on the ticket
|
||||
client_name : string &optional;
|
||||
|
||||
## Realm of the service
|
||||
service_realm : string;
|
||||
## Name of the service
|
||||
service_name : string &optional;
|
||||
## Time the ticket is good from
|
||||
from : time &optional;
|
||||
## Time the ticket is good till
|
||||
till : time;
|
||||
## The requested renew-till time
|
||||
rtime : time &optional;
|
||||
|
||||
## A random nonce generated by the client
|
||||
nonce : count;
|
||||
## The desired encryption algorithms, in order of preference
|
||||
encryption_types : vector of count;
|
||||
## Any additional addresses the ticket should be valid for
|
||||
host_addrs : vector of KRB::Host_Address &optional;
|
||||
## Additional tickets may be included for certain transactions
|
||||
additional_tickets : vector of KRB::Ticket &optional;
|
||||
};
|
||||
|
||||
## The data from the AS_REP and TGS_REP messages. See :rfc:`4120`.
|
||||
type KRB::KDC_Response: record {
|
||||
## Protocol version number (5 for KRB5)
|
||||
pvno : count;
|
||||
## The message type (11 for AS_REP, 13 for TGS_REP)
|
||||
msg_type : count;
|
||||
## Optional pre-authentication data
|
||||
pa_data : vector of KRB::Type_Value &optional;
|
||||
## Realm on the ticket
|
||||
client_realm : string &optional;
|
||||
## Name on the ticket
|
||||
client_name : string;
|
||||
|
||||
## The ticket that was issued
|
||||
ticket : KRB::Ticket;
|
||||
};
|
||||
}
|
||||
|
||||
module GLOBAL;
|
||||
|
||||
@load base/bif/event.bif
|
||||
|
@ -3133,6 +3516,11 @@ const forward_remote_events = F &redef;
|
|||
## more sophisticated script-level communication framework.
|
||||
const forward_remote_state_changes = F &redef;
|
||||
|
||||
## The number of IO chunks allowed to be buffered between the child
|
||||
## and parent process of remote communication before Bro starts dropping
|
||||
## connections to remote peers in an attempt to catch up.
|
||||
const chunked_io_buffer_soft_cap = 800000 &redef;
|
||||
|
||||
## Place-holder constant indicating "no peer".
|
||||
const PEER_ID_NONE = 0;
|
||||
|
||||
|
@ -3293,20 +3681,11 @@ export {
|
|||
## Toggle whether to do GRE decapsulation.
|
||||
const enable_gre = T &redef;
|
||||
|
||||
## With this option set, the Teredo analysis will first check to see if
|
||||
## other protocol analyzers have confirmed that they think they're
|
||||
## parsing the right protocol and only continue with Teredo tunnel
|
||||
## decapsulation if nothing else has yet confirmed. This can help
|
||||
## reduce false positives of UDP traffic (e.g. DNS) that also happens
|
||||
## to have a valid Teredo encapsulation.
|
||||
const yielding_teredo_decapsulation = T &redef;
|
||||
|
||||
## With this set, the Teredo analyzer waits until it sees both sides
|
||||
## of a connection using a valid Teredo encapsulation before issuing
|
||||
## a :bro:see:`protocol_confirmation`. If it's false, the first
|
||||
## occurrence of a packet with valid Teredo encapsulation causes a
|
||||
## confirmation. Both cases are still subject to effects of
|
||||
## :bro:see:`Tunnel::yielding_teredo_decapsulation`.
|
||||
## confirmation.
|
||||
const delay_teredo_confirmation = T &redef;
|
||||
|
||||
## With this set, the GTP analyzer waits until the most-recent upflow
|
||||
|
@ -3322,7 +3701,6 @@ export {
|
|||
## (includes GRE tunnels).
|
||||
const ip_tunnel_timeout = 24hrs &redef;
|
||||
} # end export
|
||||
module GLOBAL;
|
||||
|
||||
module Reporter;
|
||||
export {
|
||||
|
@ -3341,10 +3719,18 @@ export {
|
|||
## external harness and shouldn't output anything to the console.
|
||||
const errors_to_stderr = T &redef;
|
||||
}
|
||||
module GLOBAL;
|
||||
|
||||
## Number of bytes per packet to capture from live interfaces.
|
||||
const snaplen = 8192 &redef;
|
||||
module Pcap;
|
||||
export {
|
||||
## Number of bytes per packet to capture from live interfaces.
|
||||
const snaplen = 8192 &redef;
|
||||
|
||||
## Number of Mbytes to provide as buffer space when capturing from live
|
||||
## interfaces.
|
||||
const bufsize = 128 &redef;
|
||||
} # end export
|
||||
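Both values are &redef, so a site can tune capture parameters from local policy; a minimal, illustrative sketch:

# Hypothetical tuning in local.bro.
redef Pcap::snaplen = 65535;  # capture full-sized frames
redef Pcap::bufsize = 256;    # larger capture buffer, in MB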
|
||||
module GLOBAL;
|
||||
|
||||
## Seed for hashes computed internally for probabilistic data structures. Using
|
||||
## the same value here will make the hashes compatible between independent Bro
|
||||
|
@ -3358,6 +3744,7 @@ const bits_per_uid: count = 96 &redef;
|
|||
|
||||
# Load these frameworks here because they use fairly deep integration with
|
||||
# BiFs and script-land defined types.
|
||||
@load base/frameworks/broker
|
||||
@load base/frameworks/logging
|
||||
@load base/frameworks/input
|
||||
@load base/frameworks/analyzer
|
||||
|
|
|
@ -37,6 +37,8 @@
|
|||
@load base/frameworks/reporter
|
||||
@load base/frameworks/sumstats
|
||||
@load base/frameworks/tunnels
|
||||
@load base/frameworks/openflow
|
||||
@load base/frameworks/netcontrol
|
||||
|
||||
@load base/protocols/conn
|
||||
@load base/protocols/dhcp
|
||||
|
@ -45,10 +47,13 @@
|
|||
@load base/protocols/ftp
|
||||
@load base/protocols/http
|
||||
@load base/protocols/irc
|
||||
@load base/protocols/krb
|
||||
@load base/protocols/modbus
|
||||
@load base/protocols/mysql
|
||||
@load base/protocols/pop3
|
||||
@load base/protocols/radius
|
||||
@load base/protocols/rdp
|
||||
@load base/protocols/sip
|
||||
@load base/protocols/snmp
|
||||
@load base/protocols/smtp
|
||||
@load base/protocols/socks
|
||||
|
@ -57,6 +62,7 @@
|
|||
@load base/protocols/syslog
|
||||
@load base/protocols/tunnels
|
||||
|
||||
@load base/files/pe
|
||||
@load base/files/hash
|
||||
@load base/files/extract
|
||||
@load base/files/unified2
|
||||
|
|
|
@ -50,7 +50,7 @@ event ChecksumOffloading::check()
|
|||
bad_checksum_msg += "UDP";
|
||||
}
|
||||
|
||||
local message = fmt("Your %s invalid %s checksums, most likely from NIC checksum offloading.", packet_src, bad_checksum_msg);
|
||||
local message = fmt("Your %s invalid %s checksums, most likely from NIC checksum offloading. By default, packets with invalid checksums are discarded by Bro unless using the -C command-line option or toggling the 'ignore_checksums' variable. Alternatively, disable checksum offloading by the network adapter to ensure Bro analyzes the actual checksums that are transmitted.", packet_src, bad_checksum_msg);
|
||||
Reporter::warning(message);
|
||||
done = T;
|
||||
}
|
||||
|
|
|
@ -2,3 +2,4 @@
|
|||
@load ./contents
|
||||
@load ./inactivity
|
||||
@load ./polling
|
||||
@load ./thresholds
|
||||
|
|
|
@ -47,7 +47,7 @@ export {
|
|||
## S2 Connection established and close attempt by originator seen (but no reply from responder).
|
||||
## S3 Connection established and close attempt by responder seen (but no reply from originator).
|
||||
## RSTO Connection established, originator aborted (sent a RST).
|
||||
## RSTR Established, responder aborted.
|
||||
## RSTR Responder sent a RST.
|
||||
## RSTOS0 Originator sent a SYN followed by a RST, we never saw a SYN-ACK from the responder.
|
||||
## RSTRH Responder sent a SYN ACK followed by a RST, we never saw a SYN from the (purported) originator.
|
||||
## SH Originator sent a SYN followed by a FIN, we never saw a SYN ACK from the responder (hence the connection was "half" open).
|
||||
|
@ -87,7 +87,8 @@ export {
|
|||
## f packet with FIN bit set
|
||||
## r packet with RST bit set
|
||||
## c packet with a bad checksum
|
||||
## i inconsistent packet (e.g. SYN+RST bits both set)
|
||||
## i inconsistent packet (e.g. FIN+RST bits set)
|
||||
## q multi-flag packet (SYN+FIN or SYN+RST bits set)
|
||||
## ====== ====================================================
|
||||
##
|
||||
## If the event comes from the originator, the letter is in
|
||||
|
@ -127,7 +128,7 @@ redef record connection += {
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(Conn::LOG, [$columns=Info, $ev=log_conn]);
|
||||
Log::create_stream(Conn::LOG, [$columns=Info, $ev=log_conn, $path="conn"]);
|
||||
}
|
||||
|
||||
function conn_state(c: connection, trans: transport_proto): string
|
||||
|
|
256
scripts/base/protocols/conn/thresholds.bro
Normal file
|
@ -0,0 +1,256 @@
|
|||
##! Implements a generic API to throw events when a connection crosses a
|
||||
##! fixed threshold of bytes or packets.
|
||||
|
||||
module ConnThreshold;
|
||||
|
||||
export {
|
||||
|
||||
type Thresholds: record {
|
||||
orig_byte: set[count] &default=count_set(); ##< current originator byte thresholds we watch for
|
||||
resp_byte: set[count] &default=count_set(); ##< current responder byte thresholds we watch for
|
||||
orig_packet: set[count] &default=count_set(); ##< current originator packet thresholds we watch for
|
||||
resp_packet: set[count] &default=count_set(); ##< current responder packet thresholds we watch for
|
||||
};
|
||||
|
||||
## Sets a byte threshold for connection sizes, adding it to potentially already existing thresholds.
|
||||
## conn_bytes_threshold_crossed will be raised for each set threshold.
|
||||
##
|
||||
## c: The connection.
|
||||
##
|
||||
## threshold: Threshold in bytes.
|
||||
##
|
||||
## is_orig: If true, threshold is set for bytes from originator, otherwise for bytes from responder.
|
||||
##
|
||||
## Returns: T on success, F on failure.
|
||||
global set_bytes_threshold: function(c: connection, threshold: count, is_orig: bool): bool;
|
||||
|
||||
## Sets a packet threshold for connection sizes, adding it to potentially already existing thresholds.
|
||||
## conn_packets_threshold_crossed will be raised for each set threshold.
|
||||
##
|
||||
## c: The connection.
|
||||
##
|
||||
## threshold: Threshold in packets.
|
||||
##
|
||||
## is_orig: If true, threshold is set for packets from originator, otherwise for packets from responder.
|
||||
##
|
||||
## Returns: T on success, F on failure.
|
||||
global set_packets_threshold: function(c: connection, threshold: count, is_orig: bool): bool;
|
||||
|
||||
## Deletes a byte threshold for connection sizes.
|
||||
##
|
||||
## c: The connection.
|
||||
##
|
||||
## threshold: Threshold in bytes to remove.
|
||||
##
|
||||
## is_orig: If true, threshold is removed for bytes from originator, otherwise for bytes from responder.
|
||||
##
|
||||
## Returns: T on success, F on failure.
|
||||
global delete_bytes_threshold: function(c: connection, threshold: count, is_orig: bool): bool;
|
||||
|
||||
## Deletes a packet threshold for connection sizes.
|
||||
##
|
||||
## c: The connection.
|
||||
##
|
||||
## threshold: Threshold in packets.
|
||||
##
|
||||
## is_orig: If true, threshold is removed for packets from originator, otherwise for packets from responder.
|
||||
##
|
||||
## Returns: T on success, F on failure.
|
||||
global delete_packets_threshold: function(c: connection, threshold: count, is_orig: bool): bool;
|
||||
|
||||
## Generated for a connection that crossed a set byte threshold
|
||||
##
|
||||
## c: the connection
|
||||
##
|
||||
## threshold: the threshold that was set
|
||||
##
|
||||
## is_orig: True if the threshold was crossed by the originator of the connection
|
||||
global bytes_threshold_crossed: event(c: connection, threshold: count, is_orig: bool);
|
||||
|
||||
## Generated for a connection that crossed a set packet threshold
|
||||
##
|
||||
## c: the connection
|
||||
##
|
||||
## threshold: the threshold that was set
|
||||
##
|
||||
## is_orig: True if the threshold was crossed by the originator of the connection
|
||||
global packets_threshold_crossed: event(c: connection, threshold: count, is_orig: bool);
|
||||
}
|
||||
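To show how the API above is intended to be used, a minimal sketch that watches the first megabyte sent by the originator of every connection (the hook point and the 1 MB value are illustrative choices, not part of this change):

# Usage sketch against the ConnThreshold API defined above.
event new_connection(c: connection)
	{
	ConnThreshold::set_bytes_threshold(c, 1048576, T);
	}

event ConnThreshold::bytes_threshold_crossed(c: connection, threshold: count, is_orig: bool)
	{
	if ( is_orig && threshold == 1048576 )
		print fmt("%s sent more than %d bytes to %s", c$id$orig_h, threshold, c$id$resp_h);
	}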
|
||||
redef record connection += {
|
||||
thresholds: ConnThreshold::Thresholds &optional;
|
||||
};
|
||||
|
||||
function set_conn(c: connection)
|
||||
{
|
||||
if ( c?$thresholds )
|
||||
return;
|
||||
|
||||
c$thresholds = Thresholds();
|
||||
}
|
||||
|
||||
function find_min_threshold(t: set[count]): count
|
||||
{
|
||||
if ( |t| == 0 )
|
||||
return 0;
|
||||
|
||||
local first = T;
|
||||
local min: count = 0;
|
||||
|
||||
for ( i in t )
|
||||
{
|
||||
if ( first )
|
||||
{
|
||||
min = i;
|
||||
first = F;
|
||||
}
|
||||
else
|
||||
{
|
||||
if ( i < min )
|
||||
min = i;
|
||||
}
|
||||
}
|
||||
|
||||
return min;
|
||||
}
|
||||
|
||||
function set_current_threshold(c: connection, bytes: bool, is_orig: bool): bool
|
||||
{
|
||||
local t: count = 0;
|
||||
local cur: count = 0;
|
||||
|
||||
if ( bytes && is_orig )
|
||||
{
|
||||
t = find_min_threshold(c$thresholds$orig_byte);
|
||||
cur = get_current_conn_bytes_threshold(c$id, is_orig);
|
||||
}
|
||||
else if ( bytes && ! is_orig )
|
||||
{
|
||||
t = find_min_threshold(c$thresholds$resp_byte);
|
||||
cur = get_current_conn_bytes_threshold(c$id, is_orig);
|
||||
}
|
||||
else if ( ! bytes && is_orig )
|
||||
{
|
||||
t = find_min_threshold(c$thresholds$orig_packet);
|
||||
cur = get_current_conn_packets_threshold(c$id, is_orig);
|
||||
}
|
||||
else if ( ! bytes && ! is_orig )
|
||||
{
|
||||
t = find_min_threshold(c$thresholds$resp_packet);
|
||||
cur = get_current_conn_packets_threshold(c$id, is_orig);
|
||||
}
|
||||
|
||||
if ( t == cur )
|
||||
return T;
|
||||
|
||||
if ( bytes && is_orig )
|
||||
return set_current_conn_bytes_threshold(c$id, t, T);
|
||||
else if ( bytes && ! is_orig )
|
||||
return set_current_conn_bytes_threshold(c$id, t, F);
|
||||
else if ( ! bytes && is_orig )
|
||||
return set_current_conn_packets_threshold(c$id, t, T);
|
||||
else if ( ! bytes && ! is_orig )
|
||||
return set_current_conn_packets_threshold(c$id, t, F);
|
||||
}
|
||||
|
||||
function set_bytes_threshold(c: connection, threshold: count, is_orig: bool): bool
|
||||
{
|
||||
set_conn(c);
|
||||
|
||||
if ( threshold == 0 )
|
||||
return F;
|
||||
|
||||
if ( is_orig )
|
||||
add c$thresholds$orig_byte[threshold];
|
||||
else
|
||||
add c$thresholds$resp_byte[threshold];
|
||||
|
||||
return set_current_threshold(c, T, is_orig);
|
||||
}
|
||||
|
||||
function set_packets_threshold(c: connection, threshold: count, is_orig: bool): bool
|
||||
{
|
||||
set_conn(c);
|
||||
|
||||
if ( threshold == 0 )
|
||||
return F;
|
||||
|
||||
if ( is_orig )
|
||||
add c$thresholds$orig_packet[threshold];
|
||||
else
|
||||
add c$thresholds$resp_packet[threshold];
|
||||
|
||||
return set_current_threshold(c, F, is_orig);
|
||||
}
|
||||
|
||||
function delete_bytes_threshold(c: connection, threshold: count, is_orig: bool): bool
|
||||
{
|
||||
set_conn(c);
|
||||
|
||||
if ( is_orig && threshold in c$thresholds$orig_byte )
|
||||
{
|
||||
delete c$thresholds$orig_byte[threshold];
|
||||
set_current_threshold(c, T, is_orig);
|
||||
return T;
|
||||
}
|
||||
else if ( ! is_orig && threshold in c$thresholds$resp_byte )
|
||||
{
|
||||
delete c$thresholds$resp_byte[threshold];
|
||||
set_current_threshold(c, T, is_orig);
|
||||
return T;
|
||||
}
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
function delete_packets_threshold(c: connection, threshold: count, is_orig: bool): bool
|
||||
{
|
||||
set_conn(c);
|
||||
|
||||
if ( is_orig && threshold in c$thresholds$orig_packet )
|
||||
{
|
||||
delete c$thresholds$orig_packet[threshold];
|
||||
set_current_threshold(c, F, is_orig);
|
||||
return T;
|
||||
}
|
||||
else if ( ! is_orig && threshold in c$thresholds$resp_packet )
|
||||
{
|
||||
delete c$thresholds$resp_packet[threshold];
|
||||
set_current_threshold(c, F, is_orig);
|
||||
return T;
|
||||
}
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
event conn_bytes_threshold_crossed(c: connection, threshold: count, is_orig: bool) &priority=5
|
||||
{
|
||||
if ( is_orig && threshold in c$thresholds$orig_byte )
|
||||
{
|
||||
delete c$thresholds$orig_byte[threshold];
|
||||
event ConnThreshold::bytes_threshold_crossed(c, threshold, is_orig);
|
||||
}
|
||||
else if ( ! is_orig && threshold in c$thresholds$resp_byte )
|
||||
{
|
||||
delete c$thresholds$resp_byte[threshold];
|
||||
event ConnThreshold::bytes_threshold_crossed(c, threshold, is_orig);
|
||||
}
|
||||
|
||||
set_current_threshold(c, T, is_orig);
|
||||
}
|
||||
|
||||
event conn_packets_threshold_crossed(c: connection, threshold: count, is_orig: bool) &priority=5
|
||||
{
|
||||
if ( is_orig && threshold in c$thresholds$orig_packet )
|
||||
{
|
||||
delete c$thresholds$orig_packet[threshold];
|
||||
event ConnThreshold::packets_threshold_crossed(c, threshold, is_orig);
|
||||
}
|
||||
else if ( ! is_orig && threshold in c$thresholds$resp_packet )
|
||||
{
|
||||
delete c$thresholds$resp_packet[threshold];
|
||||
event ConnThreshold::packets_threshold_crossed(c, threshold, is_orig);
|
||||
}
|
||||
|
||||
set_current_threshold(c, F, is_orig);
|
||||
}
|
|
@ -49,7 +49,7 @@ redef likely_server_ports += { 67/udp };
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(DHCP::LOG, [$columns=Info, $ev=log_dhcp]);
|
||||
Log::create_stream(DHCP::LOG, [$columns=Info, $ev=log_dhcp, $path="dhcp"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports);
|
||||
}
|
||||
|
||||
|
|
|
@ -36,7 +36,7 @@ redef likely_server_ports += { ports };
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(DNP3::LOG, [$columns=Info, $ev=log_dnp3]);
|
||||
Log::create_stream(DNP3::LOG, [$columns=Info, $ev=log_dnp3, $path="dnp3"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_DNP3_TCP, ports);
|
||||
}
|
||||
|
||||
|
|
|
@ -150,7 +150,7 @@ redef likely_server_ports += { ports };
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(DNS::LOG, [$columns=Info, $ev=log_dns]);
|
||||
Log::create_stream(DNS::LOG, [$columns=Info, $ev=log_dns, $path="dns"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_DNS, ports);
|
||||
}
|
||||
|
||||
|
@ -305,6 +305,9 @@ hook DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
|
|||
|
||||
if ( ans$answer_type == DNS_ANS )
|
||||
{
|
||||
if ( ! c$dns?$query )
|
||||
c$dns$query = ans$query;
|
||||
|
||||
c$dns$AA = msg$AA;
|
||||
c$dns$RA = msg$RA;
|
||||
|
||||
|
|
|
@ -63,10 +63,13 @@ event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priori
|
|||
f$ftp = ftp;
|
||||
}
|
||||
|
||||
event file_mime_type(f: fa_file, mime_type: string) &priority=5
|
||||
event file_sniff(f: fa_file, meta: fa_metadata) &priority=5
|
||||
{
|
||||
if ( ! f?$ftp )
|
||||
return;
|
||||
|
||||
f$ftp$mime_type = mime_type;
|
||||
if ( ! meta?$mime_type )
|
||||
return;
|
||||
|
||||
f$ftp$mime_type = meta$mime_type;
|
||||
}
|
||||
|
|
|
@ -11,13 +11,13 @@
|
|||
##! GridFTP data channels are identified by a heuristic that relies on
|
||||
##! the fact that default settings for GridFTP clients typically
|
||||
##! mutually authenticate the data channel with TLS/SSL and negotiate a
|
||||
##! NULL bulk cipher (no encryption). Connections with those
|
||||
##! attributes are then polled for two minutes with decreasing frequency
|
||||
##! to check if the transfer sizes are large enough to indicate a
|
||||
##! GridFTP data channel that would be undesirable to analyze further
|
||||
##! (e.g. stop TCP reassembly). A side effect is that true connection
|
||||
##! sizes are not logged, but at the benefit of saving CPU cycles that
|
||||
##! would otherwise go to analyzing the large (and likely benign) connections.
|
||||
##! NULL bulk cipher (no encryption). Connections with those attributes
|
||||
##! are marked as GridFTP if the data transfer within the first two minutes
|
||||
##! is big enough to indicate a GridFTP data channel that would be
|
||||
##! undesirable to analyze further (e.g. stop TCP reassembly). A side
|
||||
##! effect is that true connection sizes are not logged, but with the benefit
|
||||
##! of saving CPU cycles that would otherwise go to analyzing the large
|
||||
##! (and likely benign) connections.
|
||||
|
||||
@load ./info
|
||||
@load ./main
|
||||
|
@ -32,23 +32,14 @@ export {
|
|||
## GridFTP data channel.
|
||||
const size_threshold = 1073741824 &redef;
|
||||
|
||||
## Max number of times to check whether a connection's size exceeds the
|
||||
## Time during which we check whether a connection's size exceeds the
|
||||
## :bro:see:`GridFTP::size_threshold`.
|
||||
const max_poll_count = 15 &redef;
|
||||
const max_time = 2 min &redef;
|
||||
|
||||
## Whether to skip further processing of the GridFTP data channel once
|
||||
## detected, which may help performance.
|
||||
const skip_data = T &redef;
|
||||
|
||||
## Base amount of time between checking whether a GridFTP data connection
|
||||
## has transferred more than :bro:see:`GridFTP::size_threshold` bytes.
|
||||
const poll_interval = 1sec &redef;
|
||||
|
||||
## The amount of time the base :bro:see:`GridFTP::poll_interval` is
|
||||
## increased by each poll interval. Can be used to make more frequent
|
||||
## checks at the start of a connection and gradually slow down.
|
||||
const poll_interval_increase = 1sec &redef;
|
||||
|
||||
## Raised when a GridFTP data channel is detected.
|
||||
##
|
||||
## c: The connection pertaining to the GridFTP data channel.
|
||||
|
@ -79,23 +70,27 @@ event ftp_request(c: connection, command: string, arg: string) &priority=4
|
|||
c$ftp$last_auth_requested = arg;
|
||||
}
|
||||
|
||||
function size_callback(c: connection, cnt: count): interval
|
||||
event ConnThreshold::bytes_threshold_crossed(c: connection, threshold: count, is_orig: bool)
|
||||
{
|
||||
if ( c$orig$size > size_threshold || c$resp$size > size_threshold )
|
||||
if ( threshold < size_threshold || "gridftp-data" in c$service || c$duration > max_time )
|
||||
return;
|
||||
|
||||
add c$service["gridftp-data"];
|
||||
event GridFTP::data_channel_detected(c);
|
||||
|
||||
if ( skip_data )
|
||||
skip_further_processing(c$id);
|
||||
}
|
||||
|
||||
event gridftp_possibility_timeout(c: connection)
|
||||
{
|
||||
# only remove if we did not already detect it and the connection
|
||||
# is not yet at its end.
|
||||
if ( "gridftp-data" !in c$service && ! (c?$conn && c$conn?$service) )
|
||||
{
|
||||
add c$service["gridftp-data"];
|
||||
event GridFTP::data_channel_detected(c);
|
||||
|
||||
if ( skip_data )
|
||||
skip_further_processing(c$id);
|
||||
|
||||
return -1sec;
|
||||
ConnThreshold::delete_bytes_threshold(c, size_threshold, T);
|
||||
ConnThreshold::delete_bytes_threshold(c, size_threshold, F);
|
||||
}
|
||||
|
||||
if ( cnt >= max_poll_count )
|
||||
return -1sec;
|
||||
|
||||
return poll_interval + poll_interval_increase * cnt;
|
||||
}
|
||||
|
||||
event ssl_established(c: connection) &priority=5
|
||||
|
@ -118,5 +113,9 @@ event ssl_established(c: connection) &priority=-3
|
|||
# By default GridFTP data channels do mutual authentication and
|
||||
# negotiate a cipher suite with a NULL bulk cipher.
|
||||
if ( data_channel_initial_criteria(c) )
|
||||
ConnPolling::watch(c, size_callback, 0, 0secs);
|
||||
{
|
||||
ConnThreshold::set_bytes_threshold(c, size_threshold, T);
|
||||
ConnThreshold::set_bytes_threshold(c, size_threshold, F);
|
||||
schedule max_time { gridftp_possibility_timeout(c) };
|
||||
}
|
||||
}
|
||||
|
|
|
@ -52,7 +52,7 @@ redef likely_server_ports += { ports };
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(FTP::LOG, [$columns=Info, $ev=log_ftp]);
|
||||
Log::create_stream(FTP::LOG, [$columns=Info, $ev=log_ftp, $path="ftp"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_FTP, ports);
|
||||
}
|
||||
|
||||
|
@ -213,7 +213,7 @@ event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &prior
|
|||
# on a different file could be checked, but the file size will
|
||||
# be overwritten by the server response to the RETR command
|
||||
# if that's given as well which would be more correct.
|
||||
c$ftp$file_size = extract_count(msg);
|
||||
c$ftp$file_size = extract_count(msg, F);
|
||||
}
|
||||
|
||||
# PASV and EPSV processing
|
||||
|
|
|
@ -43,7 +43,7 @@ export {
|
|||
|
||||
event http_begin_entity(c: connection, is_orig: bool) &priority=10
|
||||
{
|
||||
set_state(c, F, is_orig);
|
||||
set_state(c, is_orig);
|
||||
|
||||
if ( is_orig )
|
||||
++c$http$orig_mime_depth;
|
||||
|
@ -93,24 +93,27 @@ event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priori
|
|||
}
|
||||
}
|
||||
|
||||
event file_mime_type(f: fa_file, mime_type: string) &priority=5
|
||||
event file_sniff(f: fa_file, meta: fa_metadata) &priority=5
|
||||
{
|
||||
if ( ! f?$http || ! f?$is_orig )
|
||||
return;
|
||||
|
||||
if ( ! meta?$mime_type )
|
||||
return;
|
||||
|
||||
if ( f$is_orig )
|
||||
{
|
||||
if ( ! f$http?$orig_mime_types )
|
||||
f$http$orig_mime_types = string_vec(mime_type);
|
||||
f$http$orig_mime_types = string_vec(meta$mime_type);
|
||||
else
|
||||
f$http$orig_mime_types[|f$http$orig_mime_types|] = mime_type;
|
||||
f$http$orig_mime_types[|f$http$orig_mime_types|] = meta$mime_type;
|
||||
}
|
||||
else
|
||||
{
|
||||
if ( ! f$http?$resp_mime_types )
|
||||
f$http$resp_mime_types = string_vec(mime_type);
|
||||
f$http$resp_mime_types = string_vec(meta$mime_type);
|
||||
else
|
||||
f$http$resp_mime_types[|f$http$resp_mime_types|] = mime_type;
|
||||
f$http$resp_mime_types[|f$http$resp_mime_types|] = meta$mime_type;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -41,6 +41,8 @@ export {
|
|||
## misspelled like the standard declares, but the name used here
|
||||
## is "referrer" spelled correctly.
|
||||
referrer: string &log &optional;
|
||||
## Value of the version portion of the request.
|
||||
version: string &log &optional;
|
||||
## Value of the User-Agent header from the client.
|
||||
user_agent: string &log &optional;
|
||||
## Actual uncompressed content size of the data transferred from
|
||||
|
@ -89,6 +91,10 @@ export {
|
|||
current_request: count &default=0;
|
||||
## Current response in the pending queue.
|
||||
current_response: count &default=0;
|
||||
## Track the current deepest transaction.
|
||||
## This is meant to cope with missing requests
|
||||
## and responses.
|
||||
trans_depth: count &default=0;
|
||||
};
|
||||
|
||||
## A list of HTTP headers typically used to indicate proxied requests.
|
||||
|
@ -135,7 +141,7 @@ redef likely_server_ports += { ports };
|
|||
# Initialize the HTTP logging stream and ports.
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(HTTP::LOG, [$columns=Info, $ev=log_http]);
|
||||
Log::create_stream(HTTP::LOG, [$columns=Info, $ev=log_http, $path="http"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_HTTP, ports);
|
||||
}
|
||||
|
||||
|
@ -150,13 +156,11 @@ function new_http_session(c: connection): Info
|
|||
tmp$ts=network_time();
|
||||
tmp$uid=c$uid;
|
||||
tmp$id=c$id;
|
||||
# $current_request is set prior to the Info record creation so we
|
||||
# can use the value directly here.
|
||||
tmp$trans_depth = c$http_state$current_request;
|
||||
tmp$trans_depth = ++c$http_state$trans_depth;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
function set_state(c: connection, request: bool, is_orig: bool)
|
||||
function set_state(c: connection, is_orig: bool)
|
||||
{
|
||||
if ( ! c?$http_state )
|
||||
{
|
||||
|
@ -165,15 +169,20 @@ function set_state(c: connection, request: bool, is_orig: bool)
|
|||
}
|
||||
|
||||
# These deal with new requests and responses.
|
||||
if ( request || c$http_state$current_request !in c$http_state$pending )
|
||||
c$http_state$pending[c$http_state$current_request] = new_http_session(c);
|
||||
if ( ! is_orig && c$http_state$current_response !in c$http_state$pending )
|
||||
c$http_state$pending[c$http_state$current_response] = new_http_session(c);
|
||||
|
||||
if ( is_orig )
|
||||
{
|
||||
if ( c$http_state$current_request !in c$http_state$pending )
|
||||
c$http_state$pending[c$http_state$current_request] = new_http_session(c);
|
||||
|
||||
c$http = c$http_state$pending[c$http_state$current_request];
|
||||
}
|
||||
else
|
||||
{
|
||||
if ( c$http_state$current_response !in c$http_state$pending )
|
||||
c$http_state$pending[c$http_state$current_response] = new_http_session(c);
|
||||
|
||||
c$http = c$http_state$pending[c$http_state$current_response];
|
||||
}
|
||||
}
|
||||
|
||||
event http_request(c: connection, method: string, original_URI: string,
|
||||
|
@ -186,7 +195,7 @@ event http_request(c: connection, method: string, original_URI: string,
|
|||
}
|
||||
|
||||
++c$http_state$current_request;
|
||||
set_state(c, T, T);
|
||||
set_state(c, T);
|
||||
|
||||
c$http$method = method;
|
||||
c$http$uri = unescaped_URI;
|
||||
|
@ -208,11 +217,15 @@ event http_reply(c: connection, version: string, code: count, reason: string) &p
|
|||
if ( c$http_state$current_response !in c$http_state$pending ||
|
||||
(c$http_state$pending[c$http_state$current_response]?$status_code &&
|
||||
! code_in_range(c$http_state$pending[c$http_state$current_response]$status_code, 100, 199)) )
|
||||
{
|
||||
++c$http_state$current_response;
|
||||
set_state(c, F, F);
|
||||
}
|
||||
set_state(c, F);
|
||||
|
||||
c$http$status_code = code;
|
||||
c$http$status_msg = reason;
|
||||
c$http$version = version;
|
||||
|
||||
if ( code_in_range(code, 100, 199) )
|
||||
{
|
||||
c$http$info_code = code;
|
||||
|
@ -233,7 +246,7 @@ event http_reply(c: connection, version: string, code: count, reason: string) &p
|
|||
|
||||
event http_header(c: connection, is_orig: bool, name: string, value: string) &priority=5
|
||||
{
|
||||
set_state(c, F, is_orig);
|
||||
set_state(c, is_orig);
|
||||
|
||||
if ( is_orig ) # client headers
|
||||
{
|
||||
|
@ -257,11 +270,11 @@ event http_header(c: connection, is_orig: bool, name: string, value: string) &pr
|
|||
add c$http$proxied[fmt("%s -> %s", name, value)];
|
||||
}
|
||||
|
||||
else if ( name == "AUTHORIZATION" )
|
||||
else if ( name == "AUTHORIZATION" || name == "PROXY-AUTHORIZATION" )
|
||||
{
|
||||
if ( /^[bB][aA][sS][iI][cC] / in value )
|
||||
{
|
||||
local userpass = decode_base64(sub(value, /[bB][aA][sS][iI][cC][[:blank:]]/, ""));
|
||||
local userpass = decode_base64_conn(c$id, sub(value, /[bB][aA][sS][iI][cC][[:blank:]]/, ""));
|
||||
local up = split_string(userpass, /:/);
|
||||
if ( |up| >= 2 )
|
||||
{
|
||||
|
@ -278,12 +291,11 @@ event http_header(c: connection, is_orig: bool, name: string, value: string) &pr
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
event http_message_done(c: connection, is_orig: bool, stat: http_message_stat) &priority = 5
|
||||
{
|
||||
set_state(c, F, is_orig);
|
||||
set_state(c, is_orig);
|
||||
|
||||
if ( is_orig )
|
||||
c$http$request_body_len = stat$body_length;
|
||||
|
|
|
@ -42,8 +42,8 @@ event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priori
|
|||
f$irc = irc;
|
||||
}
|
||||
|
||||
event file_mime_type(f: fa_file, mime_type: string) &priority=5
|
||||
event file_sniff(f: fa_file, meta: fa_metadata) &priority=5
|
||||
{
|
||||
if ( f?$irc )
|
||||
f$irc$dcc_mime_type = mime_type;
|
||||
}
|
||||
if ( f?$irc && meta?$mime_type )
|
||||
f$irc$dcc_mime_type = meta$mime_type;
|
||||
}
|
||||
|
|
|
@ -43,7 +43,7 @@ redef likely_server_ports += { ports };
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(IRC::LOG, [$columns=Info, $ev=irc_log]);
|
||||
Log::create_stream(IRC::LOG, [$columns=Info, $ev=irc_log, $path="irc"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_IRC, ports);
|
||||
}
|
||||
|
||||
|
|
1
scripts/base/protocols/krb/README
Normal file
|
@ -0,0 +1 @@
|
|||
Support for Kerberos protocol analysis.
|
3
scripts/base/protocols/krb/__load__.bro
Normal file
|
@ -0,0 +1,3 @@
|
|||
@load ./main
|
||||
@load ./files
|
||||
@load-sigs ./dpd.sig
|
99
scripts/base/protocols/krb/consts.bro
Normal file
|
@ -0,0 +1,99 @@
|
|||
module KRB;
|
||||
|
||||
export {
|
||||
|
||||
const error_msg: table[count] of string = {
|
||||
[0] = "KDC_ERR_NONE",
|
||||
[1] = "KDC_ERR_NAME_EXP",
|
||||
[2] = "KDC_ERR_SERVICE_EXP",
|
||||
[3] = "KDC_ERR_BAD_PVNO",
|
||||
[4] = "KDC_ERR_C_OLD_MAST_KVNO",
|
||||
[5] = "KDC_ERR_S_OLD_MAST_KVNO",
|
||||
[6] = "KDC_ERR_C_PRINCIPAL_UNKNOWN",
|
||||
[7] = "KDC_ERR_S_PRINCIPAL_UNKNOWN",
|
||||
[8] = "KDC_ERR_PRINCIPAL_NOT_UNIQUE",
|
||||
[9] = "KDC_ERR_NULL_KEY",
|
||||
[10] = "KDC_ERR_CANNOT_POSTDATE",
|
||||
[11] = "KDC_ERR_NEVER_VALID",
|
||||
[12] = "KDC_ERR_POLICY",
|
||||
[13] = "KDC_ERR_BADOPTION",
|
||||
[14] = "KDC_ERR_ETYPE_NOSUPP",
|
||||
[15] = "KDC_ERR_SUMTYPE_NOSUPP",
|
||||
[16] = "KDC_ERR_PADATA_TYPE_NOSUPP",
|
||||
[17] = "KDC_ERR_TRTYPE_NOSUPP",
|
||||
[18] = "KDC_ERR_CLIENT_REVOKED",
|
||||
[19] = "KDC_ERR_SERVICE_REVOKED",
|
||||
[20] = "KDC_ERR_TGT_REVOKED",
|
||||
[21] = "KDC_ERR_CLIENT_NOTYET",
|
||||
[22] = "KDC_ERR_SERVICE_NOTYET",
|
||||
[23] = "KDC_ERR_KEY_EXPIRED",
|
||||
[24] = "KDC_ERR_PREAUTH_FAILED",
|
||||
[25] = "KDC_ERR_PREAUTH_REQUIRED",
|
||||
[26] = "KDC_ERR_SERVER_NOMATCH",
|
||||
[27] = "KDC_ERR_MUST_USE_USER2USER",
|
||||
[28] = "KDC_ERR_PATH_NOT_ACCEPTED",
|
||||
[29] = "KDC_ERR_SVC_UNAVAILABLE",
|
||||
[31] = "KRB_AP_ERR_BAD_INTEGRITY",
|
||||
[32] = "KRB_AP_ERR_TKT_EXPIRED",
|
||||
[33] = "KRB_AP_ERR_TKT_NYV",
|
||||
[34] = "KRB_AP_ERR_REPEAT",
|
||||
[35] = "KRB_AP_ERR_NOT_US",
|
||||
[36] = "KRB_AP_ERR_BADMATCH",
|
||||
[37] = "KRB_AP_ERR_SKEW",
|
||||
[38] = "KRB_AP_ERR_BADADDR",
|
||||
[39] = "KRB_AP_ERR_BADVERSION",
|
||||
[40] = "KRB_AP_ERR_MSG_TYPE",
|
||||
[41] = "KRB_AP_ERR_MODIFIED",
|
||||
[42] = "KRB_AP_ERR_BADORDER",
|
||||
[44] = "KRB_AP_ERR_BADKEYVER",
|
||||
[45] = "KRB_AP_ERR_NOKEY",
|
||||
[46] = "KRB_AP_ERR_MUT_FAIL",
|
||||
[47] = "KRB_AP_ERR_BADDIRECTION",
|
||||
[48] = "KRB_AP_ERR_METHOD",
|
||||
[49] = "KRB_AP_ERR_BADSEQ",
|
||||
[50] = "KRB_AP_ERR_INAPP_CKSUM",
|
||||
[51] = "KRB_AP_PATH_NOT_ACCEPTED",
|
||||
[52] = "KRB_ERR_RESPONSE_TOO_BIG",
|
||||
[60] = "KRB_ERR_GENERIC",
|
||||
[61] = "KRB_ERR_FIELD_TOOLONG",
|
||||
[62] = "KDC_ERROR_CLIENT_NOT_TRUSTED",
|
||||
[63] = "KDC_ERROR_KDC_NOT_TRUSTED",
|
||||
[64] = "KDC_ERROR_INVALID_SIG",
|
||||
[65] = "KDC_ERR_KEY_TOO_WEAK",
|
||||
[66] = "KDC_ERR_CERTIFICATE_MISMATCH",
|
||||
[67] = "KRB_AP_ERR_NO_TGT",
|
||||
[68] = "KDC_ERR_WRONG_REALM",
|
||||
[69] = "KRB_AP_ERR_USER_TO_USER_REQUIRED",
|
||||
[70] = "KDC_ERR_CANT_VERIFY_CERTIFICATE",
|
||||
[71] = "KDC_ERR_INVALID_CERTIFICATE",
|
||||
[72] = "KDC_ERR_REVOKED_CERTIFICATE",
|
||||
[73] = "KDC_ERR_REVOCATION_STATUS_UNKNOWN",
|
||||
[74] = "KDC_ERR_REVOCATION_STATUS_UNAVAILABLE",
|
||||
[75] = "KDC_ERR_CLIENT_NAME_MISMATCH",
|
||||
[76] = "KDC_ERR_KDC_NAME_MISMATCH",
|
||||
};
|
||||
|
||||
const cipher_name: table[count] of string = {
|
||||
[1] = "des-cbc-crc",
|
||||
[2] = "des-cbc-md4",
|
||||
[3] = "des-cbc-md5",
|
||||
[5] = "des3-cbc-md5",
|
||||
[7] = "des3-cbc-sha1",
|
||||
[9] = "dsaWithSHA1-CmsOID",
|
||||
[10] = "md5WithRSAEncryption-CmsOID",
|
||||
[11] = "sha1WithRSAEncryption-CmsOID",
|
||||
[12] = "rc2CBC-EnvOID",
|
||||
[13] = "rsaEncryption-EnvOID",
|
||||
[14] = "rsaES-OAEP-ENV-OID",
|
||||
[15] = "des-ede3-cbc-Env-OID",
|
||||
[16] = "des3-cbc-sha1-kd",
|
||||
[17] = "aes128-cts-hmac-sha1-96",
|
||||
[18] = "aes256-cts-hmac-sha1-96",
|
||||
[23] = "rc4-hmac",
|
||||
[24] = "rc4-hmac-exp",
|
||||
[25] = "camellia128-cts-cmac",
|
||||
[26] = "camellia256-cts-cmac",
|
||||
[65] = "subkey-keymaterial",
|
||||
};
|
||||
|
||||
}
|
26
scripts/base/protocols/krb/dpd.sig
Normal file
|
@ -0,0 +1,26 @@
|
|||
# This is the ASN.1 encoded version and message type headers
|
||||
|
||||
signature dpd_krb_udp_requests {
|
||||
ip-proto == udp
|
||||
payload /(\x6a|\x6c).{1,4}\x30.{1,4}\xa1\x03\x02\x01\x05\xa2\x03\x02\x01/
|
||||
enable "krb"
|
||||
}
|
||||
|
||||
signature dpd_krb_udp_replies {
|
||||
ip-proto == udp
|
||||
payload /(\x6b|\x6d|\x7e).{1,4}\x30.{1,4}\xa0\x03\x02\x01\x05\xa1\x03\x02\x01/
|
||||
enable "krb"
|
||||
}
|
||||
|
||||
signature dpd_krb_tcp_requests {
|
||||
ip-proto == tcp
|
||||
payload /.{4}(\x6a|\x6c).{1,4}\x30.{1,4}\xa1\x03\x02\x01\x05\xa2\x03\x02\x01/
|
||||
enable "krb_tcp"
|
||||
}
|
||||
|
||||
signature dpd_krb_tcp_replies {
|
||||
ip-proto == tcp
|
||||
payload /.{4}(\x6b|\x6d|\x7e).{1,4}\x30.{1,4}\xa0\x03\x02\x01\x05\xa1\x03\x02\x01/
|
||||
enable "krb_tcp"
|
||||
}
|
||||
|
142
scripts/base/protocols/krb/files.bro
Normal file
|
@ -0,0 +1,142 @@
|
|||
@load ./main
|
||||
@load base/utils/conn-ids
|
||||
@load base/frameworks/files
|
||||
@load base/files/x509
|
||||
|
||||
module KRB;
|
||||
|
||||
export {
|
||||
redef record Info += {
|
||||
# Client certificate
|
||||
client_cert: Files::Info &optional;
|
||||
# Subject of client certificate, if any
|
||||
client_cert_subject: string &log &optional;
|
||||
# File unique ID of client cert, if any
|
||||
client_cert_fuid: string &log &optional;
|
||||
|
||||
# Server certificate
|
||||
server_cert: Files::Info &optional;
|
||||
# Subject of server certificate, if any
|
||||
server_cert_subject: string &log &optional;
|
||||
# File unique ID of server cert, if any
|
||||
server_cert_fuid: string &log &optional;
|
||||
};
|
||||
|
||||
## Default file handle provider for KRB.
|
||||
global get_file_handle: function(c: connection, is_orig: bool): string;
|
||||
|
||||
## Default file describer for KRB.
|
||||
global describe_file: function(f: fa_file): string;
|
||||
}
|
||||
|
||||
function get_file_handle(c: connection, is_orig: bool): string
|
||||
{
|
||||
# Unused. File handles are generated in the analyzer.
|
||||
return "";
|
||||
}
|
||||
|
||||
function describe_file(f: fa_file): string
|
||||
{
|
||||
if ( f$source != "KRB_TCP" && f$source != "KRB" )
|
||||
return "";
|
||||
|
||||
if ( ! f?$info || ! f$info?$x509 || ! f$info$x509?$certificate )
|
||||
return "";
|
||||
|
||||
# It is difficult to reliably describe a certificate - especially since
|
||||
# we do not know when this function is called (hence, if the data structures
|
||||
# are already populated).
|
||||
#
|
||||
# Just return a bit of our connection information and hope that that is good enough.
|
||||
for ( cid in f$conns )
|
||||
{
|
||||
if ( f$conns[cid]?$krb )
|
||||
{
|
||||
local c = f$conns[cid];
|
||||
return cat(c$id$resp_h, ":", c$id$resp_p);
|
||||
}
|
||||
}
|
||||
|
||||
return cat("Serial: ", f$info$x509$certificate$serial, " Subject: ",
|
||||
f$info$x509$certificate$subject, " Issuer: ",
|
||||
f$info$x509$certificate$issuer);
|
||||
}
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Files::register_protocol(Analyzer::ANALYZER_KRB_TCP,
|
||||
[$get_file_handle = KRB::get_file_handle,
|
||||
$describe = KRB::describe_file]);
|
||||
|
||||
Files::register_protocol(Analyzer::ANALYZER_KRB,
|
||||
[$get_file_handle = KRB::get_file_handle,
|
||||
$describe = KRB::describe_file]);
|
||||
}
|
||||
|
||||
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5
|
||||
{
|
||||
if ( f$source != "KRB_TCP" && f$source != "KRB" )
|
||||
return;
|
||||
|
||||
local info: Info;
|
||||
|
||||
if ( ! c?$krb )
|
||||
{
|
||||
info$ts = network_time();
|
||||
info$uid = c$uid;
|
||||
info$id = c$id;
|
||||
}
|
||||
else
|
||||
info = c$krb;
|
||||
|
||||
if ( is_orig )
|
||||
{
|
||||
info$client_cert = f$info;
|
||||
info$client_cert_fuid = f$id;
|
||||
}
|
||||
else
|
||||
{
|
||||
info$server_cert = f$info;
|
||||
info$server_cert_fuid = f$id;
|
||||
}
|
||||
|
||||
c$krb = info;
|
||||
|
||||
Files::add_analyzer(f, Files::ANALYZER_X509);
|
||||
# Always calculate hashes. They are not necessary for base scripts
|
||||
# but very useful for identification, and required for policy scripts
|
||||
Files::add_analyzer(f, Files::ANALYZER_MD5);
|
||||
Files::add_analyzer(f, Files::ANALYZER_SHA1);
|
||||
}
|
||||
|
||||
function fill_in_subjects(c: connection)
|
||||
{
|
||||
if ( !c?$krb )
|
||||
return;
|
||||
|
||||
if ( c$krb?$client_cert && c$krb$client_cert?$x509 && c$krb$client_cert$x509?$certificate )
|
||||
c$krb$client_cert_subject = c$krb$client_cert$x509$certificate$subject;
|
||||
|
||||
if ( c$krb?$server_cert && c$krb$server_cert?$x509 && c$krb$server_cert$x509?$certificate )
|
||||
c$krb$server_cert_subject = c$krb$server_cert$x509$certificate$subject;
|
||||
}
|
||||
|
||||
event krb_error(c: connection, msg: Error_Msg)
|
||||
{
|
||||
fill_in_subjects(c);
|
||||
}
|
||||
|
||||
event krb_as_response(c: connection, msg: KDC_Response)
|
||||
{
|
||||
fill_in_subjects(c);
|
||||
}
|
||||
|
||||
event krb_tgs_response(c: connection, msg: KDC_Response)
|
||||
{
|
||||
fill_in_subjects(c);
|
||||
}
|
||||
|
||||
event connection_state_remove(c: connection)
|
||||
{
|
||||
fill_in_subjects(c);
|
||||
}
|
251
scripts/base/protocols/krb/main.bro
Normal file
|
@ -0,0 +1,251 @@
|
|||
##! Implements base functionality for KRB analysis. Generates the kerberos.log
|
||||
##! file.
|
||||
|
||||
module KRB;
|
||||
|
||||
@load ./consts
|
||||
|
||||
export {
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
type Info: record {
|
||||
## Timestamp for when the event happened.
|
||||
ts: time &log;
|
||||
## Unique ID for the connection.
|
||||
uid: string &log;
|
||||
## The connection's 4-tuple of endpoint addresses/ports.
|
||||
id: conn_id &log;
|
||||
|
||||
## Request type - Authentication Service ("AS") or
|
||||
## Ticket Granting Service ("TGS")
|
||||
request_type: string &log &optional;
|
||||
## Client
|
||||
client: string &log &optional;
|
||||
## Service
|
||||
service: string &log;
|
||||
|
||||
## Request result
|
||||
success: bool &log &optional;
|
||||
## Error code
|
||||
error_code: count &optional;
|
||||
## Error message
|
||||
error_msg: string &log &optional;
|
||||
|
||||
## Ticket valid from
|
||||
from: time &log &optional;
|
||||
## Ticket valid till
|
||||
till: time &log &optional;
|
||||
## Ticket encryption type
|
||||
cipher: string &log &optional;
|
||||
|
||||
## Forwardable ticket requested
|
||||
forwardable: bool &log &optional;
|
||||
## Renewable ticket requested
|
||||
renewable: bool &log &optional;
|
||||
|
||||
## We've already logged this
|
||||
logged: bool &default=F;
|
||||
};
|
||||
|
||||
## The server response error texts which are *not* logged.
|
||||
const ignored_errors: set[string] = {
|
||||
# Logging NEEDED_PREAUTH significantly increases the noisiness
|
||||
# of the log. However, one attack is to iterate over principals,
|
||||
# looking for ones that don't require preauth, and then perform
|
||||
# an offline attack on that ticket. To detect that attack,
|
||||
# remove NEEDED_PREAUTH from this set so that it is logged.
|
||||
"NEEDED_PREAUTH",
|
||||
# This is a more specific version of NEEDED_PREAUTH that's used
|
||||
# by Windows AD Kerberos.
|
||||
"Need to use PA-ENC-TIMESTAMP/PA-PK-AS-REQ",
|
||||
} &redef;
|
||||
|
||||
## Event that can be handled to access the KRB record as it is sent on
|
||||
## to the logging framework.
|
||||
global log_krb: event(rec: Info);
|
||||
}
|
||||
|
||||
redef record connection += {
|
||||
krb: Info &optional;
|
||||
};
|
||||
|
||||
const tcp_ports = { 88/tcp };
|
||||
const udp_ports = { 88/udp };
|
||||
redef likely_server_ports += { tcp_ports, udp_ports };
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_KRB, udp_ports);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_KRB_TCP, tcp_ports);
|
||||
Log::create_stream(KRB::LOG, [$columns=Info, $ev=log_krb, $path="kerberos"]);
|
||||
}
|
||||
|
||||
event krb_error(c: connection, msg: Error_Msg) &priority=5
|
||||
{
|
||||
local info: Info;
|
||||
|
||||
if ( msg?$error_text && msg$error_text in ignored_errors )
|
||||
{
|
||||
if ( c?$krb ) delete c$krb;
|
||||
return;
|
||||
}
|
||||
|
||||
if ( c?$krb && c$krb$logged )
|
||||
return;
|
||||
|
||||
if ( c?$krb )
|
||||
info = c$krb;
|
||||
|
||||
if ( ! info?$ts )
|
||||
{
|
||||
info$ts = network_time();
|
||||
info$uid = c$uid;
|
||||
info$id = c$id;
|
||||
}
|
||||
|
||||
if ( ! info?$client && ( msg?$client_name || msg?$client_realm ) )
|
||||
info$client = fmt("%s%s", msg?$client_name ? msg$client_name + "/" : "",
|
||||
msg?$client_realm ? msg$client_realm : "");
|
||||
|
||||
info$service = msg$service_name;
|
||||
info$success = F;
|
||||
|
||||
info$error_code = msg$error_code;
|
||||
|
||||
if ( msg?$error_text ) info$error_msg = msg$error_text;
|
||||
else if ( msg$error_code in error_msg ) info$error_msg = error_msg[msg$error_code];
|
||||
|
||||
c$krb = info;
|
||||
}
|
||||
|
||||
event krb_error(c: connection, msg: Error_Msg) &priority=-5
|
||||
{
|
||||
if ( c?$krb )
|
||||
{
|
||||
Log::write(KRB::LOG, c$krb);
|
||||
c$krb$logged = T;
|
||||
}
|
||||
}
|
||||
|
||||
event krb_as_request(c: connection, msg: KDC_Request) &priority=5
|
||||
{
|
||||
if ( c?$krb && c$krb$logged )
|
||||
return;
|
||||
|
||||
local info: Info;
|
||||
|
||||
if ( !c?$krb )
|
||||
{
|
||||
info$ts = network_time();
|
||||
info$uid = c$uid;
|
||||
info$id = c$id;
|
||||
}
|
||||
else
|
||||
info = c$krb;
|
||||
|
||||
info$request_type = "AS";
|
||||
info$client = fmt("%s/%s", msg$client_name, msg$service_realm);
|
||||
info$service = msg$service_name;
|
||||
|
||||
if ( msg?$from )
|
||||
info$from = msg$from;
|
||||
|
||||
info$till = msg$till;
|
||||
|
||||
info$forwardable = msg$kdc_options$forwardable;
|
||||
info$renewable = msg$kdc_options$renewable;
|
||||
|
||||
c$krb = info;
|
||||
}
|
||||
|
||||
event krb_tgs_request(c: connection, msg: KDC_Request) &priority=5
|
||||
{
|
||||
if ( c?$krb && c$krb$logged )
|
||||
return;
|
||||
|
||||
local info: Info;
|
||||
info$ts = network_time();
|
||||
info$uid = c$uid;
|
||||
info$id = c$id;
|
||||
info$request_type = "TGS";
|
||||
info$service = msg$service_name;
|
||||
if ( msg?$from ) info$from = msg$from;
|
||||
info$till = msg$till;
|
||||
|
||||
info$forwardable = msg$kdc_options$forwardable;
|
||||
info$renewable = msg$kdc_options$renewable;
|
||||
|
||||
c$krb = info;
|
||||
}
|
||||
|
||||
event krb_as_response(c: connection, msg: KDC_Response) &priority=5
|
||||
{
|
||||
local info: Info;
|
||||
|
||||
if ( c?$krb && c$krb$logged )
|
||||
return;
|
||||
|
||||
if ( c?$krb )
|
||||
info = c$krb;
|
||||
|
||||
if ( ! info?$ts )
|
||||
{
|
||||
info$ts = network_time();
|
||||
info$uid = c$uid;
|
||||
info$id = c$id;
|
||||
}
|
||||
|
||||
if ( ! info?$client )
|
||||
info$client = fmt("%s/%s", msg$client_name, msg$client_realm);
|
||||
|
||||
info$service = msg$ticket$service_name;
|
||||
info$cipher = cipher_name[msg$ticket$cipher];
|
||||
info$success = T;
|
||||
|
||||
c$krb = info;
|
||||
}
|
||||
|
||||
event krb_as_response(c: connection, msg: KDC_Response) &priority=-5
|
||||
{
|
||||
Log::write(KRB::LOG, c$krb);
|
||||
c$krb$logged = T;
|
||||
}
|
||||
|
||||
event krb_tgs_response(c: connection, msg: KDC_Response) &priority=5
|
||||
{
|
||||
local info: Info;
|
||||
|
||||
if ( c?$krb && c$krb$logged )
|
||||
return;
|
||||
|
||||
if ( c?$krb )
|
||||
info = c$krb;
|
||||
|
||||
if ( ! info?$ts )
|
||||
{
|
||||
info$ts = network_time();
|
||||
info$uid = c$uid;
|
||||
info$id = c$id;
|
||||
}
|
||||
|
||||
if ( ! info?$client )
|
||||
info$client = fmt("%s/%s", msg$client_name, msg$client_realm);
|
||||
|
||||
info$service = msg$ticket$service_name;
|
||||
info$cipher = cipher_name[msg$ticket$cipher];
|
||||
info$success = T;
|
||||
|
||||
c$krb = info;
|
||||
}
|
||||
|
||||
event krb_tgs_response(c: connection, msg: KDC_Response) &priority=-5
|
||||
{
|
||||
Log::write(KRB::LOG, c$krb);
|
||||
c$krb$logged = T;
|
||||
}
|
||||
|
||||
event connection_state_remove(c: connection) &priority=-5
|
||||
{
|
||||
if ( c?$krb && ! c$krb$logged )
|
||||
Log::write(KRB::LOG, c$krb);
|
||||
}
|
|
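kerberos.log can be tuned through the redef-able ignored_errors set and the log_krb event declared above. A hedged sketch of site-local tuning (the choice to log NEEDED_PREAUTH and the handler body are illustrative only):

# Log pre-authentication probing instead of ignoring it.
redef KRB::ignored_errors -= { "NEEDED_PREAUTH" };

event KRB::log_krb(rec: KRB::Info)
	{
	# Illustrative: surface failed requests as they are written to kerberos.log.
	if ( rec?$success && ! rec$success && rec?$error_msg )
		print fmt("Kerberos failure for service %s: %s", rec$service, rec$error_msg);
	}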
@@ -34,7 +34,7 @@ redef likely_server_ports += { ports };
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(Modbus::LOG, [$columns=Info, $ev=log_modbus]);
|
||||
Log::create_stream(Modbus::LOG, [$columns=Info, $ev=log_modbus, $path="modbus"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_MODBUS, ports);
|
||||
}
|
||||
|
||||
|
|
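The Modbus change above, like the MySQL, RADIUS, and SMTP hunks that follow, adds an explicit $path to Log::create_stream so the on-disk log name no longer depends on the default path derived from the Log::ID. A minimal sketch of the same pattern for a hypothetical module (all names here are invented for illustration):

module Demo;   # hypothetical module, shown only to illustrate the $path argument

export {
	redef enum Log::ID += { LOG };
	type Info: record { ts: time &log; };
	global log_demo: event(rec: Info);
}

event bro_init() &priority=5
	{
	# Without $path the file name would be derived from the stream ID.
	Log::create_stream(Demo::LOG, [$columns=Info, $ev=log_demo, $path="demo"]);
	}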
1
scripts/base/protocols/mysql/README
Normal file
|
@@ -0,0 +1 @@
|
|||
Support for MySQL protocol analysis.
|
|
@@ -39,7 +39,7 @@ const ports = { 1434/tcp, 3306/tcp };
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(mysql::LOG, [$columns=Info, $ev=log_mysql]);
|
||||
Log::create_stream(mysql::LOG, [$columns=Info, $ev=log_mysql, $path="mysql"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_MYSQL, ports);
|
||||
}
|
||||
|
||||
|
|
1
scripts/base/protocols/radius/README
Normal file
|
@@ -0,0 +1 @@
|
|||
Support for RADIUS protocol analysis.
|
|
@@ -59,7 +59,7 @@ const ports = { 1812/udp };
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(RADIUS::LOG, [$columns=Info, $ev=log_radius]);
|
||||
Log::create_stream(RADIUS::LOG, [$columns=Info, $ev=log_radius, $path="radius"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_RADIUS, ports);
|
||||
}
|
||||
|
||||
|
|
1
scripts/base/protocols/rdp/README
Normal file
|
@@ -0,0 +1 @@
|
|||
Support for Remote Desktop Protocol (RDP) analysis.
|
3
scripts/base/protocols/rdp/__load__.bro
Normal file
|
@@ -0,0 +1,3 @@
|
|||
@load ./consts
|
||||
@load ./main
|
||||
@load-sigs ./dpd.sig
|
323
scripts/base/protocols/rdp/consts.bro
Normal file
|
@@ -0,0 +1,323 @@
|
|||
module RDP;
|
||||
|
||||
export {
|
||||
# http://www.c-amie.co.uk/technical/mstsc-versions/
|
||||
const builds = {
|
||||
[0419] = "RDP 4.0",
|
||||
[2195] = "RDP 5.0",
|
||||
[2221] = "RDP 5.0",
|
||||
[2600] = "RDP 5.1",
|
||||
[3790] = "RDP 5.2",
|
||||
[6000] = "RDP 6.0",
|
||||
[6001] = "RDP 6.1",
|
||||
[6002] = "RDP 6.2",
|
||||
[7600] = "RDP 7.0",
|
||||
[7601] = "RDP 7.1",
|
||||
[9200] = "RDP 8.0",
|
||||
[9600] = "RDP 8.1",
|
||||
[25189] = "RDP 8.0 (Mac)",
|
||||
[25282] = "RDP 8.0 (Mac)"
|
||||
} &default = function(n: count): string { return fmt("client_build-%d", n); };
|
||||
|
||||
const security_protocols = {
|
||||
[0x00] = "RDP",
|
||||
[0x01] = "SSL",
|
||||
[0x02] = "HYBRID",
|
||||
[0x08] = "HYBRID_EX"
|
||||
} &default = function(n: count): string { return fmt("security_protocol-%d", n); };
|
||||
|
||||
const failure_codes = {
|
||||
[0x01] = "SSL_REQUIRED_BY_SERVER",
|
||||
[0x02] = "SSL_NOT_ALLOWED_BY_SERVER",
|
||||
[0x03] = "SSL_CERT_NOT_ON_SERVER",
|
||||
[0x04] = "INCONSISTENT_FLAGS",
|
||||
[0x05] = "HYBRID_REQUIRED_BY_SERVER",
|
||||
[0x06] = "SSL_WITH_USER_AUTH_REQUIRED_BY_SERVER"
|
||||
} &default = function(n: count): string { return fmt("failure_code-%d", n); };
|
||||
|
||||
const cert_types = {
|
||||
[1] = "RSA",
|
||||
[2] = "X.509"
|
||||
} &default = function(n: count): string { return fmt("cert_type-%d", n); };
|
||||
|
||||
const encryption_methods = {
|
||||
[0] = "None",
|
||||
[1] = "40bit",
|
||||
[2] = "128bit",
|
||||
[8] = "56bit",
|
||||
[10] = "FIPS"
|
||||
} &default = function(n: count): string { return fmt("encryption_method-%d", n); };
|
||||
|
||||
const encryption_levels = {
|
||||
[0] = "None",
|
||||
[1] = "Low",
|
||||
[2] = "Client compatible",
|
||||
[3] = "High",
|
||||
[4] = "FIPS"
|
||||
} &default = function(n: count): string { return fmt("encryption_level-%d", n); };
|
||||
|
||||
const high_color_depths = {
|
||||
[0x0004] = "4bit",
|
||||
[0x0008] = "8bit",
|
||||
[0x000F] = "15bit",
|
||||
[0x0010] = "16bit",
|
||||
[0x0018] = "24bit"
|
||||
} &default = function(n: count): string { return fmt("high_color_depth-%d", n); };
|
||||
|
||||
const color_depths = {
|
||||
[0x0001] = "24bit",
|
||||
[0x0002] = "16bit",
|
||||
[0x0004] = "15bit",
|
||||
[0x0008] = "32bit"
|
||||
} &default = function(n: count): string { return fmt("color_depth-%d", n); };
|
||||
|
||||
const results = {
|
||||
[0] = "Success",
|
||||
[1] = "User rejected",
|
||||
[2] = "Resources not available",
|
||||
[3] = "Rejected for symmetry breaking",
|
||||
[4] = "Locked conference",
|
||||
} &default = function(n: count): string { return fmt("result-%d", n); };
|
||||
|
||||
# http://msdn.microsoft.com/en-us/goglobal/bb964664.aspx
|
||||
const languages = {
|
||||
[1078] = "Afrikaans - South Africa",
|
||||
[1052] = "Albanian - Albania",
|
||||
[1156] = "Alsatian",
|
||||
[1118] = "Amharic - Ethiopia",
|
||||
[1025] = "Arabic - Saudi Arabia",
|
||||
[5121] = "Arabic - Algeria",
|
||||
[15361] = "Arabic - Bahrain",
|
||||
[3073] = "Arabic - Egypt",
|
||||
[2049] = "Arabic - Iraq",
|
||||
[11265] = "Arabic - Jordan",
|
||||
[13313] = "Arabic - Kuwait",
|
||||
[12289] = "Arabic - Lebanon",
|
||||
[4097] = "Arabic - Libya",
|
||||
[6145] = "Arabic - Morocco",
|
||||
[8193] = "Arabic - Oman",
|
||||
[16385] = "Arabic - Qatar",
|
||||
[10241] = "Arabic - Syria",
|
||||
[7169] = "Arabic - Tunisia",
|
||||
[14337] = "Arabic - U.A.E.",
|
||||
[9217] = "Arabic - Yemen",
|
||||
[1067] = "Armenian - Armenia",
|
||||
[1101] = "Assamese",
|
||||
[2092] = "Azeri (Cyrillic)",
|
||||
[1068] = "Azeri (Latin)",
|
||||
[1133] = "Bashkir",
|
||||
[1069] = "Basque",
|
||||
[1059] = "Belarusian",
|
||||
[1093] = "Bengali (India)",
|
||||
[2117] = "Bengali (Bangladesh)",
|
||||
[5146] = "Bosnian (Bosnia/Herzegovina)",
|
||||
[1150] = "Breton",
|
||||
[1026] = "Bulgarian",
|
||||
[1109] = "Burmese",
|
||||
[1027] = "Catalan",
|
||||
[1116] = "Cherokee - United States",
|
||||
[2052] = "Chinese - People's Republic of China",
|
||||
[4100] = "Chinese - Singapore",
|
||||
[1028] = "Chinese - Taiwan",
|
||||
[3076] = "Chinese - Hong Kong SAR",
|
||||
[5124] = "Chinese - Macao SAR",
|
||||
[1155] = "Corsican",
|
||||
[1050] = "Croatian",
|
||||
[4122] = "Croatian (Bosnia/Herzegovina)",
|
||||
[1029] = "Czech",
|
||||
[1030] = "Danish",
|
||||
[1164] = "Dari",
|
||||
[1125] = "Divehi",
|
||||
[1043] = "Dutch - Netherlands",
|
||||
[2067] = "Dutch - Belgium",
|
||||
[1126] = "Edo",
|
||||
[1033] = "English - United States",
|
||||
[2057] = "English - United Kingdom",
|
||||
[3081] = "English - Australia",
|
||||
[10249] = "English - Belize",
|
||||
[4105] = "English - Canada",
|
||||
[9225] = "English - Caribbean",
|
||||
[15369] = "English - Hong Kong SAR",
|
||||
[16393] = "English - India",
|
||||
[14345] = "English - Indonesia",
|
||||
[6153] = "English - Ireland",
|
||||
[8201] = "English - Jamaica",
|
||||
[17417] = "English - Malaysia",
|
||||
[5129] = "English - New Zealand",
|
||||
[13321] = "English - Philippines",
|
||||
[18441] = "English - Singapore",
|
||||
[7177] = "English - South Africa",
|
||||
[11273] = "English - Trinidad",
|
||||
[12297] = "English - Zimbabwe",
|
||||
[1061] = "Estonian",
|
||||
[1080] = "Faroese",
|
||||
[1065] = "Farsi",
|
||||
[1124] = "Filipino",
|
||||
[1035] = "Finnish",
|
||||
[1036] = "French - France",
|
||||
[2060] = "French - Belgium",
|
||||
[11276] = "French - Cameroon",
|
||||
[3084] = "French - Canada",
|
||||
[9228] = "French - Democratic Rep. of Congo",
|
||||
[12300] = "French - Cote d'Ivoire",
|
||||
[15372] = "French - Haiti",
|
||||
[5132] = "French - Luxembourg",
|
||||
[13324] = "French - Mali",
|
||||
[6156] = "French - Monaco",
|
||||
[14348] = "French - Morocco",
|
||||
[58380] = "French - North Africa",
|
||||
[8204] = "French - Reunion",
|
||||
[10252] = "French - Senegal",
|
||||
[4108] = "French - Switzerland",
|
||||
[7180] = "French - West Indies",
|
||||
[1122] = "French - West Indies",
|
||||
[1127] = "Fulfulde - Nigeria",
|
||||
[1071] = "FYRO Macedonian",
|
||||
[1110] = "Galician",
|
||||
[1079] = "Georgian",
|
||||
[1031] = "German - Germany",
|
||||
[3079] = "German - Austria",
|
||||
[5127] = "German - Liechtenstein",
|
||||
[4103] = "German - Luxembourg",
|
||||
[2055] = "German - Switzerland",
|
||||
[1032] = "Greek",
|
||||
[1135] = "Greenlandic",
|
||||
[1140] = "Guarani - Paraguay",
|
||||
[1095] = "Gujarati",
|
||||
[1128] = "Hausa - Nigeria",
|
||||
[1141] = "Hawaiian - United States",
|
||||
[1037] = "Hebrew",
|
||||
[1081] = "Hindi",
|
||||
[1038] = "Hungarian",
|
||||
[1129] = "Ibibio - Nigeria",
|
||||
[1039] = "Icelandic",
|
||||
[1136] = "Igbo - Nigeria",
|
||||
[1057] = "Indonesian",
|
||||
[1117] = "Inuktitut",
|
||||
[2108] = "Irish",
|
||||
[1040] = "Italian - Italy",
|
||||
[2064] = "Italian - Switzerland",
|
||||
[1041] = "Japanese",
|
||||
[1158] = "K'iche",
|
||||
[1099] = "Kannada",
|
||||
[1137] = "Kanuri - Nigeria",
|
||||
[2144] = "Kashmiri",
|
||||
[1120] = "Kashmiri (Arabic)",
|
||||
[1087] = "Kazakh",
|
||||
[1107] = "Khmer",
|
||||
[1159] = "Kinyarwanda",
|
||||
[1111] = "Konkani",
|
||||
[1042] = "Korean",
|
||||
[1088] = "Kyrgyz (Cyrillic)",
|
||||
[1108] = "Lao",
|
||||
[1142] = "Latin",
|
||||
[1062] = "Latvian",
|
||||
[1063] = "Lithuanian",
|
||||
[1134] = "Luxembourgish",
|
||||
[1086] = "Malay - Malaysia",
|
||||
[2110] = "Malay - Brunei Darussalam",
|
||||
[1100] = "Malayalam",
|
||||
[1082] = "Maltese",
|
||||
[1112] = "Manipuri",
|
||||
[1153] = "Maori - New Zealand",
|
||||
[1146] = "Mapudungun",
|
||||
[1102] = "Marathi",
|
||||
[1148] = "Mohawk",
|
||||
[1104] = "Mongolian (Cyrillic)",
|
||||
[2128] = "Mongolian (Mongolian)",
|
||||
[1121] = "Nepali",
|
||||
[2145] = "Nepali - India",
|
||||
[1044] = "Norwegian (Bokmål)",
|
||||
[2068] = "Norwegian (Nynorsk)",
|
||||
[1154] = "Occitan",
|
||||
[1096] = "Oriya",
|
||||
[1138] = "Oromo",
|
||||
[1145] = "Papiamentu",
|
||||
[1123] = "Pashto",
|
||||
[1045] = "Polish",
|
||||
[1046] = "Portuguese - Brazil",
|
||||
[2070] = "Portuguese - Portugal",
|
||||
[1094] = "Punjabi",
|
||||
[2118] = "Punjabi (Pakistan)",
|
||||
[1131] = "Quecha - Bolivia",
|
||||
[2155] = "Quecha - Ecuador",
|
||||
[3179] = "Quecha - Peru CB",
|
||||
[1047] = "Rhaeto-Romanic",
|
||||
[1048] = "Romanian",
|
||||
[2072] = "Romanian - Moldava",
|
||||
[1049] = "Russian",
|
||||
[2073] = "Russian - Moldava",
|
||||
[1083] = "Sami (Lappish)",
|
||||
[1103] = "Sanskrit",
|
||||
[1084] = "Scottish Gaelic",
|
||||
[1132] = "Sepedi",
|
||||
[3098] = "Serbian (Cyrillic)",
|
||||
[2074] = "Serbian (Latin)",
|
||||
[1113] = "Sindhi - India",
|
||||
[2137] = "Sindhi - Pakistan",
|
||||
[1115] = "Sinhalese - Sri Lanka",
|
||||
[1051] = "Slovak",
|
||||
[1060] = "Slovenian",
|
||||
[1143] = "Somali",
|
||||
[1070] = "Sorbian",
|
||||
[3082] = "Spanish - Spain (Modern Sort)",
|
||||
[1034] = "Spanish - Spain (Traditional Sort)",
|
||||
[11274] = "Spanish - Argentina",
|
||||
[16394] = "Spanish - Bolivia",
|
||||
[13322] = "Spanish - Chile",
|
||||
[9226] = "Spanish - Colombia",
|
||||
[5130] = "Spanish - Costa Rica",
|
||||
[7178] = "Spanish - Dominican Republic",
|
||||
[12298] = "Spanish - Ecuador",
|
||||
[17418] = "Spanish - El Salvador",
|
||||
[4106] = "Spanish - Guatemala",
|
||||
[18442] = "Spanish - Honduras",
|
||||
[22538] = "Spanish - Latin America",
|
||||
[2058] = "Spanish - Mexico",
|
||||
[19466] = "Spanish - Nicaragua",
|
||||
[6154] = "Spanish - Panama",
|
||||
[15370] = "Spanish - Paraguay",
|
||||
[10250] = "Spanish - Peru",
|
||||
[20490] = "Spanish - Puerto Rico",
|
||||
[21514] = "Spanish - United States",
|
||||
[14346] = "Spanish - Uruguay",
|
||||
[8202] = "Spanish - Venezuela",
|
||||
[1072] = "Sutu",
|
||||
[1089] = "Swahili",
|
||||
[1053] = "Swedish",
|
||||
[2077] = "Swedish - Finland",
|
||||
[1114] = "Syriac",
|
||||
[1064] = "Tajik",
|
||||
[1119] = "Tamazight (Arabic)",
|
||||
[2143] = "Tamazight (Latin)",
|
||||
[1097] = "Tamil",
|
||||
[1092] = "Tatar",
|
||||
[1098] = "Telugu",
|
||||
[1054] = "Thai",
|
||||
[2129] = "Tibetan - Bhutan",
|
||||
[1105] = "Tibetan - People's Republic of China",
|
||||
[2163] = "Tigrigna - Eritrea",
|
||||
[1139] = "Tigrigna - Ethiopia",
|
||||
[1073] = "Tsonga",
|
||||
[1074] = "Tswana",
|
||||
[1055] = "Turkish",
|
||||
[1090] = "Turkmen",
|
||||
[1152] = "Uighur - China",
|
||||
[1058] = "Ukrainian",
|
||||
[1056] = "Urdu",
|
||||
[2080] = "Urdu - India",
|
||||
[2115] = "Uzbek (Cyrillic)",
|
||||
[1091] = "Uzbek (Latin)",
|
||||
[1075] = "Venda",
|
||||
[1066] = "Vietnamese",
|
||||
[1106] = "Welsh",
|
||||
[1160] = "Wolof",
|
||||
[1076] = "Xhosa",
|
||||
[1157] = "Yakut",
|
||||
[1144] = "Yi",
|
||||
[1085] = "Yiddish",
|
||||
[1130] = "Yoruba",
|
||||
[1077] = "Zulu",
|
||||
[1279] = "HID (Human Interface Device)",
|
||||
} &default = function(n: count): string { return fmt("keyboard-%d", n); };
|
||||
}
|
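Every table in this file carries a &default function, so a lookup always yields a string; unknown codes come back as a synthesized placeholder. A quick sketch of that lookup behavior (the build numbers below are just example inputs):

event bro_init()
	{
	print RDP::builds[2600];           # "RDP 5.1"
	print RDP::builds[31337];          # "client_build-31337" via the &default function
	print RDP::encryption_levels[3];   # "High"
	}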
12
scripts/base/protocols/rdp/dpd.sig
Normal file
|
@@ -0,0 +1,12 @@
|
|||
signature dpd_rdp_client {
|
||||
ip-proto == tcp
|
||||
# Client request
|
||||
payload /.*(Cookie: mstshash\=|Duca.*(rdpdr|rdpsnd|drdynvc|cliprdr))/
|
||||
requires-reverse-signature dpd_rdp_server
|
||||
enable "rdp"
|
||||
}
|
||||
|
||||
signature dpd_rdp_server {
|
||||
ip-proto == tcp
|
||||
payload /(.{5}\xd0|.*McDn)/
|
||||
}
|
269
scripts/base/protocols/rdp/main.bro
Normal file
|
@@ -0,0 +1,269 @@
|
|||
##! Implements base functionality for RDP analysis. Generates the rdp.log file.
|
||||
|
||||
@load ./consts
|
||||
|
||||
module RDP;
|
||||
|
||||
export {
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
type Info: record {
|
||||
## Timestamp for when the event happened.
|
||||
ts: time &log;
|
||||
## Unique ID for the connection.
|
||||
uid: string &log;
|
||||
## The connection's 4-tuple of endpoint addresses/ports.
|
||||
id: conn_id &log;
|
||||
## Cookie value used by the client machine.
|
||||
## This is typically a username.
|
||||
cookie: string &log &optional;
|
||||
## Status result for the connection. It's a mix between
|
||||
## RDP negotiation failure messages and GCC server create
|
||||
## response messages.
|
||||
result: string &log &optional;
|
||||
## Security protocol chosen by the server.
|
||||
security_protocol: string &log &optional;
|
||||
|
||||
## Keyboard layout (language) of the client machine.
|
||||
keyboard_layout: string &log &optional;
|
||||
## RDP client version used by the client machine.
|
||||
client_build: string &log &optional;
|
||||
## Name of the client machine.
|
||||
client_name: string &log &optional;
|
||||
## Product ID of the client machine.
|
||||
client_dig_product_id: string &log &optional;
|
||||
## Desktop width of the client machine.
|
||||
desktop_width: count &log &optional;
|
||||
## Desktop height of the client machine.
|
||||
desktop_height: count &log &optional;
|
||||
## The color depth requested by the client in
|
||||
## the high_color_depth field.
|
||||
requested_color_depth: string &log &optional;
|
||||
|
||||
## If the connection is being encrypted with native
|
||||
## RDP encryption, this is the type of cert
|
||||
## being used.
|
||||
cert_type: string &log &optional;
|
||||
## The number of certs seen. X.509 can transfer an
|
||||
## entire certificate chain.
|
||||
cert_count: count &log &default=0;
|
||||
## Indicates if the provided certificate or certificate
|
||||
## chain is permanent or temporary.
|
||||
cert_permanent: bool &log &optional;
|
||||
## Encryption level of the connection.
|
||||
encryption_level: string &log &optional;
|
||||
## Encryption method of the connection.
|
||||
encryption_method: string &log &optional;
|
||||
};
|
||||
|
||||
## If true, detach the RDP analyzer from the connection to prevent
|
||||
## continuing to process encrypted traffic.
|
||||
const disable_analyzer_after_detection = F &redef;
|
||||
|
||||
## The amount of time to monitor an RDP session from when it is first
|
||||
## identified. When this interval is reached, the session is logged.
|
||||
const rdp_check_interval = 10secs &redef;
|
||||
|
||||
## Event that can be handled to access the rdp record as it is sent on
|
||||
## to the logging framework.
|
||||
global log_rdp: event(rec: Info);
|
||||
}
|
||||
|
||||
# Internal fields that aren't useful externally
|
||||
redef record Info += {
|
||||
## The analyzer ID used for the analyzer instance attached
|
||||
## to each connection. It is not used for logging since it's a
|
||||
## meaningless arbitrary number.
|
||||
analyzer_id: count &optional;
|
||||
## Track status of logging RDP connections.
|
||||
done: bool &default=F;
|
||||
};
|
||||
|
||||
redef record connection += {
|
||||
rdp: Info &optional;
|
||||
};
|
||||
|
||||
const ports = { 3389/tcp };
|
||||
redef likely_server_ports += { ports };
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(RDP::LOG, [$columns=RDP::Info, $ev=log_rdp, $path="rdp"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_RDP, ports);
|
||||
}
|
||||
|
||||
function write_log(c: connection)
|
||||
{
|
||||
local info = c$rdp;
|
||||
|
||||
if ( info$done )
|
||||
return;
|
||||
|
||||
# Mark this record as fully logged and finished.
|
||||
info$done = T;
|
||||
|
||||
# Verify that the RDP session contains
|
||||
# RDP data before writing it to the log.
|
||||
if ( info?$cookie || info?$keyboard_layout || info?$result )
|
||||
Log::write(RDP::LOG, info);
|
||||
}
|
||||
|
||||
event check_record(c: connection)
|
||||
{
|
||||
# If the record was logged, then stop processing.
|
||||
if ( c$rdp$done )
|
||||
return;
|
||||
|
||||
# If the value rdp_check_interval has passed since the
|
||||
# RDP session was started, then log the record.
|
||||
local diff = network_time() - c$rdp$ts;
|
||||
if ( diff > rdp_check_interval )
|
||||
{
|
||||
write_log(c);
|
||||
|
||||
# Remove the analyzer if it is still attached.
|
||||
if ( disable_analyzer_after_detection &&
|
||||
connection_exists(c$id) &&
|
||||
c$rdp?$analyzer_id )
|
||||
{
|
||||
disable_analyzer(c$id, c$rdp$analyzer_id);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
else
|
||||
{
|
||||
# If the analyzer is attached and the duration
|
||||
# to monitor the RDP session was not met, then
|
||||
# reschedule the logging event.
|
||||
schedule rdp_check_interval { check_record(c) };
|
||||
}
|
||||
}
|
||||
|
||||
function set_session(c: connection)
|
||||
{
|
||||
if ( ! c?$rdp )
|
||||
{
|
||||
c$rdp = [$ts=network_time(),$id=c$id,$uid=c$uid];
|
||||
# The RDP session is scheduled to be logged from
|
||||
# the time it is first initiated.
|
||||
schedule rdp_check_interval { check_record(c) };
|
||||
}
|
||||
}
|
||||
|
||||
event rdp_connect_request(c: connection, cookie: string) &priority=5
|
||||
{
|
||||
set_session(c);
|
||||
|
||||
c$rdp$cookie = cookie;
|
||||
}
|
||||
|
||||
event rdp_negotiation_response(c: connection, security_protocol: count) &priority=5
|
||||
{
|
||||
set_session(c);
|
||||
|
||||
c$rdp$security_protocol = security_protocols[security_protocol];
|
||||
}
|
||||
|
||||
event rdp_negotiation_failure(c: connection, failure_code: count) &priority=5
|
||||
{
|
||||
set_session(c);
|
||||
|
||||
c$rdp$result = failure_codes[failure_code];
|
||||
}
|
||||
|
||||
event rdp_client_core_data(c: connection, data: RDP::ClientCoreData) &priority=5
|
||||
{
|
||||
set_session(c);
|
||||
|
||||
c$rdp$keyboard_layout = RDP::languages[data$keyboard_layout];
|
||||
c$rdp$client_build = RDP::builds[data$client_build];
|
||||
c$rdp$client_name = data$client_name;
|
||||
c$rdp$client_dig_product_id = data$dig_product_id;
|
||||
c$rdp$desktop_width = data$desktop_width;
|
||||
c$rdp$desktop_height = data$desktop_height;
|
||||
|
||||
if ( data?$ec_flags && data$ec_flags$want_32bpp_session )
|
||||
c$rdp$requested_color_depth = "32bit";
|
||||
else
|
||||
c$rdp$requested_color_depth = RDP::high_color_depths[data$high_color_depth];
|
||||
}
|
||||
|
||||
event rdp_gcc_server_create_response(c: connection, result: count) &priority=5
|
||||
{
|
||||
set_session(c);
|
||||
|
||||
c$rdp$result = RDP::results[result];
|
||||
}
|
||||
|
||||
event rdp_server_security(c: connection, encryption_method: count, encryption_level: count) &priority=5
|
||||
{
|
||||
set_session(c);
|
||||
|
||||
c$rdp$encryption_method = RDP::encryption_methods[encryption_method];
|
||||
c$rdp$encryption_level = RDP::encryption_levels[encryption_level];
|
||||
}
|
||||
|
||||
event rdp_server_certificate(c: connection, cert_type: count, permanently_issued: bool) &priority=5
|
||||
{
|
||||
set_session(c);
|
||||
|
||||
c$rdp$cert_type = RDP::cert_types[cert_type];
|
||||
|
||||
# There are no events for proprietary/RSA certs right
|
||||
# now so we manually count this one.
|
||||
if ( c$rdp$cert_type == "RSA" )
|
||||
++c$rdp$cert_count;
|
||||
|
||||
c$rdp$cert_permanent = permanently_issued;
|
||||
}
|
||||
|
||||
event rdp_begin_encryption(c: connection, security_protocol: count) &priority=5
|
||||
{
|
||||
set_session(c);
|
||||
|
||||
if ( ! c$rdp?$result )
|
||||
{
|
||||
c$rdp$result = "encrypted";
|
||||
}
|
||||
|
||||
c$rdp$security_protocol = security_protocols[security_protocol];
|
||||
}
|
||||
|
||||
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5
|
||||
{
|
||||
if ( c?$rdp && f$source == "RDP" )
|
||||
{
|
||||
# Count up X509 certs.
|
||||
++c$rdp$cert_count;
|
||||
|
||||
Files::add_analyzer(f, Files::ANALYZER_X509);
|
||||
Files::add_analyzer(f, Files::ANALYZER_MD5);
|
||||
Files::add_analyzer(f, Files::ANALYZER_SHA1);
|
||||
}
|
||||
}
|
||||
|
||||
event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=5
|
||||
{
|
||||
if ( atype == Analyzer::ANALYZER_RDP )
|
||||
{
|
||||
set_session(c);
|
||||
c$rdp$analyzer_id = aid;
|
||||
}
|
||||
}
|
||||
|
||||
event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason: string) &priority=5
|
||||
{
|
||||
# If a protocol violation occurs, then log the record immediately.
|
||||
if ( c?$rdp )
|
||||
write_log(c);
|
||||
}
|
||||
|
||||
event connection_state_remove(c: connection) &priority=-5
|
||||
{
|
||||
# If the connection is removed, then log the record immediately.
|
||||
if ( c?$rdp )
|
||||
{
|
||||
write_log(c);
|
||||
}
|
||||
}
|
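Two redef-able knobs control the logic above: rdp_check_interval bounds how long a session is monitored before write_log() runs, and disable_analyzer_after_detection optionally detaches the analyzer once that happens. A hedged example of site tuning plus a log_rdp consumer (the 30-second value and the handler body are illustrative):

redef RDP::rdp_check_interval = 30secs;
redef RDP::disable_analyzer_after_detection = T;

event RDP::log_rdp(rec: RDP::Info)
	{
	# Illustrative: the cookie is typically the client-supplied username.
	if ( rec?$cookie )
		print fmt("RDP login attempt from %s with cookie %s", rec$id$orig_h, rec$cookie);
	}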
1
scripts/base/protocols/sip/README
Normal file
|
@@ -0,0 +1 @@
|
|||
Support for Session Initiation Protocol (SIP) analysis.
|
3
scripts/base/protocols/sip/__load__.bro
Normal file
|
@@ -0,0 +1,3 @@
|
|||
@load ./main
|
||||
|
||||
@load-sigs ./dpd.sig
|
19
scripts/base/protocols/sip/dpd.sig
Normal file
|
@@ -0,0 +1,19 @@
|
|||
signature dpd_sip_udp_req {
|
||||
ip-proto == udp
|
||||
payload /.* SIP\/[0-9]\.[0-9]\x0d\x0a/
|
||||
enable "sip"
|
||||
}
|
||||
|
||||
signature dpd_sip_udp_resp {
|
||||
ip-proto == udp
|
||||
payload /^ ?SIP\/[0-9]\.[0-9](\x0d\x0a| [0-9][0-9][0-9] )/
|
||||
enable "sip"
|
||||
}
|
||||
|
||||
# We don't support SIP-over-TCP yet.
|
||||
#
|
||||
# signature dpd_sip_tcp {
|
||||
# ip-proto == tcp
|
||||
# payload /^( SIP\/[0-9]\.[0-9]\x0d\x0a|SIP\/[0-9]\.[0-9] [0-9][0-9][0-9] )/
|
||||
# enable "sip_tcp"
|
||||
# }
|
301
scripts/base/protocols/sip/main.bro
Normal file
|
@@ -0,0 +1,301 @@
|
|||
##! Implements base functionality for SIP analysis. The logging model is
|
||||
##! to log request/response pairs and all relevant metadata together in
|
||||
##! a single record.
|
||||
|
||||
@load base/utils/numbers
|
||||
@load base/utils/files
|
||||
|
||||
module SIP;
|
||||
|
||||
export {
|
||||
redef enum Log::ID += { LOG };
|
||||
|
||||
type Info: record {
|
||||
## Timestamp for when the request happened.
|
||||
ts: time &log;
|
||||
## Unique ID for the connection.
|
||||
uid: string &log;
|
||||
## The connection's 4-tuple of endpoint addresses/ports.
|
||||
id: conn_id &log;
|
||||
## Represents the pipelined depth into the connection of this
|
||||
## request/response transaction.
|
||||
trans_depth: count &log;
|
||||
## Verb used in the SIP request (INVITE, REGISTER etc.).
|
||||
method: string &log &optional;
|
||||
## URI used in the request.
|
||||
uri: string &log &optional;
|
||||
## Contents of the Date: header from the client
|
||||
date: string &log &optional;
|
||||
## Contents of the request From: header
|
||||
## Note: The ``tag=`` value that's usually appended to the sender
|
||||
## is stripped off and not logged.
|
||||
request_from: string &log &optional;
|
||||
## Contents of the To: header
|
||||
request_to: string &log &optional;
|
||||
## Contents of the response From: header
|
||||
## Note: The ``tag=`` value that's usually appended to the sender
|
||||
## is stripped off and not logged.
|
||||
response_from: string &log &optional;
|
||||
## Contents of the response To: header
|
||||
response_to: string &log &optional;
|
||||
|
||||
## Contents of the Reply-To: header
|
||||
reply_to: string &log &optional;
|
||||
## Contents of the Call-ID: header from the client
|
||||
call_id: string &log &optional;
|
||||
## Contents of the CSeq: header from the client
|
||||
seq: string &log &optional;
|
||||
## Contents of the Subject: header from the client
|
||||
subject: string &log &optional;
|
||||
## The client message transmission path, as extracted from the headers.
|
||||
request_path: vector of string &log &optional;
|
||||
## The server message transmission path, as extracted from the headers.
|
||||
response_path: vector of string &log &optional;
|
||||
## Contents of the User-Agent: header from the client
|
||||
user_agent: string &log &optional;
|
||||
## Status code returned by the server.
|
||||
status_code: count &log &optional;
|
||||
## Status message returned by the server.
|
||||
status_msg: string &log &optional;
|
||||
## Contents of the Warning: header
|
||||
warning: string &log &optional;
|
||||
## Contents of the Content-Length: header from the client
|
||||
request_body_len: count &log &optional;
|
||||
## Contents of the Content-Length: header from the server
|
||||
response_body_len: count &log &optional;
|
||||
## Contents of the Content-Type: header from the server
|
||||
content_type: string &log &optional;
|
||||
};
|
||||
|
||||
type State: record {
|
||||
## Pending requests.
|
||||
pending: table[count] of Info;
|
||||
## Current request in the pending queue.
|
||||
current_request: count &default=0;
|
||||
## Current response in the pending queue.
|
||||
current_response: count &default=0;
|
||||
};
|
||||
|
||||
## A list of SIP methods. Other methods will generate a weird. Note
|
||||
## that the SIP analyzer will only accept methods consisting solely
|
||||
## of letters ``[A-Za-z]``.
|
||||
const sip_methods: set[string] = {
|
||||
"REGISTER", "INVITE", "ACK", "CANCEL", "BYE", "OPTIONS", "NOTIFY", "SUBSCRIBE"
|
||||
} &redef;
|
||||
|
||||
## Event that can be handled to access the SIP record as it is sent on
|
||||
## to the logging framework.
|
||||
global log_sip: event(rec: Info);
|
||||
}
|
||||
|
||||
# Add the sip state tracking fields to the connection record.
|
||||
redef record connection += {
|
||||
sip: Info &optional;
|
||||
sip_state: State &optional;
|
||||
};
|
||||
|
||||
const ports = { 5060/udp };
|
||||
redef likely_server_ports += { ports };
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(SIP::LOG, [$columns=Info, $ev=log_sip, $path="sip"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_SIP, ports);
|
||||
}
|
||||
|
||||
function new_sip_session(c: connection): Info
|
||||
{
|
||||
local tmp: Info;
|
||||
tmp$ts=network_time();
|
||||
tmp$uid=c$uid;
|
||||
tmp$id=c$id;
|
||||
# $current_request is set prior to the Info record creation so we
|
||||
# can use the value directly here.
|
||||
tmp$trans_depth = c$sip_state$current_request;
|
||||
|
||||
tmp$request_path = vector();
|
||||
tmp$response_path = vector();
|
||||
|
||||
return tmp;
|
||||
}
|
||||
|
||||
function set_state(c: connection, is_request: bool)
|
||||
{
|
||||
if ( ! c?$sip_state )
|
||||
{
|
||||
local s: State;
|
||||
c$sip_state = s;
|
||||
}
|
||||
|
||||
if ( is_request )
|
||||
{
|
||||
if ( c$sip_state$current_request !in c$sip_state$pending )
|
||||
c$sip_state$pending[c$sip_state$current_request] = new_sip_session(c);
|
||||
|
||||
c$sip = c$sip_state$pending[c$sip_state$current_request];
|
||||
}
|
||||
else
|
||||
{
|
||||
if ( c$sip_state$current_response !in c$sip_state$pending )
|
||||
c$sip_state$pending[c$sip_state$current_response] = new_sip_session(c);
|
||||
|
||||
c$sip = c$sip_state$pending[c$sip_state$current_response];
|
||||
}
|
||||
}
|
||||
|
||||
function flush_pending(c: connection)
|
||||
{
|
||||
# Flush all pending but incomplete request/response pairs.
|
||||
if ( c?$sip_state )
|
||||
{
|
||||
for ( r in c$sip_state$pending )
|
||||
{
|
||||
# We don't use pending elements at index 0.
|
||||
if ( r == 0 )
|
||||
next;
|
||||
|
||||
Log::write(SIP::LOG, c$sip_state$pending[r]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
event sip_request(c: connection, method: string, original_URI: string, version: string) &priority=5
|
||||
{
|
||||
set_state(c, T);
|
||||
|
||||
c$sip$method = method;
|
||||
c$sip$uri = original_URI;
|
||||
|
||||
if ( method !in sip_methods )
|
||||
event conn_weird("unknown_SIP_method", c, method);
|
||||
}
|
||||
|
||||
event sip_reply(c: connection, version: string, code: count, reason: string) &priority=5
|
||||
{
|
||||
set_state(c, F);
|
||||
|
||||
if ( c$sip_state$current_response !in c$sip_state$pending &&
|
||||
(code < 100 || 200 <= code) )
|
||||
++c$sip_state$current_response;
|
||||
|
||||
c$sip$status_code = code;
|
||||
c$sip$status_msg = reason;
|
||||
}
|
||||
|
||||
event sip_header(c: connection, is_request: bool, name: string, value: string) &priority=5
|
||||
{
|
||||
if ( ! c?$sip_state )
|
||||
{
|
||||
local s: State;
|
||||
c$sip_state = s;
|
||||
}
|
||||
|
||||
if ( is_request ) # from client
|
||||
{
|
||||
if ( c$sip_state$current_request !in c$sip_state$pending )
|
||||
++c$sip_state$current_request;
|
||||
set_state(c, is_request);
|
||||
switch ( name )
|
||||
{
|
||||
case "CALL-ID":
|
||||
c$sip$call_id = value;
|
||||
break;
|
||||
case "CONTENT-LENGTH", "L":
|
||||
c$sip$request_body_len = to_count(value);
|
||||
break;
|
||||
case "CSEQ":
|
||||
c$sip$seq = value;
|
||||
break;
|
||||
case "DATE":
|
||||
c$sip$date = value;
|
||||
break;
|
||||
case "FROM", "F":
|
||||
c$sip$request_from = split_string1(value, /;[ ]?tag=/)[0];
|
||||
break;
|
||||
case "REPLY-TO":
|
||||
c$sip$reply_to = value;
|
||||
break;
|
||||
case "SUBJECT", "S":
|
||||
c$sip$subject = value;
|
||||
break;
|
||||
case "TO", "T":
|
||||
c$sip$request_to = value;
|
||||
break;
|
||||
case "USER-AGENT":
|
||||
c$sip$user_agent = value;
|
||||
break;
|
||||
case "VIA", "V":
|
||||
c$sip$request_path[|c$sip$request_path|] = split_string1(value, /;[ ]?branch/)[0];
|
||||
break;
|
||||
}
|
||||
|
||||
c$sip_state$pending[c$sip_state$current_request] = c$sip;
|
||||
}
|
||||
else # from server
|
||||
{
|
||||
if ( c$sip_state$current_response !in c$sip_state$pending )
|
||||
++c$sip_state$current_response;
|
||||
|
||||
set_state(c, is_request);
|
||||
switch ( name )
|
||||
{
|
||||
case "CONTENT-LENGTH", "L":
|
||||
c$sip$response_body_len = to_count(value);
|
||||
break;
|
||||
case "CONTENT-TYPE", "C":
|
||||
c$sip$content_type = value;
|
||||
break;
|
||||
case "WARNING":
|
||||
c$sip$warning = value;
|
||||
break;
|
||||
case "FROM", "F":
|
||||
c$sip$response_from = split_string1(value, /;[ ]?tag=/)[0];
|
||||
break;
|
||||
case "TO", "T":
|
||||
c$sip$response_to = value;
|
||||
break;
|
||||
case "VIA", "V":
|
||||
c$sip$response_path[|c$sip$response_path|] = split_string1(value, /;[ ]?branch/)[0];
|
||||
break;
|
||||
}
|
||||
|
||||
c$sip_state$pending[c$sip_state$current_response] = c$sip;
|
||||
}
|
||||
}
|
||||
|
||||
event sip_end_entity(c: connection, is_request: bool) &priority = 5
|
||||
{
|
||||
set_state(c, is_request);
|
||||
}
|
||||
|
||||
event sip_end_entity(c: connection, is_request: bool) &priority = -5
|
||||
{
|
||||
# The reply body is done so we're ready to log.
|
||||
if ( ! is_request )
|
||||
{
|
||||
Log::write(SIP::LOG, c$sip);
|
||||
|
||||
if ( c$sip$status_code < 100 || 200 <= c$sip$status_code )
|
||||
delete c$sip_state$pending[c$sip_state$current_response];
|
||||
|
||||
if ( ! c$sip?$method || ( c$sip$method == "BYE" &&
|
||||
c$sip$status_code >= 200 && c$sip$status_code < 300 ) )
|
||||
{
|
||||
flush_pending(c);
|
||||
delete c$sip;
|
||||
delete c$sip_state;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
event connection_state_remove(c: connection) &priority=-5
|
||||
{
|
||||
if ( c?$sip_state )
|
||||
{
|
||||
for ( r in c$sip_state$pending )
|
||||
{
|
||||
Log::write(SIP::LOG, c$sip_state$pending[r]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
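The sip_methods set and the log_sip event give the usual extension points: unexpected verbs generate a weird, and extra methods can be whitelisted with a redef. A minimal sketch (adding MESSAGE and the handler body are illustrative, not part of the defaults):

redef SIP::sip_methods += { "MESSAGE" };

event SIP::log_sip(rec: SIP::Info)
	{
	# Illustrative: surface failed requests as they are logged.
	if ( rec?$status_code && rec$status_code >= 400 )
		print fmt("SIP %s to %s failed with %d", rec?$method ? rec$method : "<request>",
		          rec?$uri ? rec$uri : "<unknown>", rec$status_code);
	}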
@@ -29,6 +29,8 @@ export {
|
|||
from: string &log &optional;
|
||||
## Contents of the To header.
|
||||
to: set[string] &log &optional;
|
||||
## Contents of the CC header.
|
||||
cc: set[string] &log &optional;
|
||||
## Contents of the ReplyTo header.
|
||||
reply_to: string &log &optional;
|
||||
## Contents of the MsgID header.
|
||||
|
@@ -92,7 +94,7 @@ redef likely_server_ports += { ports };
|
|||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(SMTP::LOG, [$columns=SMTP::Info, $ev=log_smtp]);
|
||||
Log::create_stream(SMTP::LOG, [$columns=SMTP::Info, $ev=log_smtp, $path="smtp"]);
|
||||
Analyzer::register_for_ports(Analyzer::ANALYZER_SMTP, ports);
|
||||
}
|
||||
|
||||
|
@@ -239,6 +241,16 @@ event mime_one_header(c: connection, h: mime_header_rec) &priority=5
|
|||
add c$smtp$to[to_parts[i]];
|
||||
}
|
||||
|
||||
else if ( h$name == "CC" )
|
||||
{
|
||||
if ( ! c$smtp?$cc )
|
||||
c$smtp$cc = set();
|
||||
|
||||
local cc_parts = split_string(h$value, /[[:blank:]]*,[[:blank:]]*/);
|
||||
for ( i in cc_parts )
|
||||
add c$smtp$cc[cc_parts[i]];
|
||||
}
|
||||
|
||||
else if ( h$name == "X-ORIGINATING-IP" )
|
||||
{
|
||||
local addresses = extract_ip_addresses(h$value);
|
||||
|
|
Some files were not shown because too many files have changed in this diff.