From 5b3573394edbcf6c8926e84c21d84c13faa412e7 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 24 Jan 2014 15:51:58 -0600 Subject: [PATCH 01/47] Improve TCP FIN retransmission handling. In the case where multiple FIN packets are seen from a TCP endpoint (e.g. when one is retransmitted), only the first was counted toward a byte in the sequence space. This could cause a subsequent FIN packet to induce an incorrect wrap around in the sequence numbers (e.g. the retransmitted FIN packet now is one sequence number behind the first) and misleadingly large connection sizes. The change is to always treat a FIN packet as consuming one byte of sequence space. --- src/analyzer/protocol/tcp/TCP.cc | 11 ++++------- .../btest/Baseline/core.tcp.fin-retransmit/out | 2 ++ testing/btest/Traces/tcp/fin_retransmission.pcap | Bin 0 -> 434 bytes testing/btest/core/tcp/fin-retransmit.bro | 8 ++++++++ 4 files changed, 14 insertions(+), 7 deletions(-) create mode 100644 testing/btest/Baseline/core.tcp.fin-retransmit/out create mode 100644 testing/btest/Traces/tcp/fin_retransmission.pcap create mode 100644 testing/btest/core/tcp/fin-retransmit.bro diff --git a/src/analyzer/protocol/tcp/TCP.cc b/src/analyzer/protocol/tcp/TCP.cc index aefc5a1808..57c4ebef18 100644 --- a/src/analyzer/protocol/tcp/TCP.cc +++ b/src/analyzer/protocol/tcp/TCP.cc @@ -373,14 +373,11 @@ void TCP_Analyzer::ProcessSYN(const IP_Hdr* ip, const struct tcphdr* tp, void TCP_Analyzer::ProcessFIN(double t, TCP_Endpoint* endpoint, int& seq_len, uint32 base_seq) { - if ( endpoint->FIN_cnt == 0 ) - { - ++seq_len; // FIN consumes a byte of sequence space - ++endpoint->FIN_cnt; // remember that we've seen a FIN - } + ++seq_len; // FIN consumes a byte of sequence space. + ++endpoint->FIN_cnt; // remember that we've seen a FIN - else if ( t < endpoint->last_time + tcp_storm_interarrival_thresh && - ++endpoint->FIN_cnt == tcp_storm_thresh ) + if ( t < endpoint->last_time + tcp_storm_interarrival_thresh && + endpoint->FIN_cnt == tcp_storm_thresh ) Weird("FIN_storm"); // Remember the relative seq in FIN_seq.
diff --git a/testing/btest/Baseline/core.tcp.fin-retransmit/out b/testing/btest/Baseline/core.tcp.fin-retransmit/out new file mode 100644 index 0000000000..8afb8222c9 --- /dev/null +++ b/testing/btest/Baseline/core.tcp.fin-retransmit/out @@ -0,0 +1,2 @@ +[size=0, state=5, num_pkts=3, num_bytes_ip=156, flow_label=0] +[size=0, state=6, num_pkts=2, num_bytes_ip=92, flow_label=0] diff --git a/testing/btest/Traces/tcp/fin_retransmission.pcap b/testing/btest/Traces/tcp/fin_retransmission.pcap new file mode 100644 index 0000000000000000000000000000000000000000..1e17844af55bcf6ccb65c637d5f4f9f3f1173321 GIT binary patch literal 434 zcmca|c+)~A1{MYw`2U}Qff2|lH#!zHv5}R*56A}LBZdF0Vh=^guQHL?!@=Onz~Ery z<-p*;#w*OlC|4@D{Gim6=W*o?i&Wthj-opy7^s$ko-S+Td;crAns*h0-5Mz$i-j-G#!L7 z+^EsH59G$J+tA$zH1%eHpaX;bGLXwa0A$v$i+pgiFkGjR&jxaxMjo2$ZuY$_{20KZ Mz_~;MViE%b0C*yJumAu6 literal 0 HcmV?d00001 diff --git a/testing/btest/core/tcp/fin-retransmit.bro b/testing/btest/core/tcp/fin-retransmit.bro new file mode 100644 index 0000000000..42bf062f5a --- /dev/null +++ b/testing/btest/core/tcp/fin-retransmit.bro @@ -0,0 +1,8 @@ +# @TEST-EXEC: bro -b -r $TRACES/tcp/fin_retransmission.pcap %INPUT >out +# @TEST-EXEC: btest-diff out + +event connection_state_remove(c: connection) + { + print c$orig; + print c$resp; + } From 9b12967d40b2624cec097732fd8e0b0efaae4612 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 24 Jan 2014 16:21:02 -0600 Subject: [PATCH 02/47] Improve gap reporting in TCP connections that never see data. The previous behavior was to accomodate SYN/FIN/RST-filtered traces by not reporting missing data (via the content_gap event) for such connections. The new behavior always reports gaps for connections that are established and terminate normally, but sequence numbers indicate that all data packets of the connection were missed. The behavior can be reverted by redef'ing "detect_filtered_trace". --- scripts/base/init-bare.bro | 6 ++++++ src/analyzer/protocol/tcp/TCP_Reassembler.cc | 2 +- src/const.bif | 1 + .../Baseline/core.tcp.miss-end-data/conn.log | 10 ++++++++++ .../btest/Baseline/core.tcp.miss-end-data/out | 1 + testing/btest/Traces/tcp/miss_end_data.pcap | Bin 0 -> 1216 bytes testing/btest/core/tcp/miss-end-data.bro | 10 ++++++++++ 7 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 testing/btest/Baseline/core.tcp.miss-end-data/conn.log create mode 100644 testing/btest/Baseline/core.tcp.miss-end-data/out create mode 100644 testing/btest/Traces/tcp/miss_end_data.pcap create mode 100644 testing/btest/core/tcp/miss-end-data.bro diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 8d4899b785..ce8d68d289 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -2849,6 +2849,12 @@ global load_sample_freq = 20 &redef; ## .. bro:see:: gap_report const gap_report_freq = 1.0 sec &redef; +## Whether to attempt to automatically detect SYN/FIN/RST-filtered trace +## and not report missing segments for such connections. +## If this is enabled, then missing data at the end of connections may not +## be reported via :bro:see:`content_gap`. +const detect_filtered_trace = F &redef; + ## Whether we want :bro:see:`content_gap` and :bro:see:`gap_report` for partial ## connections. A connection is partial if it is missing a full handshake. Note ## that gap reports for partial connections might not be reliable. 
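The revert mentioned in the commit message amounts to a one-line redef of the new constant from a local site script; a minimal, illustrative snippet (not part of the patch):

# Restore the old lenient behavior: suppress content-gap reporting for
# connections that look like they come from a SYN/FIN/RST-filtered trace.
redef detect_filtered_trace = T;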
diff --git a/src/analyzer/protocol/tcp/TCP_Reassembler.cc b/src/analyzer/protocol/tcp/TCP_Reassembler.cc index a1e20dc0e6..49292a04a5 100644 --- a/src/analyzer/protocol/tcp/TCP_Reassembler.cc +++ b/src/analyzer/protocol/tcp/TCP_Reassembler.cc @@ -178,7 +178,7 @@ void TCP_Reassembler::Undelivered(int up_to_seq) // to this method and only if this condition is not true). reporter->InternalError("Calling Undelivered for data that has already been delivered (or has already been marked as undelivered"); - if ( last_reassem_seq == 1 && + if ( BifConst::detect_filtered_trace && last_reassem_seq == 1 && (endpoint->FIN_cnt > 0 || endpoint->RST_cnt > 0 || peer->FIN_cnt > 0 || peer->RST_cnt > 0) ) { diff --git a/src/const.bif b/src/const.bif index fd0419c7d9..0ba168ca85 100644 --- a/src/const.bif +++ b/src/const.bif @@ -5,6 +5,7 @@ const ignore_keep_alive_rexmit: bool; const skip_http_data: bool; const use_conn_size_analyzer: bool; +const detect_filtered_trace: bool; const report_gaps_for_partial: bool; const exit_only_after_terminate: bool; diff --git a/testing/btest/Baseline/core.tcp.miss-end-data/conn.log b/testing/btest/Baseline/core.tcp.miss-end-data/conn.log new file mode 100644 index 0000000000..723e5becc3 --- /dev/null +++ b/testing/btest/Baseline/core.tcp.miss-end-data/conn.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open 2014-01-24-22-19-38 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1331764471.664131 CXWv6p3arKYeMETxOg 192.168.122.230 60648 77.238.160.184 80 tcp http 10.048360 538 2902 SF - 2902 ShADafF 5 750 4 172 (empty) +#close 2014-01-24-22-19-38 diff --git a/testing/btest/Baseline/core.tcp.miss-end-data/out b/testing/btest/Baseline/core.tcp.miss-end-data/out new file mode 100644 index 0000000000..cd5881035f --- /dev/null +++ b/testing/btest/Baseline/core.tcp.miss-end-data/out @@ -0,0 +1 @@ +content_gap, [orig_h=192.168.122.230, orig_p=60648/tcp, resp_h=77.238.160.184, resp_p=80/tcp], F, 1, 2902 diff --git a/testing/btest/Traces/tcp/miss_end_data.pcap b/testing/btest/Traces/tcp/miss_end_data.pcap new file mode 100644 index 0000000000000000000000000000000000000000..ae5aecbaac78f2ebd7243f373f1142c690fbda0e GIT binary patch literal 1216 zcmaLXOH30%7y#huf<_xl025+-gpHA=KzFxG>24pXDMemE+6HR$V4}ZyVoczQ6gf zy8{Z_!SOvTEfORMyaa$~Klk3()<*bz?F1Ho@=ejRJl7Ke$N=vA)n%+<6%=-Nrq^D? 
zk-ERw-Sg$n#-WHbq+Z10dfX2!@PIWmD_$$Ab{Ii z6(TD_tW>W3SGkhupPK|=Vys9q+z=aM<{e3Trii*`F&rO_#fGDaOfJIH3r2~YEEWq4 zOS1w0q;A2kf?)}I6T(oV9Lw#; zP~YuDk5`RpqAn>~DM^+ZiV;R@WmT}@@%U(2FfE9}oSMs1@pvpjv0=xSskrO#VJ`|k z;2+gB4T?DSBw2ybpafN^A!ioK(1d1^EZeq`WEiv^{3o#tUa40U6n6GO8ZHbk*zp8k z9paoya6TOkT4_;Ek+S-zmbP@KY@|q47ZKfX)0B3P+852%>*zx)9y4rd}DH3o-+9LOr`27)X5ujsB|G^ d(^G^WP33f64*w5cx14ANo#-;o%hEmoe*w*=ZlnMJ literal 0 HcmV?d00001 diff --git a/testing/btest/core/tcp/miss-end-data.bro b/testing/btest/core/tcp/miss-end-data.bro new file mode 100644 index 0000000000..6cee7577d9 --- /dev/null +++ b/testing/btest/core/tcp/miss-end-data.bro @@ -0,0 +1,10 @@ +# @TEST-EXEC: bro -r $TRACES/tcp/miss_end_data.pcap %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff conn.log + +redef report_gaps_for_partial = T; + +event content_gap(c: connection, is_orig: bool, seq: count, length: count) + { + print "content_gap", c$id, is_orig, seq, length; + } From 6d46144c3b1453d429848a699767215b61302024 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 24 Jan 2014 16:32:55 -0600 Subject: [PATCH 03/47] Improve TCP connection size reporting for half-open connections. If TCP endpoint A and B are synchronized at some point, but A closes/aborts/crashes and B goes on without knowledge of it and then A tries to re-synchronize, Bro could end up seeing something like (sequence numbers made up): A: SYN 100 B: ACK 500 A: RST 500 The final sequence number of A, in this case, is not useful in the context of determining the number of data bytes sent by A, so Bro now reports that as 0 (where before it could often be misleadingly large). --- src/analyzer/protocol/tcp/TCP_Endpoint.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/analyzer/protocol/tcp/TCP_Endpoint.cc b/src/analyzer/protocol/tcp/TCP_Endpoint.cc index d596234021..ad642a46e3 100644 --- a/src/analyzer/protocol/tcp/TCP_Endpoint.cc +++ b/src/analyzer/protocol/tcp/TCP_Endpoint.cc @@ -161,6 +161,13 @@ void TCP_Endpoint::SetState(EndpointState new_state) bro_int_t TCP_Endpoint::Size() const { + if ( prev_state == TCP_ENDPOINT_SYN_SENT && state == TCP_ENDPOINT_RESET && + peer->state == TCP_ENDPOINT_INACTIVE && ! NoDataAcked() ) + // This looks like a half-open connection was discovered and aborted. + // Sequence numbers could be misleading if used in context of data size + // and there was never a chance for this endpoint to send data anyway. + return 0; + bro_int_t size; uint64 last_seq_64 = (uint64(last_seq_high) << 32) | last_seq; From e09763e0613ea6ea8134d11957e419e3061b5db0 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 24 Jan 2014 16:47:00 -0600 Subject: [PATCH 04/47] Fix file_over_new_connection event to trigger when entire file is missed. If a file is nothing but gaps (e.g. due to missing/dropped packets), Bro can sometimes detect a file is supposed to have been present and never saw any of its content, but failed to raise file_over_new_connection events for it. This was mostly apparent because the tx_hosts/rx_hosts fields in files.log would not be populated in such cases (but are now with this change). 
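To illustrate the script-visible effect (an illustrative sketch, not part of the patch), a handler like the following now also runs for files whose content was entirely missed, so connection-derived details such as the hosts involved can still be recorded:

event file_over_new_connection(f: fa_file, c: connection, is_orig: bool)
	{
	# Fires even when the file consisted only of content gaps.
	print fmt("file %s seen over connection %s", f$id, c$uid);
	}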
--- src/file_analysis/File.cc | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/file_analysis/File.cc b/src/file_analysis/File.cc index 55b28763c8..deda0f9e93 100644 --- a/src/file_analysis/File.cc +++ b/src/file_analysis/File.cc @@ -103,7 +103,6 @@ File::~File() DBG_LOG(DBG_FILE_ANALYSIS, "Destroying File object %s", id.c_str()); Unref(val); - // Queue may not be empty in the case where only content gaps were seen. while ( ! fonc_queue.empty() ) { delete_vals(fonc_queue.front().second); @@ -460,20 +459,27 @@ void File::FileEvent(EventHandlerPtr h) FileEvent(h, vl); } +static void flush_file_event_queue(queue<pair<EventHandlerPtr, val_list*> >& q) + { + while ( ! q.empty() ) + { + pair<EventHandlerPtr, val_list*> p = q.front(); + mgr.QueueEvent(p.first, p.second); + q.pop(); + } + } + void File::FileEvent(EventHandlerPtr h, val_list* vl) { + if ( h == file_state_remove ) + flush_file_event_queue(fonc_queue); + mgr.QueueEvent(h, vl); if ( h == file_new ) { did_file_new_event = true; - - while ( ! fonc_queue.empty() ) - { - pair<EventHandlerPtr, val_list*> p = fonc_queue.front(); - mgr.QueueEvent(p.first, p.second); - fonc_queue.pop(); - } + flush_file_event_queue(fonc_queue); } if ( h == file_new || h == file_timeout || h == file_extraction_limit ) From 56acd99d15e2e64182a685afa84efe7741213b8c Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Fri, 24 Jan 2014 21:00:55 -0500 Subject: [PATCH 05/47] Fix misidentification of SOCKS traffic. Traffic that had a certain bytestring would get incorrectly identified as SOCKS. This seemed to happen a lot with DCE/RPC traffic. --- src/analyzer/protocol/socks/socks-analyzer.pac | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/analyzer/protocol/socks/socks-analyzer.pac b/src/analyzer/protocol/socks/socks-analyzer.pac index 885542fc2a..b7cbaaceac 100644 --- a/src/analyzer/protocol/socks/socks-analyzer.pac +++ b/src/analyzer/protocol/socks/socks-analyzer.pac @@ -64,6 +64,12 @@ refine connection SOCKS_Conn += { bro_analyzer()->ProtocolViolation(fmt("invalid value in reserved field: %d", ${request.reserved})); return false; } + if ( ( ${request.command} == 0 ) || ( ${request.command} > 3 ) ) + { + bro_analyzer()->ProtocolViolation(fmt("invalid value in command field: %d", ${request.command})); + bro_analyzer()->SetSkip(true); + return false; + } RecordVal* sa = new RecordVal(socks_address); @@ -105,7 +111,7 @@ refine connection SOCKS_Conn += { function socks5_reply(reply: SOCKS5_Reply): bool %{ RecordVal* sa = new RecordVal(socks_address); - + // This is dumb and there must be a better way (checking for presence of a field)... switch ( ${reply.bound.addr_type} ) { From 6d73b8c57e5bd74a0e7b880d2e547cd6da3649b9 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 27 Jan 2014 10:22:06 -0800 Subject: [PATCH 06/47] Fix x509_extension event. The event now really returns the extension. If OpenSSL supports printing it, it is converted into the OpenSSL ASCII output. The output does not always look pretty because it can contain newlines.
New event syntax: event x509_extension(c: connection, is_orig: bool, cert:X509, extension: X509_extension_info) Example output for extension: [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication] [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.3.6.1.4.1.6449.1.2.1.3.4^J CPS: https://secure.comodo.com/CPS^J] --- scripts/base/init-bare.bro | 12 ++++ src/NetVar.cc | 2 + src/NetVar.h | 1 + src/analyzer/protocol/ssl/events.bif | 6 +- src/analyzer/protocol/ssl/ssl-analyzer.pac | 61 +++++++++++++------ .../.stdout | 20 ++++++ .../base/protocols/ssl/x509_extensions.test | 7 +++ 7 files changed, 90 insertions(+), 19 deletions(-) create mode 100644 testing/btest/Baseline/scripts.base.protocols.ssl.x509_extensions/.stdout create mode 100644 testing/btest/scripts/base/protocols/ssl/x509_extensions.test diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 8d4899b785..7f80e63f54 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -2432,6 +2432,18 @@ type X509: record { not_valid_after: time; ##< Timestamp after when certificate is not valid. }; +## An X509 extension. +## +## +## .. bro:see:: x509_extension +type X509_extension_info: record { + name: string; ##< long name of extension. oid if name not known + short_name: string &optional; ##< short name of extension if known. + oid: string; ##< oid of extension + critical: bool; ##< true if extension is critical + value: string; ##< extension content parsed to string for known extensions. Raw data otherwise. +}; + ## HTTP session statistics. ## ## .. bro:see:: http_stats diff --git a/src/NetVar.cc b/src/NetVar.cc index 79652112f3..05a4e16b47 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -48,6 +48,7 @@ int tcp_max_above_hole_without_any_acks; int tcp_excessive_data_without_further_acks; RecordType* x509_type; +RecordType* x509_extension_type; RecordType* socks_address; @@ -356,6 +357,7 @@ void init_net_var() opt_internal_int("tcp_excessive_data_without_further_acks"); x509_type = internal_type("X509")->AsRecordType(); + x509_extension_type = internal_type("X509_extension_info")->AsRecordType(); socks_address = internal_type("SOCKS::Address")->AsRecordType(); diff --git a/src/NetVar.h b/src/NetVar.h index 12949c0e55..8ef6571313 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -51,6 +51,7 @@ extern int tcp_max_above_hole_without_any_acks; extern int tcp_excessive_data_without_further_acks; extern RecordType* x509_type; +extern RecordType* x509_extension_type; extern RecordType* socks_address; diff --git a/src/analyzer/protocol/ssl/events.bif b/src/analyzer/protocol/ssl/events.bif index 01abb87745..7319d2ce3e 100644 --- a/src/analyzer/protocol/ssl/events.bif +++ b/src/analyzer/protocol/ssl/events.bif @@ -178,11 +178,13 @@ event x509_certificate%(c: connection, is_orig: bool, cert: X509, chain_idx: cou ## ## is_orig: True if event is raised for originator side of the connection. ## -## data: The raw data associated with the extension. +## cert: The parsed certificate. +## +## extension: The parsed extension. ## ## .. 
bro:see:: ssl_alert ssl_client_hello ssl_established ssl_extension ## ssl_server_hello x509_certificate x509_error x509_verify -event x509_extension%(c: connection, is_orig: bool, data: string%); +event x509_extension%(c: connection, is_orig: bool, cert: X509, extension: X509_extension_info%); ## Generated when errors occur during parsing an X509 certificate. ## diff --git a/src/analyzer/protocol/ssl/ssl-analyzer.pac b/src/analyzer/protocol/ssl/ssl-analyzer.pac index 18d3812742..0156671ce8 100644 --- a/src/analyzer/protocol/ssl/ssl-analyzer.pac +++ b/src/analyzer/protocol/ssl/ssl-analyzer.pac @@ -9,6 +9,7 @@ #include "util.h" #include +#include #include %} @@ -298,25 +299,51 @@ refine connection SSL_Conn += { int num_ext = X509_get_ext_count(pTemp); for ( int k = 0; k < num_ext; ++k ) { - unsigned char *pBuffer = 0; - int length = 0; + char name[256]; + char oid[256]; + + memset(name, 0, 256); + memset(oid, 0, 256); X509_EXTENSION* ex = X509_get_ext(pTemp, k); - if (ex) - { - ASN1_STRING *pString = X509_EXTENSION_get_data(ex); - length = ASN1_STRING_to_UTF8(&pBuffer, pString); - //i2t_ASN1_OBJECT(&pBuffer, length, obj) - // printf("extension length: %d\n", length); - // -1 indicates an error. - if ( length >= 0 ) - { - StringVal* value = new StringVal(length, (char*)pBuffer); - BifEvent::generate_x509_extension(bro_analyzer(), - bro_analyzer()->Conn(), ${rec.is_orig}, value); - } - OPENSSL_free(pBuffer); - } + + if ( !ex ) + continue; + + ASN1_OBJECT* ext_asn = X509_EXTENSION_get_object(ex); + const char* short_name = OBJ_nid2sn(OBJ_obj2nid(ext_asn)); + + OBJ_obj2txt(name, 255, ext_asn, 0); + OBJ_obj2txt(oid, 255, ext_asn, 1); + + int critical = 0; + if ( X509_EXTENSION_get_critical(ex) != 0 ) + critical = 1; + + BIO *bio = BIO_new(BIO_s_mem()); + if(!X509V3_EXT_print(bio, ex, 0, 0)) + M_ASN1_OCTET_STRING_print(bio,ex->value); + + BIO_flush(bio); + int length = BIO_pending(bio); + // use OPENSSL_malloc here. Using new or anything else can lead + // to interesting, hard to debug segfaults. 
+ char *buffer = (char*) OPENSSL_malloc(length); + BIO_read(bio, buffer, length); + StringVal* ext_val = new StringVal(length, buffer); + BIO_free_all(bio); + OPENSSL_free(buffer); + + RecordVal* pX509Ext = new RecordVal(x509_extension_type); + pX509Ext->Assign(0, new StringVal(name)); + if ( short_name and strlen(short_name) > 0 ) + pX509Ext->Assign(1, new StringVal(short_name)); + pX509Ext->Assign(2, new StringVal(oid)); + pX509Ext->Assign(3, new Val(critical, TYPE_BOOL)); + pX509Ext->Assign(4, ext_val); + + BifEvent::generate_x509_extension(bro_analyzer(), + bro_analyzer()->Conn(), ${rec.is_orig}, pX509Cert->Ref(), pX509Ext); } } X509_free(pTemp); diff --git a/testing/btest/Baseline/scripts.base.protocols.ssl.x509_extensions/.stdout b/testing/btest/Baseline/scripts.base.protocols.ssl.x509_extensions/.stdout new file mode 100644 index 0000000000..3f9c8661bf --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ssl.x509_extensions/.stdout @@ -0,0 +1,20 @@ +[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:3F:D5:B5:D0:D6:44:79:50:4A:17:A3:9B:8C:4A:DC:B8:B0:22:64:6B^J] +[name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=A2:76:09:20:A8:40:FD:A1:AC:C8:E9:35:B9:11:A6:61:FF:8C:FF:A3] +[name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment] +[name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE] +[name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication] +[name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.3.6.1.4.1.6449.1.2.1.3.4^J CPS: https://secure.comodo.com/CPS^J] +[name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=^JFull Name:^J URI:http://crl.comodoca.com/COMODOHigh-AssuranceSecureServerCA.crl^J] +[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=CA Issuers - URI:http://crt.comodoca.com/COMODOHigh-AssuranceSecureServerCA.crt^JOCSP - URI:http://ocsp.comodoca.com^J] +[name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.taleo.net, DNS:taleo.net] +[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:AD:BD:98:7A:34:B4:26:F7:FA:C4:26:54:EF:03:BD:E0:24:CB:54:1A^J] +[name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=3F:D5:B5:D0:D6:44:79:50:4A:17:A3:9B:8C:4A:DC:B8:B0:22:64:6B] +[name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign] +[name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0] +[name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: X509v3 Any Policy^J] +[name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=^JFull Name:^J URI:http://crl.usertrust.com/AddTrustExternalCARoot.crl^J] +[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=CA Issuers - URI:http://crt.usertrust.com/AddTrustExternalCARoot.p7c^JCA Issuers - 
URI:http://crt.usertrust.com/AddTrustUTNSGCCA.crt^JOCSP - URI:http://ocsp.usertrust.com^J] +[name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=AD:BD:98:7A:34:B4:26:F7:FA:C4:26:54:EF:03:BD:E0:24:CB:54:1A] +[name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=F, value=Certificate Sign, CRL Sign] +[name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE] +[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:AD:BD:98:7A:34:B4:26:F7:FA:C4:26:54:EF:03:BD:E0:24:CB:54:1A^JDirName:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root^Jserial:01^J] diff --git a/testing/btest/scripts/base/protocols/ssl/x509_extensions.test b/testing/btest/scripts/base/protocols/ssl/x509_extensions.test new file mode 100644 index 0000000000..4db3233b27 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ssl/x509_extensions.test @@ -0,0 +1,7 @@ +# @TEST-EXEC: bro -r $TRACES/tls1.2.trace %INPUT +# @TEST-EXEC: btest-diff .stdout + +event x509_extension(c: connection, is_orig: bool, cert:X509, extension: X509_extension_info) +{ + print extension; +} From af95026348688e0df8c867f67d2a53a3d440cf41 Mon Sep 17 00:00:00 2001 From: Jeannette Dopheide Date: Mon, 27 Jan 2014 15:23:24 -0600 Subject: [PATCH 07/47] Minor grammar edits to Installation and Quick Start pages --- doc/install/install.rst | 8 ++++---- doc/quickstart/index.rst | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/install/install.rst b/doc/install/install.rst index 3678d948c2..7030c95642 100644 --- a/doc/install/install.rst +++ b/doc/install/install.rst @@ -89,7 +89,7 @@ Optional Dependencies Bro can make use of some optional libraries and tools if they are found at build time: - * LibGeoIP (for geo-locating IP addresses) + * LibGeoIP (for geolocating IP addresses) * sendmail (enables Bro and BroControl to send mail) * gawk (enables all features of bro-cut) * curl (used by a Bro script that implements active HTTP) @@ -137,11 +137,11 @@ The primary install prefix for binary packages is ``/opt/bro``. Non-MacOS packages that include BroControl also put variable/runtime data (e.g. Bro logs) in ``/var/opt/bro``. -Installing From Source +Installing from Source ========================== -Bro releases are bundled into source packages for convenience and -available from the `bro downloads page`_. Alternatively, the latest +Bro releases are bundled into source packages for convenience and are +available on the `bro downloads page`_. Alternatively, the latest Bro development version can be obtained through git repositories hosted at ``git.bro.org``. See our `git development documentation `_ for comprehensive diff --git a/doc/quickstart/index.rst b/doc/quickstart/index.rst index 49a909a37f..e8ead2cf01 100644 --- a/doc/quickstart/index.rst +++ b/doc/quickstart/index.rst @@ -155,7 +155,7 @@ changes we want to make: attempt looks like it may have been successful, and we want email when that happens, but only for certain servers. -So we've defined *what* we want to do, but need to know *where* to do it. +We've defined *what* we want to do, but need to know *where* to do it. The answer is to use a script written in the Bro programming language, so let's do a quick intro to Bro scripting. @@ -181,7 +181,7 @@ must explicitly choose if they want to load them. 
The main entry point for the default analysis configuration of a standalone Bro instance managed by BroControl is the ``$PREFIX/share/bro/site/local.bro`` -script. So we'll be adding to that in the following sections, but first +script. We'll be adding to that in the following sections, but first we have to figure out what to add. Redefining Script Option Variables @@ -197,7 +197,7 @@ A redefineable constant might seem strange, but what that really means is that the variable's value may not change at run-time, but whose initial value can be modified via the ``redef`` operator at parse-time. -So let's continue on our path to modify the behavior for the two SSL +Let's continue on our path to modify the behavior for the two SSL and SSH notices. Looking at :doc:`/scripts/base/frameworks/notice/main.bro`, we see that it advertises: @@ -211,7 +211,7 @@ we see that it advertises: const ignored_types: set[Notice::Type] = {} &redef; } -That's exactly what we want to do for the SSL notice. So add to ``local.bro``: +That's exactly what we want to do for the SSL notice. Add to ``local.bro``: .. code:: bro @@ -276,9 +276,9 @@ an email on the condition that the predicate function evaluates to true, which is whenever the notice type is an SSH login and the responding host stored inside the ``Info`` record's connection field is in the set of watched servers. -.. note:: record field member access is done with the '$' character +.. note:: Record field member access is done with the '$' character instead of a '.' as might be expected from other languages, in - order to avoid ambiguity with the builtin address type's use of '.' + order to avoid ambiguity with the built-in address type's use of '.' in IPv4 dotted decimal representations. Remember, to finalize that configuration change perform the ``check``, From 0e0e74e49c01ab867d430d21c149bdf27915b0c3 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 28 Jan 2014 11:04:01 -0600 Subject: [PATCH 08/47] Improve DNS analysis. - Fix parsing of empty question sections (when QDCOUNT == 0). In this case, the DNS parser would extract two 2-byte fields for use in either "dns_query_reply" or "dns_rejected" events (depending on the value of RCODE) as qclass and qtype parameters. This is not correct, because such fields don't actually exist in the DNS message format when QDCOUNT is 0. As a result, these events are no longer raised when there's an empty question section. Scripts that depend on checking for an empty question section can do that in the "dns_message" event (see the sketch below). - Add a new "dns_unknown_reply" event, for when Bro does not know how to fully parse a particular resource record type. This helps fix a problem in the default DNS scripts where the logic to complete request-reply pair matching doesn't work because it's waiting on more RR events to complete the reply, i.e. it expects ANCOUNT number of dns_*_reply events and will wait until it gets that many before completing a request-reply pair and logging it to dns.log. This could cause bogus replies to match a previous request if they happen to share a DNS transaction ID. 
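A rough sketch of the dns_message-based check mentioned in the first item (hypothetical script, not included in the patch):

event dns_message(c: connection, is_orig: bool, msg: dns_msg, len: count)
	{
	# A reply carrying an empty question section (QDCOUNT == 0) no longer
	# triggers dns_query_reply/dns_rejected, but can still be spotted here.
	if ( msg$QR && msg$num_queries == 0 )
		print fmt("reply with empty question section, rcode=%d", msg$rcode);
	}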
--- scripts/base/protocols/dns/main.bro | 27 +++++++++++++++---- src/analyzer/protocol/dns/DNS.cc | 22 +++++++-------- src/analyzer/protocol/dns/events.bif | 20 ++++++++++++-- .../btest-doc.sphinx.using_bro#1 | 4 +-- .../conn.select | 4 +-- .../dns.log | 6 ++--- .../dns.log | 8 +++--- 7 files changed, 61 insertions(+), 30 deletions(-) diff --git a/scripts/base/protocols/dns/main.bro b/scripts/base/protocols/dns/main.bro index 0d23029ad7..f3f19d488c 100644 --- a/scripts/base/protocols/dns/main.bro +++ b/scripts/base/protocols/dns/main.bro @@ -158,12 +158,17 @@ hook set_session(c: connection, msg: dns_msg, is_query: bool) &priority=5 # If this is either a query or this is the reply but # no Info records are in the queue (we missed the query?) # we need to create an Info record and put it in the queue. - if ( is_query || - Queue::len(c$dns_state$pending[msg$id]) == 0 ) + if ( is_query ) { info = new_session(c, msg$id); Queue::put(c$dns_state$pending[msg$id], info); } + else if ( Queue::len(c$dns_state$pending[msg$id]) == 0 ) + { + info = new_session(c, msg$id); + Queue::put(c$dns_state$pending[msg$id], info); + event conn_weird("dns_unmatched_reply", c, ""); + } if ( is_query ) # If this is a query, assign the newly created info variable @@ -202,17 +207,23 @@ hook set_session(c: connection, msg: dns_msg, is_query: bool) &priority=5 event dns_message(c: connection, is_orig: bool, msg: dns_msg, len: count) &priority=5 { hook set_session(c, msg, is_orig); + + if ( msg$QR && msg$rcode != 0 && msg$num_queries == 0 ) + c$dns$rejected = T; } event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5 { + if ( ! msg$QR ) + # This is weird: the inquirer must also be providing answers in + # the request, which is not what we want to track. + return; + if ( ans$answer_type == DNS_ANS ) { if ( ! c?$dns ) - { - event conn_weird("dns_unmatched_reply", c, ""); hook set_session(c, msg, F); - } + c$dns$AA = msg$AA; c$dns$RA = msg$RA; @@ -265,6 +276,12 @@ event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qcla c$dns$query = query; } + +event dns_unknown_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5 + { + event DNS::do_reply(c, msg, ans, fmt("<unknown type=%s>", ans$qtype)); + } + event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 { event DNS::do_reply(c, msg, ans, fmt("%s", a)); diff --git a/src/analyzer/protocol/dns/DNS.cc b/src/analyzer/protocol/dns/DNS.cc index 806cb9ae75..b17a90dd61 100644 --- a/src/analyzer/protocol/dns/DNS.cc +++ b/src/analyzer/protocol/dns/DNS.cc @@ -137,18 +137,6 @@ int DNS_Interpreter::ParseQuestions(DNS_MsgInfo* msg, { int n = msg->qdcount; - if ( n == 0 ) - { - // Generate event here because we won't go into ParseQuestion. - EventHandlerPtr dns_event = - msg->rcode == DNS_CODE_OK ? - dns_query_reply : dns_rejected; - BroString* question_name = new BroString(""); - - SendReplyOrRejectEvent(msg, dns_event, data, len, question_name); - return 1; - } - while ( n > 0 && ParseQuestion(msg, data, len, msg_start) ) --n; return n == 0; @@ -299,6 +287,16 @@ int DNS_Interpreter::ParseAnswer(DNS_MsgInfo* msg, break; default: + + if ( dns_unknown_reply && ! 
msg->skip_event ) + { + val_list* vl = new val_list; + vl->append(analyzer->BuildConnVal()); + vl->append(msg->BuildHdrVal()); + vl->append(msg->BuildAnswerVal()); + analyzer->ConnectionEvent(dns_unknown_reply, vl); + } + analyzer->Weird("DNS_RR_unknown_type"); data += rdlength; len -= rdlength; diff --git a/src/analyzer/protocol/dns/events.bif b/src/analyzer/protocol/dns/events.bif index 95c604a8b8..b43ac95f66 100644 --- a/src/analyzer/protocol/dns/events.bif +++ b/src/analyzer/protocol/dns/events.bif @@ -50,7 +50,7 @@ event dns_message%(c: connection, is_orig: bool, msg: dns_msg, len: count%); event dns_request%(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count%); ## Generated for DNS replies that reject a query. This event is raised if a DNS -## reply either indicates failure via its status code or does not pass on any +## reply indicates failure because it does not pass on any ## answers to a query. Note that all of the event's parameters are parsed out of ## the reply; there's no stateful correlation with the query. ## @@ -78,7 +78,7 @@ event dns_request%(c: connection, msg: dns_msg, query: string, qtype: count, qcl ## dns_skip_all_addl dns_skip_all_auth dns_skip_auth event dns_rejected%(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count%); -## Generated for DNS replies with an *ok* status code but no question section. +## Generated for each entry in the Question section of a DNS reply. ## ## See `Wikipedia <http://en.wikipedia.org/wiki/Domain_Name_System>`__ for more ## information about the DNS protocol. Bro analyzes both UDP and TCP DNS @@ -401,6 +401,22 @@ event dns_TXT_reply%(c: connection, msg: dns_msg, ans: dns_answer, str: string%) ## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth event dns_SRV_reply%(c: connection, msg: dns_msg, ans: dns_answer%); +## Generated on DNS reply resource records when the type of record is not one +## that Bro knows how to parse and generate another, more specific +## event. +## +## c: The connection, which may be UDP or TCP depending on the type of the +## transport-layer session being analyzed. +## +## msg: The parsed DNS message header. +## +## ans: The type-independent part of the parsed answer record. +## +## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply +## dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_SRV_reply dns_end +event dns_unknown_reply%(c: connection, msg: dns_msg, ans: dns_answer%); + ## Generated for DNS replies of type *EDNS*. For replies with multiple answers, ## an individual event of the corresponding type is raised for each. 
## diff --git a/testing/btest/Baseline/doc.sphinx.using_bro/btest-doc.sphinx.using_bro#1 b/testing/btest/Baseline/doc.sphinx.using_bro/btest-doc.sphinx.using_bro#1 index 65c802ccf2..53bcb5581d 100644 --- a/testing/btest/Baseline/doc.sphinx.using_bro/btest-doc.sphinx.using_bro#1 +++ b/testing/btest/Baseline/doc.sphinx.using_bro/btest-doc.sphinx.using_bro#1 @@ -20,8 +20,8 @@ #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool count string count count count count table[string] 1300475167.096535 CXWv6p3arKYeMETxOg 141.142.220.202 5353 224.0.0.251 5353 udp dns - - - S0 - 0 D 1 73 0 0 (empty) - 1300475167.097012 CjhGID4nQcgTWjvg4c fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp - - - - S0 - 0 D 1 199 0 0 (empty) - 1300475167.099816 CCvvfg3TEfuqmmG4bh 141.142.220.50 5353 224.0.0.251 5353 udp - - - - S0 - 0 D 1 179 0 0 (empty) + 1300475167.097012 CjhGID4nQcgTWjvg4c fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp dns - - - S0 - 0 D 1 199 0 0 (empty) + 1300475167.099816 CCvvfg3TEfuqmmG4bh 141.142.220.50 5353 224.0.0.251 5353 udp dns - - - S0 - 0 D 1 179 0 0 (empty) 1300475168.853899 CPbrpk1qSsw6ESzHV4 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF - 0 Dd 1 66 1 117 (empty) 1300475168.854378 C6pKV8GSxOnSLghOa 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF - 0 Dd 1 80 1 127 (empty) 1300475168.854837 CIPOse170MGiRM1Qf4 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF - 0 Dd 1 66 1 211 (empty) diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.sqlite.wikipedia/conn.select b/testing/btest/Baseline/scripts.base.frameworks.logging.sqlite.wikipedia/conn.select index bdae1a8f73..8653fd1edb 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.sqlite.wikipedia/conn.select +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.sqlite.wikipedia/conn.select @@ -1,6 +1,6 @@ 1300475167.09653|CXWv6p3arKYeMETxOg|141.142.220.202|5353|224.0.0.251|5353|udp|dns||||S0||0|D|1|73|0|0|(empty) -1300475167.09701|CjhGID4nQcgTWjvg4c|fe80::217:f2ff:fed7:cf65|5353|ff02::fb|5353|udp|||||S0||0|D|1|199|0|0|(empty) -1300475167.09982|CCvvfg3TEfuqmmG4bh|141.142.220.50|5353|224.0.0.251|5353|udp|||||S0||0|D|1|179|0|0|(empty) +1300475167.09701|CjhGID4nQcgTWjvg4c|fe80::217:f2ff:fed7:cf65|5353|ff02::fb|5353|udp|dns||||S0||0|D|1|199|0|0|(empty) +1300475167.09982|CCvvfg3TEfuqmmG4bh|141.142.220.50|5353|224.0.0.251|5353|udp|dns||||S0||0|D|1|179|0|0|(empty) 1300475168.652|CsRx2w45OKnoww6xl4|141.142.220.118|35634|208.80.152.2|80|tcp||0.0613288879394531|463|350|OTH||0|DdA|2|567|1|402|(empty) 1300475168.72401|CRJuHdVW0XPVINV8a|141.142.220.118|48649|208.80.152.118|80|tcp|http|0.1199049949646|525|232|S1||0|ShADad|4|741|3|396|(empty) 1300475168.8539|CPbrpk1qSsw6ESzHV4|141.142.220.118|43927|141.142.2.2|53|udp|dns|0.000435113906860352|38|89|SF||0|Dd|1|66|1|117|(empty) diff --git a/testing/btest/Baseline/scripts.base.protocols.dns.dns-key/dns.log b/testing/btest/Baseline/scripts.base.protocols.dns.dns-key/dns.log index 35289b82dd..76e83452e5 100644 --- a/testing/btest/Baseline/scripts.base.protocols.dns.dns-key/dns.log +++ b/testing/btest/Baseline/scripts.base.protocols.dns.dns-key/dns.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path dns -#open 2013-08-26-19-04-08 +#open 2014-01-28-14-55-04 #fields ts uid id.orig_h id.orig_p 
id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs rejected #types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] bool -1359565680.761790 CXWv6p3arKYeMETxOg 192.168.6.10 53209 192.168.129.36 53 udp 41477 paypal.com 1 C_INTERNET 48 DNSKEY 0 NOERROR F F T F 1 - - F -#close 2013-08-26-19-04-08 +1359565680.761790 CXWv6p3arKYeMETxOg 192.168.6.10 53209 192.168.129.36 53 udp 41477 paypal.com 1 C_INTERNET 48 DNSKEY 0 NOERROR F F T T 1 ,,, 455.000000,455.000000,455.000000,455.000000 F +#close 2014-01-28-14-55-04 diff --git a/testing/btest/Baseline/scripts.base.protocols.dns.duplicate-reponses/dns.log b/testing/btest/Baseline/scripts.base.protocols.dns.duplicate-reponses/dns.log index 6af017fa49..6e2a0a4699 100644 --- a/testing/btest/Baseline/scripts.base.protocols.dns.duplicate-reponses/dns.log +++ b/testing/btest/Baseline/scripts.base.protocols.dns.duplicate-reponses/dns.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path dns -#open 2013-08-26-19-04-08 +#open 2014-01-28-14-58-56 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs rejected #types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] bool -1363716396.798072 CXWv6p3arKYeMETxOg 55.247.223.174 27285 222.195.43.124 53 udp 21140 www.cmu.edu 1 C_INTERNET 1 A 0 NOERROR T F F F 1 www-cmu.andrew.cmu.edu,www-cmu-2.andrew.cmu.edu,128.2.10.163,www-cmu.andrew.cmu.edu 86400.000000,5.000000,21600.000000,86400.000000 F -1363716396.798374 CXWv6p3arKYeMETxOg 55.247.223.174 27285 222.195.43.124 53 udp 21140 - - - - - 0 NOERROR T F F F 0 www-cmu-2.andrew.cmu.edu,128.2.10.163 5.000000,21600.000000 F -#close 2013-08-26-19-04-08 +1363716396.798072 CXWv6p3arKYeMETxOg 55.247.223.174 27285 222.195.43.124 53 udp 21140 www.cmu.edu 1 C_INTERNET 1 A 0 NOERROR T F F F 1 www-cmu.andrew.cmu.edu,,www-cmu-2.andrew.cmu.edu,128.2.10.163 86400.000000,86400.000000,5.000000,21600.000000 F +1363716396.798374 CXWv6p3arKYeMETxOg 55.247.223.174 27285 222.195.43.124 53 udp 21140 - - - - - 0 NOERROR T F F F 0 www-cmu.andrew.cmu.edu,,www-cmu-2.andrew.cmu.edu,128.2.10.163 86400.000000,86400.000000,5.000000,21600.000000 F +#close 2014-01-28-14-58-56 From 31866f8f59ba9f9ec31543d4983f06152ec98bda Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 28 Jan 2014 13:56:22 -0600 Subject: [PATCH 09/47] Change dns.log to include only standard DNS queries. The scope of dns.log is now only standard queries (OPCODE == 0). Other kinds of queries (e.g. inverse query) were not handled correctly and could interfere with the state tracking of the default DNS scripts. 
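Custom scripts that extend the DNS analysis may want to adopt the same guard that the base scripts now use; a minimal sketch (not part of the patch):

event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count)
	{
	if ( msg$opcode != 0 )
		# Mirror the base scripts: only standard queries are tracked.
		return;

	print fmt("standard query for %s", query);
	}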
--- doc/scripting/connection_record_02.bro | 2 +- doc/scripting/index.rst | 14 +++++++------- scripts/base/protocols/dns/main.bro | 17 +++++++++++++++-- scripts/policy/protocols/dns/auth-addl.bro | 4 ++++ .../btest-doc.sphinx.connection-record-01#1 | 6 +++--- .../btest-doc.sphinx.connection-record-02#1 | 14 ++++++-------- .../output | 2 +- .../dns.log | 10 ---------- ...s-session.trace => dns-inverse-query.trace} | Bin .../doc/sphinx/connection-record-01.btest | 2 +- .../doc/sphinx/connection-record-02.btest | 2 +- ...oc_scripting_connection_record_02_bro.btest | 2 +- .../policy/protocols/dns/event-priority.bro | 4 ---- .../policy/protocols/dns/inverse-request.bro | 4 ++++ 14 files changed, 44 insertions(+), 39 deletions(-) delete mode 100644 testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log rename testing/btest/Traces/{dns-session.trace => dns-inverse-query.trace} (100%) delete mode 100644 testing/btest/scripts/policy/protocols/dns/event-priority.bro create mode 100644 testing/btest/scripts/policy/protocols/dns/inverse-request.bro diff --git a/doc/scripting/connection_record_02.bro b/doc/scripting/connection_record_02.bro index 4459e47ef6..e4770069a9 100644 --- a/doc/scripting/connection_record_02.bro +++ b/doc/scripting/connection_record_02.bro @@ -1,5 +1,5 @@ @load base/protocols/conn -@load base/protocols/dns +@load base/protocols/http event connection_state_remove(c: connection) { diff --git a/doc/scripting/index.rst b/doc/scripting/index.rst index e42aa55e2c..66ebce86af 100644 --- a/doc/scripting/index.rst +++ b/doc/scripting/index.rst @@ -232,7 +232,7 @@ overly populated. .. btest:: connection-record-01 - @TEST-EXEC: btest-rst-cmd bro -b -r ${TRACES}/dns-session.trace ${DOC_ROOT}/scripting/connection_record_01.bro + @TEST-EXEC: btest-rst-cmd bro -b -r ${TRACES}/http/get.trace ${DOC_ROOT}/scripting/connection_record_01.bro As you can see from the output, the connection record is something of a jumble when printed on its own. Regularly taking a peek at a @@ -248,9 +248,9 @@ originating host is referenced by ``c$id$orig_h`` which if given a narrative relates to ``orig_h`` which is a member of ``id`` which is a member of the data structure referred to as ``c`` that was passed into the event handler." Given that the responder port -(``c$id$resp_p``) is ``53/tcp``, it's likely that Bro's base DNS scripts +(``c$id$resp_p``) is ``53/tcp``, it's likely that Bro's base HTTP scripts can further populate the connection record. Let's load the -``base/protocols/dns`` scripts and check the output of our script. +``base/protocols/http`` scripts and check the output of our script. Bro uses the dollar sign as its field delimiter and a direct correlation exists between the output of the connection record and the @@ -262,16 +262,16 @@ brackets, which would correspond to the ``$``-delimiter in a Bro script. .. btest:: connection-record-02 - @TEST-EXEC: btest-rst-cmd bro -b -r ${TRACES}/dns-session.trace ${DOC_ROOT}/scripting/connection_record_02.bro + @TEST-EXEC: btest-rst-cmd bro -b -r ${TRACES}/http/get.trace ${DOC_ROOT}/scripting/connection_record_02.bro -The addition of the ``base/protocols/dns`` scripts populates the -``dns=[]`` member of the connection record. While Bro is doing a +The addition of the ``base/protocols/http`` scripts populates the +``http=[]`` member of the connection record. While Bro is doing a massive amount of work in the background, it is in what is commonly called "scriptland" that details are being refined and decisions being made. 
Were we to continue running in "bare mode" we could slowly keep adding infrastructure through ``@load`` statements. For example, were we to ``@load base/frameworks/logging``, Bro would generate a -``conn.log`` and ``dns.log`` for us in the current working directory. +``conn.log`` and ``http.log`` for us in the current working directory. As mentioned above, including the appropriate ``@load`` statements is not only good practice, but can also help to indicate which functionalities are being used in a script. Take a second to run the diff --git a/scripts/base/protocols/dns/main.bro b/scripts/base/protocols/dns/main.bro index f3f19d488c..0651e23ada 100644 --- a/scripts/base/protocols/dns/main.bro +++ b/scripts/base/protocols/dns/main.bro @@ -206,6 +206,10 @@ hook set_session(c: connection, msg: dns_msg, is_query: bool) &priority=5 event dns_message(c: connection, is_orig: bool, msg: dns_msg, len: count) &priority=5 { + if ( msg$opcode != 0 ) + # Currently only standard queries are tracked. + return; + hook set_session(c, msg, is_orig); if ( msg$QR && msg$rcode != 0 && msg$num_queries == 0 ) @@ -214,6 +218,10 @@ event dns_message(c: connection, is_orig: bool, msg: dns_msg, len: count) &prior event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5 { + if ( msg$opcode != 0 ) + # Currently only standard queries are tracked. + return; + if ( ! msg$QR ) # This is weird: the inquirer must also be providing answers in # the request, which is not what we want to track. @@ -249,7 +257,7 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=-5 { - if ( c$dns$ready ) + if ( c?$dns && c$dns$ready ) { Log::write(DNS::LOG, c$dns); # This record is logged and no longer pending. @@ -260,6 +268,10 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5 { + if ( msg$opcode != 0 ) + # Currently only standard queries are tracked. + return; + c$dns$RD = msg$RD; c$dns$TC = msg$TC; c$dns$qclass = qclass; @@ -356,7 +368,8 @@ event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5 event dns_rejected(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5 { - c$dns$rejected = T; + if ( c?$dns ) + c$dns$rejected = T; } event connection_state_remove(c: connection) &priority=-5 diff --git a/scripts/policy/protocols/dns/auth-addl.bro b/scripts/policy/protocols/dns/auth-addl.bro index 8c04379c1c..bc97d529cd 100644 --- a/scripts/policy/protocols/dns/auth-addl.bro +++ b/scripts/policy/protocols/dns/auth-addl.bro @@ -21,6 +21,10 @@ export { event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=4 { + if ( msg$opcode != 0 ) + # Currently only standard queries are tracked. + return; + # The "ready" flag will be set here. This causes the setting from the # base script to be overridden since the base script will log immediately # after all of the ANS replies have been seen. 
diff --git a/testing/btest/Baseline/doc.sphinx.connection-record-01/btest-doc.sphinx.connection-record-01#1 b/testing/btest/Baseline/doc.sphinx.connection-record-01/btest-doc.sphinx.connection-record-01#1 index 1deb2583a9..8da50c3d30 100644 --- a/testing/btest/Baseline/doc.sphinx.connection-record-01/btest-doc.sphinx.connection-record-01#1 +++ b/testing/btest/Baseline/doc.sphinx.connection-record-01/btest-doc.sphinx.connection-record-01#1 @@ -4,10 +4,10 @@ :linenos: :emphasize-lines: 1,1 - # bro -b -r dns-session.trace connection_record_01.bro - [id=[orig_h=212.180.42.100, orig_p=25000/tcp, resp_h=131.243.64.3, resp_p=53/tcp], orig=[size=29, state=5, num_pkts=6, num_bytes_ip=273, flow_label=0], resp=[size=44, state=5, num_pkts=5, num_bytes_ip=248, flow_label=0], start_time=930613226.067666, duration=0.709643, service={ + # bro -b -r http/get.trace connection_record_01.bro + [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={ - }, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, conn=[ts=930613226.067666, uid=CXWv6p3arKYeMETxOg, id=[orig_h=212.180.42.100, orig_p=25000/tcp, resp_h=131.243.64.3, resp_p=53/tcp], proto=tcp, service=, duration=0.709643, orig_bytes=29, resp_bytes=44, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=6, orig_ip_bytes=273, resp_pkts=5, resp_ip_bytes=248, tunnel_parents={ + }, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, conn=[ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={ }], extract_orig=F, extract_resp=F] diff --git a/testing/btest/Baseline/doc.sphinx.connection-record-02/btest-doc.sphinx.connection-record-02#1 b/testing/btest/Baseline/doc.sphinx.connection-record-02/btest-doc.sphinx.connection-record-02#1 index 42d0a56e21..c170dbc645 100644 --- a/testing/btest/Baseline/doc.sphinx.connection-record-02/btest-doc.sphinx.connection-record-02#1 +++ b/testing/btest/Baseline/doc.sphinx.connection-record-02/btest-doc.sphinx.connection-record-02#1 @@ -4,16 +4,14 @@ :linenos: :emphasize-lines: 1,1 - # bro -b -r dns-session.trace connection_record_02.bro - [id=[orig_h=212.180.42.100, orig_p=25000/tcp, resp_h=131.243.64.3, resp_p=53/tcp], orig=[size=29, state=5, num_pkts=6, num_bytes_ip=273, flow_label=0], resp=[size=44, state=5, num_pkts=5, num_bytes_ip=248, flow_label=0], start_time=930613226.067666, duration=0.709643, service={ + # bro -b -r http/get.trace connection_record_02.bro + [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={ - }, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, conn=[ts=930613226.067666, uid=CXWv6p3arKYeMETxOg, id=[orig_h=212.180.42.100, orig_p=25000/tcp, resp_h=131.243.64.3, resp_p=53/tcp], proto=tcp, service=, duration=0.709643, orig_bytes=29, resp_bytes=44, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, 
orig_pkts=6, orig_ip_bytes=273, resp_pkts=5, resp_ip_bytes=248, tunnel_parents={ + }, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, conn=[ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={ - }], extract_orig=F, extract_resp=F, dns=, dns_state=[pending={ - [34798] = [initialized=T, vals={ + }], extract_orig=F, extract_resp=F, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={ - }, settings=[max_len=], top=1, bottom=1, size=0] - }, finished_answers={ + }, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={ - }]] + }, current_request=1, current_response=1]] diff --git a/testing/btest/Baseline/doc.sphinx.include-doc_scripting_connection_record_02_bro/output b/testing/btest/Baseline/doc.sphinx.include-doc_scripting_connection_record_02_bro/output index e4552b8580..12092ee2a0 100644 --- a/testing/btest/Baseline/doc.sphinx.include-doc_scripting_connection_record_02_bro/output +++ b/testing/btest/Baseline/doc.sphinx.include-doc_scripting_connection_record_02_bro/output @@ -3,7 +3,7 @@ connection_record_02.bro @load base/protocols/conn -@load base/protocols/dns +@load base/protocols/http event connection_state_remove(c: connection) { diff --git a/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log b/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log deleted file mode 100644 index 18d5769abf..0000000000 --- a/testing/btest/Baseline/scripts.policy.protocols.dns.event-priority/dns.log +++ /dev/null @@ -1,10 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path dns -#open 2013-08-26-19-04-37 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs rejected auth addl -#types time string addr port addr port enum count string count string count string count string bool bool bool bool count vector[string] vector[interval] bool table[string] table[string] -930613226.518174 CXWv6p3arKYeMETxOg 212.180.42.100 25000 131.243.64.3 53 tcp 34798 - - - - - 0 NOERROR F F F T 0 4.3.2.1 31337.000000 F - - -#close 2013-08-26-19-04-37 diff --git a/testing/btest/Traces/dns-session.trace b/testing/btest/Traces/dns-inverse-query.trace similarity index 100% rename from testing/btest/Traces/dns-session.trace rename to testing/btest/Traces/dns-inverse-query.trace diff --git a/testing/btest/doc/sphinx/connection-record-01.btest b/testing/btest/doc/sphinx/connection-record-01.btest index b379fb4fbe..3704d58932 100644 --- a/testing/btest/doc/sphinx/connection-record-01.btest +++ b/testing/btest/doc/sphinx/connection-record-01.btest @@ -1 +1 @@ -@TEST-EXEC: btest-rst-cmd bro -b -r ${TRACES}/dns-session.trace 
${DOC_ROOT}/scripting/connection_record_01.bro +@TEST-EXEC: btest-rst-cmd bro -b -r ${TRACES}/http/get.trace ${DOC_ROOT}/scripting/connection_record_01.bro diff --git a/testing/btest/doc/sphinx/connection-record-02.btest b/testing/btest/doc/sphinx/connection-record-02.btest index 292503e12c..0b0c87c1f2 100644 --- a/testing/btest/doc/sphinx/connection-record-02.btest +++ b/testing/btest/doc/sphinx/connection-record-02.btest @@ -1 +1 @@ -@TEST-EXEC: btest-rst-cmd bro -b -r ${TRACES}/dns-session.trace ${DOC_ROOT}/scripting/connection_record_02.bro +@TEST-EXEC: btest-rst-cmd bro -b -r ${TRACES}/http/get.trace ${DOC_ROOT}/scripting/connection_record_02.bro diff --git a/testing/btest/doc/sphinx/include-doc_scripting_connection_record_02_bro.btest b/testing/btest/doc/sphinx/include-doc_scripting_connection_record_02_bro.btest index e4552b8580..12092ee2a0 100644 --- a/testing/btest/doc/sphinx/include-doc_scripting_connection_record_02_bro.btest +++ b/testing/btest/doc/sphinx/include-doc_scripting_connection_record_02_bro.btest @@ -3,7 +3,7 @@ connection_record_02.bro @load base/protocols/conn -@load base/protocols/dns +@load base/protocols/http event connection_state_remove(c: connection) { diff --git a/testing/btest/scripts/policy/protocols/dns/event-priority.bro b/testing/btest/scripts/policy/protocols/dns/event-priority.bro deleted file mode 100644 index 2165b102e8..0000000000 --- a/testing/btest/scripts/policy/protocols/dns/event-priority.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/dns-session.trace %INPUT -# @TEST-EXEC: btest-diff dns.log - -@load protocols/dns/auth-addl diff --git a/testing/btest/scripts/policy/protocols/dns/inverse-request.bro b/testing/btest/scripts/policy/protocols/dns/inverse-request.bro new file mode 100644 index 0000000000..d695060707 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/dns/inverse-request.bro @@ -0,0 +1,4 @@ +# @TEST-EXEC: bro -r $TRACES/dns-inverse-query.trace %INPUT +# @TEST-EXEC: test ! -e dns.log + +@load protocols/dns/auth-addl From 62b3cb0a5b7bdd8fed1d7d0dae3337115b2feae7 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 28 Jan 2014 12:28:12 -0800 Subject: [PATCH 10/47] Also use exec-module test to check for leaks. --- testing/btest/core/leaks/exec.test | 80 ++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 testing/btest/core/leaks/exec.test diff --git a/testing/btest/core/leaks/exec.test b/testing/btest/core/leaks/exec.test new file mode 100644 index 0000000000..887ab01d39 --- /dev/null +++ b/testing/btest/core/leaks/exec.test @@ -0,0 +1,80 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b ../exectest.bro +# @TEST-EXEC: btest-bg-wait 15 + +@TEST-START-FILE exectest.bro + +@load base/utils/exec +@load base/frameworks/communication # let network-time run. otherwise there are no heartbeats... +redef exit_only_after_terminate = T; + +global c: count = 0; + +function check_exit_condition() + { + c += 1; + + if ( c == 3 ) + terminate(); + } + +function test_cmd(label: string, cmd: Exec::Command) + { + when ( local result = Exec::run(cmd) ) + { + print label, result; + check_exit_condition(); + } + } + +event bro_init() + { + test_cmd("test1", [$cmd="bash ../somescript.sh", + $read_files=set("out1", "out2")]); + test_cmd("test2", [$cmd="bash ../nofiles.sh"]); + # Not sure of a portable way to test signals yet. 
+ #test_cmd("test3", [$cmd="bash ../suicide.sh"]); + test_cmd("test4", [$cmd="bash ../stdin.sh", $stdin="hibye"]); + } + +@TEST-END-FILE + +@TEST-START-FILE somescript.sh +#! /usr/bin/env bash +echo "insert text here" > out1 +echo "and here" >> out1 +echo "insert more text here" > out2 +echo "and there" >> out2 +echo "done" +echo "exit" +echo "stop" +@TEST-END-FILE + +@TEST-START-FILE nofiles.sh +#! /usr/bin/env bash +echo "here's something on stdout" +echo "some more stdout" +echo "last stdout" +echo "and some stderr" 1>&2 +echo "more stderr" 1>&2 +echo "last stderr" 1>&2 +exit 1 +@TEST-END-FILE + +@TEST-START-FILE suicide.sh +#! /usr/bin/env bash +echo "FML" +kill -9 $$ +echo "nope" +@TEST-END-FILE + +@TEST-START-FILE stdin.sh +#! /usr/bin/env bash +read -r line +echo "$line" +@TEST-END-FILE From 55a8725ce28891b8ea4a470334c093f4396566b5 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Wed, 29 Jan 2014 08:42:48 -0800 Subject: [PATCH 11/47] Updating submodule(s). [nomail] --- aux/broctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broctl b/aux/broctl index c4b5fb7336..9ff2e2ced6 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit c4b5fb7336f2b598cf69777a7ec91b4aa16cacd1 +Subproject commit 9ff2e2ced64a3bd4af1268154e261671a1153481 From 4c52c378d5873abb052d688251f0ec7f5aa1c514 Mon Sep 17 00:00:00 2001 From: Jeannette Dopheide Date: Wed, 29 Jan 2014 11:23:31 -0600 Subject: [PATCH 12/47] Added some grammar and spelling corrections to Installation and Quick Start Guide. --- doc/install/guidelines.rst | 35 +++++++++++++++++------------------ doc/install/install.rst | 4 ++-- doc/quickstart/index.rst | 2 +- 3 files changed, 20 insertions(+), 21 deletions(-) diff --git a/doc/install/guidelines.rst b/doc/install/guidelines.rst index 7835c83716..2e56b1b17e 100644 --- a/doc/install/guidelines.rst +++ b/doc/install/guidelines.rst @@ -12,32 +12,31 @@ local customizations over. In the following we summarize general guidelines for upgrading, see the :ref:`release-notes` for version-specific information. -Re-Using Previous Install Prefix +Reusing Previous Install Prefix ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you choose to configure and install Bro with the same prefix directory as before, local customization and configuration to files in ``$prefix/share/bro/site`` and ``$prefix/etc`` won't be overwritten (``$prefix`` indicating the root of where Bro was installed). Also, logs -generated at run-time won't be touched by the upgrade. (But making -a backup of local changes before upgrading is still recommended.) +generated at run-time won't be touched by the upgrade. Backing up local +changes before upgrading is still recommended. After upgrading, remember to check ``$prefix/share/bro/site`` and -``$prefix/etc`` for ``.example`` files, which indicate the -distribution's version of the file differs from the local one, which may -include local changes. Review the differences, and make adjustments -as necessary (for differences that aren't the result of a local change, -use the new version's). +``$prefix/etc`` for ``.example`` files, which indicate that the +distribution's version of the file differs from the local one, and therefore, +may include local changes. Review the differences and make adjustments +as necessary. Use the new version for differences that aren't a result of +a local change. 
-Using a New Install prefix +Using a New Install Prefix ~~~~~~~~~~~~~~~~~~~~~~~~~~ -If you want to install the newer version in a different prefix -directory than before, you can just copy local customization and -configuration files from ``$prefix/share/bro/site`` and ``$prefix/etc`` -to the new location (``$prefix`` indicating the root of where Bro was -originally installed). Make sure to review the files for difference -before copying and make adjustments as necessary (for differences that -aren't the result of a local change, use the new version's). Of -particular note, the copied version of ``$prefix/etc/broctl.cfg`` is -likely to need changes to the ``SpoolDir`` and ``LogDir`` settings. +To install the newer version in a different prefix directory than before, +copy local customization and configuration files from ``$prefix/share/bro/site`` +and ``$prefix/etc`` to the new location (``$prefix`` indicating the root of +where Bro was originally installed). Review the files for differences +before copying and make adjustments as necessary (use the new version for +differences that aren't a result of a local change). Of particular note, +the copied version of ``$prefix/etc/broctl.cfg`` is likely to need changes +to the ``SpoolDir`` and ``LogDir`` settings. diff --git a/doc/install/install.rst b/doc/install/install.rst index 7030c95642..7400d640fe 100644 --- a/doc/install/install.rst +++ b/doc/install/install.rst @@ -3,7 +3,7 @@ .. _Xcode: https://developer.apple.com/xcode/ .. _MacPorts: http://www.macports.org .. _Fink: http://www.finkproject.org -.. _Homebrew: http://mxcl.github.com/homebrew +.. _Homebrew: http://brew.sh .. _bro downloads page: http://bro.org/download/index.html .. _installing-bro: @@ -144,7 +144,7 @@ Bro releases are bundled into source packages for convenience and are available on the `bro downloads page`_. Alternatively, the latest Bro development version can be obtained through git repositories hosted at ``git.bro.org``. See our `git development documentation -`_ for comprehensive +`_ for comprehensive information on Bro's use of git revision control, but the short story for downloading the full source code experience for Bro via git is: diff --git a/doc/quickstart/index.rst b/doc/quickstart/index.rst index e8ead2cf01..85fdb88d7f 100644 --- a/doc/quickstart/index.rst +++ b/doc/quickstart/index.rst @@ -407,7 +407,7 @@ logging) and adds SSL certificate validation. You might notice that a script you load from the command line uses the ``@load`` directive in the Bro language to declare dependence on other scripts. This directive is similar to the ``#include`` of C/C++, except the semantics -are "load this script if it hasn't already been loaded". +are, "load this script if it hasn't already been loaded." .. note:: If one wants Bro to be able to load scripts that live outside the default directories in Bro's installation root, the ``BROPATH`` environment From 1842d324cb919c125cff727ff73655503f50bfb6 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 29 Jan 2014 15:34:24 -0600 Subject: [PATCH 13/47] Extend file analysis API to allow file ID caching, adapt HTTP to use it. This allows an analyzer to either provide file IDs associated with some file content or to cache a file ID that was already determined by script-layer logic so that subsequent calls to the file analysis interface can bypass costly detours through script-layer. This can yield a decent performance improvement for analyzers that are able to take advantage of it and deal with streaming content (like HTTP). 
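For readers unfamiliar with the script-layer detour being bypassed: without a
cached ID, every chunk of file data makes the core raise the get_file_handle
event and drain the event queue so a script callback can name the file before
analysis continues. Below is a rough, illustrative sketch of that script side,
assuming the Files::register_protocol interface of the files framework (the
real HTTP callback in base/protocols/http/files.bro builds a richer handle
string; names here are simplified):

    @load base/frameworks/files

    # Hypothetical, simplified handle builder. Any string that uniquely names
    # the file within the connection works; the core hashes it into the file
    # ID that the new API lets an analyzer cache.
    function http_file_handle(c: connection, is_orig: bool): string
        {
        return cat(Analyzer::ANALYZER_HTTP, c$start_time, is_orig,
                   c$id$orig_h, c$id$orig_p, c$id$resp_h, c$id$resp_p);
        }

    event bro_init()
        {
        Files::register_protocol(Analyzer::ANALYZER_HTTP,
                                 [$get_file_handle = http_file_handle]);
        }

Caching the returned file ID lets an analyzer skip this round trip for every
subsequent chunk of the same file.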
--- src/analyzer/protocol/http/HTTP.cc | 16 ++++--- src/analyzer/protocol/http/HTTP.h | 1 + src/file_analysis/Manager.cc | 67 +++++++++++++++++++----------- src/file_analysis/Manager.h | 51 +++++++++++++++++++---- 4 files changed, 95 insertions(+), 40 deletions(-) diff --git a/src/analyzer/protocol/http/HTTP.cc b/src/analyzer/protocol/http/HTTP.cc index ffdcad226f..f9b9496992 100644 --- a/src/analyzer/protocol/http/HTTP.cc +++ b/src/analyzer/protocol/http/HTTP.cc @@ -242,10 +242,11 @@ int HTTP_Entity::Undelivered(int64_t len) if ( end_of_data && in_header ) return 0; - file_mgr->Gap(body_length, len, + cached_file_id = file_mgr->Gap(body_length, len, http_message->MyHTTP_Analyzer()->GetAnalyzerTag(), http_message->MyHTTP_Analyzer()->Conn(), - http_message->IsOrig()); + http_message->IsOrig(), + cached_file_id); if ( chunked_transfer_state != NON_CHUNKED_TRANSFER ) { @@ -314,15 +315,18 @@ void HTTP_Entity::SubmitData(int len, const char* buf) else { if ( send_size && content_length > 0 ) - file_mgr->SetSize(content_length, + cached_file_id = file_mgr->SetSize(content_length, http_message->MyHTTP_Analyzer()->GetAnalyzerTag(), http_message->MyHTTP_Analyzer()->Conn(), - http_message->IsOrig()); + http_message->IsOrig(), + cached_file_id); - file_mgr->DataIn(reinterpret_cast(buf), len, + cached_file_id = file_mgr->DataIn(reinterpret_cast(buf), + len, http_message->MyHTTP_Analyzer()->GetAnalyzerTag(), http_message->MyHTTP_Analyzer()->Conn(), - http_message->IsOrig()); + http_message->IsOrig(), + cached_file_id); } send_size = false; diff --git a/src/analyzer/protocol/http/HTTP.h b/src/analyzer/protocol/http/HTTP.h index 8339e48e3b..9951fa461b 100644 --- a/src/analyzer/protocol/http/HTTP.h +++ b/src/analyzer/protocol/http/HTTP.h @@ -64,6 +64,7 @@ protected: uint64_t offset; int64_t instance_length; // total length indicated by content-range bool send_size; // whether to send size indication to FAF + std::string cached_file_id; MIME_Entity* NewChildEntity() { return new HTTP_Entity(http_message, this, 1); } diff --git a/src/file_analysis/Manager.cc b/src/file_analysis/Manager.cc index 0337dbb098..56192e5bf4 100644 --- a/src/file_analysis/Manager.cc +++ b/src/file_analysis/Manager.cc @@ -75,36 +75,47 @@ void Manager::SetHandle(const string& handle) current_file_id = HashHandle(handle); } -void Manager::DataIn(const u_char* data, uint64 len, uint64 offset, - analyzer::Tag tag, Connection* conn, bool is_orig) +string Manager::DataIn(const u_char* data, uint64 len, uint64 offset, + analyzer::Tag tag, Connection* conn, bool is_orig, + const string& cached_id) { - GetFileHandle(tag, conn, is_orig); - File* file = GetFile(current_file_id, conn, tag, is_orig); + string id = cached_id.empty() ? GetFileID(tag, conn, is_orig) : cached_id; + File* file = GetFile(id, conn, tag, is_orig); if ( ! file ) - return; + return ""; file->DataIn(data, len, offset); if ( file->IsComplete() ) + { RemoveFile(file->GetID()); + return ""; + } + + return id; } -void Manager::DataIn(const u_char* data, uint64 len, analyzer::Tag tag, - Connection* conn, bool is_orig) +string Manager::DataIn(const u_char* data, uint64 len, analyzer::Tag tag, + Connection* conn, bool is_orig, const string& cached_id) { - GetFileHandle(tag, conn, is_orig); + string id = cached_id.empty() ? GetFileID(tag, conn, is_orig) : cached_id; // Sequential data input shouldn't be going over multiple conns, so don't // do the check to update connection set. 
- File* file = GetFile(current_file_id, conn, tag, is_orig, false); + File* file = GetFile(id, conn, tag, is_orig, false); if ( ! file ) - return; + return ""; file->DataIn(data, len); if ( file->IsComplete() ) + { RemoveFile(file->GetID()); + return ""; + } + + return id; } void Manager::DataIn(const u_char* data, uint64 len, const string& file_id, @@ -133,8 +144,7 @@ void Manager::EndOfFile(analyzer::Tag tag, Connection* conn) void Manager::EndOfFile(analyzer::Tag tag, Connection* conn, bool is_orig) { // Don't need to create a file if we're just going to remove it right away. - GetFileHandle(tag, conn, is_orig); - RemoveFile(current_file_id); + RemoveFile(GetFileID(tag, conn, is_orig)); } void Manager::EndOfFile(const string& file_id) @@ -142,31 +152,37 @@ void Manager::EndOfFile(const string& file_id) RemoveFile(file_id); } -void Manager::Gap(uint64 offset, uint64 len, analyzer::Tag tag, - Connection* conn, bool is_orig) +string Manager::Gap(uint64 offset, uint64 len, analyzer::Tag tag, + Connection* conn, bool is_orig, const string& cached_id) { - GetFileHandle(tag, conn, is_orig); - File* file = GetFile(current_file_id, conn, tag, is_orig); + string id = cached_id.empty() ? GetFileID(tag, conn, is_orig) : cached_id; + File* file = GetFile(id, conn, tag, is_orig); if ( ! file ) - return; + return ""; file->Gap(offset, len); + return id; } -void Manager::SetSize(uint64 size, analyzer::Tag tag, Connection* conn, - bool is_orig) +string Manager::SetSize(uint64 size, analyzer::Tag tag, Connection* conn, + bool is_orig, const string& cached_id) { - GetFileHandle(tag, conn, is_orig); - File* file = GetFile(current_file_id, conn, tag, is_orig); + string id = cached_id.empty() ? GetFileID(tag, conn, is_orig) : cached_id; + File* file = GetFile(id, conn, tag, is_orig); if ( ! file ) - return; + return ""; file->SetTotalBytes(size); if ( file->IsComplete() ) + { RemoveFile(file->GetID()); + return ""; + } + + return id; } bool Manager::SetTimeoutInterval(const string& file_id, double interval) const @@ -317,15 +333,15 @@ bool Manager::IsIgnored(const string& file_id) return ignored.find(file_id) != ignored.end(); } -void Manager::GetFileHandle(analyzer::Tag tag, Connection* c, bool is_orig) +string Manager::GetFileID(analyzer::Tag tag, Connection* c, bool is_orig) { current_file_id.clear(); if ( IsDisabled(tag) ) - return; + return ""; if ( ! get_file_handle ) - return; + return ""; EnumVal* tagval = tag.AsEnumVal(); Ref(tagval); @@ -337,6 +353,7 @@ void Manager::GetFileHandle(analyzer::Tag tag, Connection* c, bool is_orig) mgr.QueueEvent(get_file_handle, vl); mgr.Drain(); // need file handle immediately so we don't have to buffer data + return current_file_id; } bool Manager::IsDisabled(analyzer::Tag tag) diff --git a/src/file_analysis/Manager.h b/src/file_analysis/Manager.h index cf73c6b52d..ce8aa6b7d7 100644 --- a/src/file_analysis/Manager.h +++ b/src/file_analysis/Manager.h @@ -82,9 +82,17 @@ public: * @param conn network connection over which the file data is transferred. * @param is_orig true if the file is being sent from connection originator * or false if is being sent in the opposite direction. + * @param cached_file_id may be set to a previous return value in order to + * bypass costly file handle lookups. + * @return a unique file ID string which, in certain contexts, may be + * cached and passed back in to a subsequent function call in order + * to avoid costly file handle lookups (which have to go through + * the \c get_file_handle script-layer event). 
An empty string + * indicates the associate file is not going to be analyzed further. */ - void DataIn(const u_char* data, uint64 len, uint64 offset, - analyzer::Tag tag, Connection* conn, bool is_orig); + std::string DataIn(const u_char* data, uint64 len, uint64 offset, + analyzer::Tag tag, Connection* conn, bool is_orig, + const std::string& cached_file_id = ""); /** * Pass in sequential file data. @@ -94,9 +102,17 @@ public: * @param conn network connection over which the file data is transferred. * @param is_orig true if the file is being sent from connection originator * or false if is being sent in the opposite direction. + * @param cached_file_id may be set to a previous return value in order to + * bypass costly file handle lookups. + * @return a unique file ID string which, in certain contexts, may be + * cached and passed back in to a subsequent function call in order + * to avoid costly file handle lookups (which have to go through + * the \c get_file_handle script-layer event). An empty string + * indicates the associate file is not going to be analyzed further. */ - void DataIn(const u_char* data, uint64 len, analyzer::Tag tag, - Connection* conn, bool is_orig); + std::string DataIn(const u_char* data, uint64 len, analyzer::Tag tag, + Connection* conn, bool is_orig, + const std::string& cached_file_id = ""); /** * Pass in sequential file data from external source (e.g. input framework). @@ -140,9 +156,17 @@ public: * @param conn network connection over which the file data is transferred. * @param is_orig true if the file is being sent from connection originator * or false if is being sent in the opposite direction. + * @param cached_file_id may be set to a previous return value in order to + * bypass costly file handle lookups. + * @return a unique file ID string which, in certain contexts, may be + * cached and passed back in to a subsequent function call in order + * to avoid costly file handle lookups (which have to go through + * the \c get_file_handle script-layer event). An empty string + * indicates the associate file is not going to be analyzed further. */ - void Gap(uint64 offset, uint64 len, analyzer::Tag tag, Connection* conn, - bool is_orig); + std::string Gap(uint64 offset, uint64 len, analyzer::Tag tag, + Connection* conn, bool is_orig, + const std::string& cached_file_id = ""); /** * Provide the expected number of bytes that comprise a file. @@ -151,9 +175,16 @@ public: * @param conn network connection over which the file data is transferred. * @param is_orig true if the file is being sent from connection originator * or false if is being sent in the opposite direction. + * @param cached_file_id may be set to a previous return value in order to + * bypass costly file handle lookups. + * @return a unique file ID string which, in certain contexts, may be + * cached and passed back in to a subsequent function call in order + * to avoid costly file handle lookups (which have to go through + * the \c get_file_handle script-layer event). An empty string + * indicates the associate file is not going to be analyzed further. */ - void SetSize(uint64 size, analyzer::Tag tag, Connection* conn, - bool is_orig); + std::string SetSize(uint64 size, analyzer::Tag tag, Connection* conn, + bool is_orig, const std::string& cached_file_id = ""); /** * Starts ignoring a file, which will finally be removed from internal @@ -283,8 +314,10 @@ protected: * @param conn network connection over which the file is transferred. 
* @param is_orig true if the file is being sent from connection originator * or false if is being sent in the opposite direction. + * @return #current_file_id, which is a hash of a unique file handle string + * set by a \c get_file_handle event handler. */ - void GetFileHandle(analyzer::Tag tag, Connection* c, bool is_orig); + std::string GetFileID(analyzer::Tag tag, Connection* c, bool is_orig); /** * Check if analysis is available for files transferred over a given From 2b84af5b80fc943c84a5675ec9cb0975fa35952f Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 29 Jan 2014 17:11:20 -0600 Subject: [PATCH 14/47] Revert use of HTTP file ID caching for gaps range request content. Just an oversight on my part, this makes the use of file ID caching consistent between the uses of the DataIn and Gap interfaces. --- src/analyzer/protocol/http/HTTP.cc | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/analyzer/protocol/http/HTTP.cc b/src/analyzer/protocol/http/HTTP.cc index f9b9496992..3e74ca645c 100644 --- a/src/analyzer/protocol/http/HTTP.cc +++ b/src/analyzer/protocol/http/HTTP.cc @@ -242,11 +242,17 @@ int HTTP_Entity::Undelivered(int64_t len) if ( end_of_data && in_header ) return 0; - cached_file_id = file_mgr->Gap(body_length, len, - http_message->MyHTTP_Analyzer()->GetAnalyzerTag(), - http_message->MyHTTP_Analyzer()->Conn(), - http_message->IsOrig(), - cached_file_id); + if ( is_partial_content ) + file_mgr->Gap(body_length, len, + http_message->MyHTTP_Analyzer()->GetAnalyzerTag(), + http_message->MyHTTP_Analyzer()->Conn(), + http_message->IsOrig()); + else + cached_file_id = file_mgr->Gap(body_length, len, + http_message->MyHTTP_Analyzer()->GetAnalyzerTag(), + http_message->MyHTTP_Analyzer()->Conn(), + http_message->IsOrig(), + cached_file_id); if ( chunked_transfer_state != NON_CHUNKED_TRANSFER ) { From c7cacb56b8d0efaf79d03466563025b43d12ba74 Mon Sep 17 00:00:00 2001 From: Jeannette Dopheide Date: Thu, 30 Jan 2014 13:13:26 -0600 Subject: [PATCH 15/47] Updates to Bro IDS documentation --- doc/broids/index.rst | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/doc/broids/index.rst b/doc/broids/index.rst index 46e0d6ded6..d401dbcf34 100644 --- a/doc/broids/index.rst +++ b/doc/broids/index.rst @@ -16,18 +16,18 @@ In the following sections, we present a few examples of common uses of Bro as an IDS. ------------------------------------------------ -Detecting an FTP Bruteforce attack and notifying +Detecting an FTP Brute-force Attack and Notifying ------------------------------------------------ -For the purpose of this exercise, we define FTP bruteforcing as too many +For the purpose of this exercise, we define FTP brute-forcing as too many rejected usernames and passwords occurring from a single address. We -start by defining a threshold for the number of attempts and a -monitoring interval in minutes as well as a new notice type. +start by defining a threshold for the number of attempts, a monitoring +interval (in minutes), and a new notice type. .. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro :lines: 9-25 -Now, using the ftp_reply event, we check for error codes from the `500 +Using the ftp_reply event, we check for error codes from the `500 series `_ for the "USER" and "PASS" commands, representing rejected usernames or passwords. 
For this, we can use the :bro:see:`FTP::parse_ftp_reply_code` @@ -38,9 +38,9 @@ function to break down the reply code and check if the first digit is a .. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro :lines: 52-60 -Next, we use the SumStats framework to raise a notice of the attack of -the attack when the number of failed attempts exceeds the specified -threshold during the measuring interval. +Next, we use the SumStats framework to raise a notice of the attack when +the number of failed attempts exceeds the specified threshold during the +measuring interval. .. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro :lines: 28-50 @@ -56,14 +56,14 @@ Below is the final code for our script. As a final note, the :doc:`detect-bruteforcing.bro ` script above is -include with Bro out of the box, so you only need to load it at startup -to instruct Bro to detect and notify of FTP bruteforce attacks. +included with Bro out of the box. Use this feature by loading this script +during startup. ------------- Other Attacks ------------- -Detecting SQL Injection attacks +Detecting SQL Injection Attacks ------------------------------- Checking files against known malware hashes @@ -76,5 +76,4 @@ list of known malware hashes. Bro simplifies this task by offering a :doc:`detect-MHR.bro ` script that creates and compares hashes against the `Malware Hash Registry `_ maintained by Team -Cymru. You only need to load this script along with your other scripts -at startup time. +Cymru. Use this feature by loading this script during startup. From 2e2cb0ffc9e85cf163e0488104fc62238a50e4dc Mon Sep 17 00:00:00 2001 From: Jeannette Dopheide Date: Thu, 30 Jan 2014 13:22:25 -0600 Subject: [PATCH 16/47] Updates to Logs and Cluster documentation --- doc/cluster/index.rst | 127 +++++++++++++++++++++++++++++++++++------- doc/logs/index.rst | 51 ++++++++--------- 2 files changed, 133 insertions(+), 45 deletions(-) diff --git a/doc/cluster/index.rst b/doc/cluster/index.rst index 6de70d38cc..c4df063db5 100644 --- a/doc/cluster/index.rst +++ b/doc/cluster/index.rst @@ -6,7 +6,13 @@ Setting up a Bro Cluster Intro ------ -Bro is not multithreaded, so once the limitations of a single processor core are reached, the only option currently is to spread the workload across many cores or even many physical computers. The cluster deployment scenario for Bro is the current solution to build these larger systems. The accompanying tools and scripts provide the structure to easily manage many Bro processes examining packets and doing correlation activities but acting as a singular, cohesive entity. +Bro is not multithreaded, so once the limitations of a single processor core +are reached the only option currently is to spread the workload across many +cores, or even many physical computers. The cluster deployment scenario for +Bro is the current solution to build these larger systems. The accompanying +tools and scripts provide the structure to easily manage many Bro processes +examining packets and doing correlation activities but acting as a singular, +cohesive entity. Architecture --------------- @@ -17,42 +23,98 @@ The figure below illustrates the main components of a Bro cluster. Tap *** -This is a mechanism that splits the packet stream in order to make a copy -available for inspection. Examples include the monitoring port on a switch and -an optical splitter for fiber networks. 
+The tap is a mechanism that splits the packet stream in order to make a copy +available for inspection. Examples include the monitoring port on a switch +and an optical splitter on fiber networks. Frontend ******** -This is a discrete hardware device or on-host technique that will split your traffic into many streams or flows. The Bro binary does not do this job. There are numerous ways to accomplish this task, some of which are described below in `Frontend Options`_. +The frontend is a discrete hardware device or on-host technique that splits +traffic into many streams or flows. The Bro binary does not do this job. +There are numerous ways to accomplish this task, some of which are described +below in `Frontend Options`_. Manager ******* -This is a Bro process which has two primary jobs. It receives log messages and notices from the rest of the nodes in the cluster using the Bro communications protocol. The result is that you will end up with single logs for each log instead of many discrete logs that you have to later combine in some manner with post processing. The manager also takes the opportunity to de-duplicate notices and it has the ability to do so since it’s acting as the choke point for notices and how notices might be processed into actions such as emailing, paging, or blocking. +The manager is a Bro process that has two primary jobs. It receives log +messages and notices from the rest of the nodes in the cluster using the Bro +communications protocol. The result is a single log instead of many +discrete logs that you have to combine in some manner with post-processing. +The manager also takes the opportunity to de-duplicate notices, and it has the +ability to do so since it’s acting as the choke point for notices and how notices +might be processed into actions (e.g., emailing, paging, or blocking). -The manager process is started first by BroControl and it only opens it’s designated port and waits for connections, it doesn’t initiate any connections to the rest of the cluster. Once the workers are started and connect to the manager, logs and notices will start arriving to the manager process from the workers. +The manager process is started first by BroControl and it only opens its +designated port and waits for connections, it doesn’t initiate any +connections to the rest of the cluster. Once the workers are started and +connect to the manager, logs and notices will start arriving to the manager +process from the workers. Proxy ***** -This is a Bro process which manages synchronized state. Variables can be synchronized across connected Bro processes automatically in Bro and proxies will help the workers by alleviating the need for all of the workers to connect directly to each other. +The proxy is a Bro process that manages synchronized state. Variables can +be synchronized across connected Bro processes automatically. Proxies help +the workers by alleviating the need for all of the workers to connect +directly to each other. -Examples of synchronized state from the scripts that ship with Bro are things such as the full list of “known” hosts and services which are hosts or services which have been detected as performing full TCP handshakes or an analyzed protocol has been found on the connection. If worker A detects host 1.2.3.4 as an active host, it would be beneficial for worker B to know that as well so worker A shares that information as an insertion to a set which travels to the cluster’s proxy and the proxy then sends that same set insertion to worker B. 
The result is that worker A and worker B have shared knowledge about host and services that are active on the network being monitored. +Examples of synchronized state from the scripts that ship with Bro include +the full list of “known” hosts and services (which are hosts or services +identified as performing full TCP handshakes) or an analyzed protocol has been +found on the connection. If worker A detects host 1.2.3.4 as an active host, +it would be beneficial for worker B to know that as well. So worker A shares +that information as an insertion to a set + which travels to the cluster’s +proxy and the proxy sends that same set insertion to worker B. The result +is that worker A and worker B have shared knowledge about host and services +that are active on the network being monitored. -The proxy model extends to having multiple proxies as well if necessary for performance reasons, it only adds one additional step for the Bro processes. Each proxy connects to another proxy in a ring and the workers are shared between them as evenly as possible. When a proxy receives some new bit of state, it will share that with it’s proxy which is then shared around the ring of proxies and down to all of the workers. From a practical standpoint, there are no rules of thumb established yet for the number of proxies necessary for the number of workers they are serving. Best is to start with a single proxy and add more if communication performance problems are found. +The proxy model extends to having multiple proxies when necessary for +performance reasons. It only adds one additional step for the Bro processes. +Each proxy connects to another proxy in a ring and the workers are shared +between them as evenly as possible. When a proxy receives some new bit of +state it will share that with its proxy, which is then shared around the +ring of proxies, and down to all of the workers. From a practical standpoint, +there are no rules of thumb established for the number of proxies +necessary for the number of workers they are serving. It is best to start +with a single proxy and add more if communication performance problems are +found. -Bro processes acting as proxies don’t tend to be extremely intense to CPU or memory and users frequently run proxy processes on the same physical host as the manager. +Bro processes acting as proxies don’t tend to be extremely hard on CPU +or memory and users frequently run proxy processes on the same physical +host as the manager. Worker ****** -This is the Bro process that sniffs network traffic and does protocol analysis on the reassembled traffic streams. Most of the work of an active cluster takes place on the workers and as such, the workers typically represent the bulk of the Bro processes that are running in a cluster. The fastest memory and CPU core speed you can afford is best here since all of the protocol parsing and most analysis will take place here. There are no particular requirements for the disks in workers since almost all logging is done remotely to the manager and very little is normally written to disk. +The worker is the Bro process that sniffs network traffic and does protocol +analysis on the reassembled traffic streams. Most of the work of an active +cluster takes place on the workers and as such, the workers typically +represent the bulk of the Bro processes that are running in a cluster. +The fastest memory and CPU core speed you can afford is recommended +since all of the protocol parsing and most analysis will take place here. 
+There are no particular requirements for the disks in workers since almost all +logging is done remotely to the manager, and normally very little is written +to disk. -The rule of thumb we have followed recently is to allocate approximately 1 core for every 80Mbps of traffic that is being analyzed, however this estimate could be extremely traffic mix specific. It has generally worked for mixed traffic with many users and servers. For example, if your traffic peaks around 2Gbps (combined) and you want to handle traffic at peak load, you may want to have 26 cores available (2048 / 80 == 25.6). If the 80Mbps estimate works for your traffic, this could be handled by 3 physical hosts dedicated to being workers with each one containing dual 6-core processors. +The rule of thumb we have followed recently is to allocate approximately 1 +core for every 80Mbps of traffic that is being analyzed. However, this +estimate could be extremely traffic mix-specific. It has generally worked +for mixed traffic with many users and servers. For example, if your traffic +peaks around 2Gbps (combined) and you want to handle traffic at peak load, +you may want to have 26 cores available (2048 / 80 == 25.6). If the 80Mbps +estimate works for your traffic, this could be handled by 3 physical hosts +dedicated to being workers with each one containing dual 6-core processors. -Once a flow based load balancer is put into place this model is extremely easy to scale as well so it’s recommended that you guess at the amount of hardware you will need to fully analyze your traffic. If it turns out that you need more, it’s relatively easy to increase the size of the cluster in most cases. +Once a flow-based load balancer is put into place this model is extremely +easy to scale. It is recommended that you estimate the amount of +hardware you will need to fully analyze your traffic. If more is needed it’s +relatively easy to increase the size of the cluster in most cases. Frontend Options ---------------- -There are many options for setting up a frontend flow distributor and in many cases it may even be beneficial to do multiple stages of flow distribution on the network and on the host. +There are many options for setting up a frontend flow distributor. In many +cases it is beneficial to do multiple stages of flow distribution +on the network and on the host. Discrete hardware flow balancers ******************************** @@ -60,12 +122,24 @@ Discrete hardware flow balancers cPacket ^^^^^^^ -If you are monitoring one or more 10G physical interfaces, the recommended solution is to use either a cFlow or cVu device from cPacket because they are currently being used very successfully at a number of sites. These devices will perform layer-2 load balancing by rewriting the destination ethernet MAC address to cause each packet associated with a particular flow to have the same destination MAC. The packets can then be passed directly to a monitoring host where each worker has a BPF filter to limit its visibility to only that stream of flows or onward to a commodity switch to split the traffic out to multiple 1G interfaces for the workers. This can ultimately greatly reduce costs since workers can use relatively inexpensive 1G interfaces. +If you are monitoring one or more 10G physical interfaces, the recommended +solution is to use either a cFlow or cVu device from cPacket because they +are used successfully at a number of sites. 
These devices will perform +layer-2 load balancing by rewriting the destination Ethernet MAC address +to cause each packet associated with a particular flow to have the same +destination MAC. The packets can then be passed directly to a monitoring +host where each worker has a BPF filter to limit its visibility to only that +stream of flows, or onward to a commodity switch to split the traffic out to +multiple 1G interfaces for the workers. This greatly reduces +costs since workers can use relatively inexpensive 1G interfaces. OpenFlow Switches ^^^^^^^^^^^^^^^^^ -We are currently exploring the use of OpenFlow based switches to do flow based load balancing directly on the switch which can greatly reduce frontend costs for many users. This document will be updated when we have more information. +We are currently exploring the use of OpenFlow based switches to do flow-based +load balancing directly on the switch, which greatly reduces frontend +costs for many users. This document will be updated when we have more +information. On host flow balancing ********************** @@ -73,14 +147,27 @@ On host flow balancing PF_RING ^^^^^^^ -The PF_RING software for Linux has a “clustering” feature which will do flow based load balancing across a number of processes that are sniffing the same interface. This will allow you to easily take advantage of multiple cores in a single physical host because Bro’s main event loop is single threaded and can’t natively utilize all of the cores. More information about Bro with PF_RING can be found here: (someone want to write a quick Bro/PF_RING tutorial to link to here? document installing kernel module, libpcap wrapper, building Bro with the --with-pcap configure option) +The PF_RING software for Linux has a “clustering” feature which will do +flow-based load balancing across a number of processes that are sniffing the +same interface. This allows you to easily take advantage of multiple +cores in a single physical host because Bro’s main event loop is single +threaded and can’t natively utilize all of the cores. More information about +Bro with PF_RING can be found here: (someone want to write a quick Bro/PF_RING +tutorial to link to here? document installing kernel module, libpcap +wrapper, building Bro with the --with-pcap configure option) Netmap ^^^^^^ -FreeBSD has an in-progress project named Netmap which will enable flow based load balancing as well. When it becomes viable for real world use, this document will be updated. +FreeBSD has an in-progress project named Netmap which will enable flow-based +load balancing as well. When it becomes viable for real world use, this +document will be updated. Click! Software Router ^^^^^^^^^^^^^^^^^^^^^^ -Click! can be used for flow based load balancing with a simple configuration. (link to an example for the config). This solution is not recommended on Linux due to Bro’s PF_RING support and only as a last resort on other operating systems since it causes a lot of overhead due to context switching back and forth between kernel and userland several times per packet. +Click! can be used for flow based load balancing with a simple configuration. +(link to an example for the config). This solution is not recommended on +Linux due to Bro’s PF_RING support and only as a last resort on other +operating systems since it causes a lot of overhead due to context switching +back and forth between kernel and userland several times per packet. 
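To make the Proxy section's synchronized-state discussion above more concrete,
here is a minimal script-level sketch (hypothetical variable name; the shipped
known-hosts/known-services scripts differ in detail). A container declared with
the &synchronized attribute is propagated to connected peers, so an insertion
made on one worker travels through the proxy ring and reaches the other
workers:

    # Illustrative only: a set shared across all cluster nodes.
    global active_hosts: set[addr] &synchronized;

    event connection_established(c: connection)
        {
        # Each worker adds hosts it observes; the insertion is forwarded
        # via the proxies to every other worker.
        add active_hosts[c$id$orig_h];
        }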
diff --git a/doc/logs/index.rst b/doc/logs/index.rst index b71546db72..ced9a78faa 100644 --- a/doc/logs/index.rst +++ b/doc/logs/index.rst @@ -24,17 +24,17 @@ Working with Log Files Generally, all of Bro's log files are produced by a corresponding script that defines their individual structure. However, as each log -file flows through the Logging Framework, there share a set of +file flows through the Logging Framework, they share a set of structural similarities. Without breaking into the scripting aspect of -Bro here, a bird's eye view of how the log files are produced would -progress as follows. The script's author defines the kinds of data, +Bro here, a bird's eye view of how the log files are produced +progresses as follows. The script's author defines the kinds of data, such as the originating IP address or the duration of a connection, which will make up the fields (i.e., columns) of the log file. The author then decides what network activity should generate a single log -file entry (i.e., one line); that could, e.g., be a connection having -been completed or an HTTP ``GET`` method being issued by an +file entry (i.e., one line). For example, this could be a connection +having been completed or an HTTP ``GET`` request being issued by an originator. When these behaviors are observed during operation, the -data is passed to the Logging Framework which, in turn, adds the entry +data is passed to the Logging Framework which adds the entry to the appropriate log file. As the fields of the log entries can be further customized by the @@ -57,7 +57,7 @@ data, the string ``(empty)`` as the indicator for an empty field and the ``-`` character as the indicator for a field that hasn't been set. The timestamp for when the file was created is included under ``#open``. The header then goes on to detail the fields being listed -in the file and the data types of those fields in ``#fields`` and +in the file and the data types of those fields, in ``#fields`` and ``#types``, respectively. These two entries are often the two most significant points of interest as they detail not only the field names but the data types used. When navigating through the different log @@ -66,12 +66,12 @@ definitions readily available saves the user some mental leg work. The field names are also a key resource for using the :ref:`bro-cut ` utility included with Bro, see below. -Next to the header follows the main content; in this example we see 7 +Next to the header follows the main content. In this example we see 7 connections with their key properties, such as originator and -responder IP addresses (note how Bro transparely handles both IPv4 and -IPv6), transport-layer ports, application-layer services - the -``service`` field is filled ias Bro determines a specific protocol to -be in use, independent of the connection's ports - payload size, and +responder IP addresses (note how Bro transparently handles both IPv4 and +IPv6), transport-layer ports, application-layer services ( - the +``service`` field is filled in as Bro determines a specific protocol to +be in use, independent of the connection's ports), payload size, and more. See :bro:type:`Conn::Info` for a description of all fields. In addition to ``conn.log``, Bro generates many further logs by @@ -87,8 +87,8 @@ default, including: A log of FTP session-level activity. ``files.log`` - Summaries of files transfered over the network. This information - is aggregrated from different protocols, including HTTP, FTP, and + Summaries of files transferred over the network. 
This information + is aggregated from different protocols, including HTTP, FTP, and SMTP. ``http.log`` @@ -106,7 +106,7 @@ default, including: ``weird.log`` A log of unexpected protocol-level activity. Whenever Bro's protocol analysis encounters a situation it would not expect - (e.g., an RFC violation) is logs it in this file. Note that in + (e.g., an RFC violation) it logs it in this file. Note that in practice, real-world networks tend to exhibit a large number of such "crud" that is usually not worth following up on. @@ -120,7 +120,7 @@ Using ``bro-cut`` The ``bro-cut`` utility can be used in place of other tools to build terminal commands that remain flexible and accurate independent of -possible changes to log file itself. It accomplishes this by parsing +possible changes to the log file itself. It accomplishes this by parsing the header in each file and allowing the user to refer to the specific columnar data available (in contrast to tools like ``awk`` that require the user to refer to fields referenced by their position). @@ -131,7 +131,7 @@ from a ``conn.log``: @TEST-EXEC: btest-rst-cmd -n 10 "cat conn.log | bro-cut id.orig_h id.orig_p id.resp_h duration" -The correspding ``awk`` command would look like this: +The corresponding ``awk`` command will look like this: .. btest:: using_bro @@ -185,8 +185,8 @@ Working with Timestamps ``bro-cut`` accepts the flag ``-d`` to convert the epoch time values in the log files to human-readable format. The following command -includes the human readable time stamp, the unique identifier and the -HTTP ``Host`` and HTTP ``URI`` as extracted from the ``http.log`` +includes the human readable time stamp, the unique identifier, the +HTTP ``Host``, and HTTP ``URI`` as extracted from the ``http.log`` file: .. btest:: using_bro @@ -218,7 +218,7 @@ See ``man strfime`` for more options for the format string. Using UIDs ---------- -While Bro can do signature based analysis, its primary focus is on +While Bro can do signature-based analysis, its primary focus is on behavioral detection which alters the practice of log review from "reactionary review" to a process a little more akin to a hunting trip. A common progression of review includes correlating a session @@ -254,12 +254,13 @@ network. ----------------------- Common Log Files ----------------------- -As a monitoring tool, Bro records a detailed view of the traffic inspected and the events generated in -a series of relevant log files. These files can later be reviewed for monitoring, auditing and troubleshooting -purposes. +As a monitoring tool, Bro records a detailed view of the traffic inspected +and the events generated in a series of relevant log files. These files can +later be reviewed for monitoring, auditing and troubleshooting purposes. -In this section we present a brief explanation of the most commonly used log files generated by Bro including links -to descriptions of some of the fields for each log type. +In this section we present a brief explanation of the most commonly used log +files generated by Bro including links to descriptions of some of the fields +for each log type. +-----------------+---------------------------------------+------------------------------+ | Log File | Description | Field Descriptions | From 121db68c302b82be9d83e64f6a12aba85d9ac243 Mon Sep 17 00:00:00 2001 From: Jeannette Dopheide Date: Thu, 30 Jan 2014 13:23:58 -0600 Subject: [PATCH 17/47] Updates to httpmonitor and mimestats documentation. 
--- doc/httpmonitor/index.rst | 21 ++++++++++----------- doc/mimestats/index.rst | 32 ++++++++++++++++---------------- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/doc/httpmonitor/index.rst b/doc/httpmonitor/index.rst index f6b2f5e122..5a4f28ebfe 100644 --- a/doc/httpmonitor/index.rst +++ b/doc/httpmonitor/index.rst @@ -10,7 +10,7 @@ http.log file. This file can then be used for analysis and auditing purposes. In the sections below we briefly explain the structure of the http.log -file. Then, we show you how to perform basic HTTP traffic monitoring and +file, then we show you how to perform basic HTTP traffic monitoring and analysis tasks with Bro. Some of these ideas and techniques can later be applied to monitor different protocols in a similar way. @@ -40,11 +40,10 @@ request to the root of Bro website:: Network administrators and security engineers, for instance, can use the information in this log to understand the HTTP activity on the network -and troubleshoot network problems or search for anomalous activities. At -this point, we would like to stress out the fact that there is no just -one right way to perform analysis; it will depend on the expertise of -the person doing the analysis and the specific details of the task to -accomplish. +and troubleshoot network problems or search for anomalous activities. We must +stress that there is no single right way to perform an analysis. It will +depend on the expertise of the person performing the analysis and the +specific details of the task. For more information about how to handle the HTTP protocol in Bro, including a complete list of the fields available in http.log, go to @@ -58,15 +57,15 @@ Detecting a Proxy Server A proxy server is a device on your network configured to request a service on behalf of a third system; one of the most common examples is a Web proxy server. A client without Internet access connects to the -proxy and requests a Web page; the proxy then sends the request to the -actual Web server, receives the response and passes it to the original +proxy and requests a web page, the proxy sends the request to the web +server, which receives the response, and passes it to the original client. Proxies were conceived to help manage a network and provide better -encapsulation. By themselves, proxies are not a security threat, but a +encapsulation. Proxies by themselves are not a security threat, but a misconfigured or unauthorized proxy can allow others, either inside or -outside the network, to access any Web site and even conduct malicious -activities anonymously using the network resources. +outside the network, to access any web site and even conduct malicious +activities anonymously using the network's resources. What Proxy Server traffic looks like ------------------------------------- diff --git a/doc/mimestats/index.rst b/doc/mimestats/index.rst index df17f4872f..dd2e039e8a 100644 --- a/doc/mimestats/index.rst +++ b/doc/mimestats/index.rst @@ -6,19 +6,19 @@ MIME Type Statistics ==================== Files are constantly transmitted over HTTP on regular networks. These -files belong to a specific category (i.e., executable, text, image, -etc.) identified by a `Multipurpose Internet Mail Extension (MIME) +files belong to a specific category (e.g., executable, text, image) +identified by a `Multipurpose Internet Mail Extension (MIME) `_. 
Although MIME was originally developed to identify the type of non-text attachments on email, it is -also used by Web browser to identify the type of files transmitted and +also used by a web browser to identify the type of files transmitted and present them accordingly. -In this tutorial, we will show how to use the Sumstats Framework to -collect some statistics information based on MIME types, specifically +In this tutorial, we will demonstrate how to use the Sumstats Framework +to collect statistical information based on MIME types; specifically, the total number of occurrences, size in bytes, and number of unique -hosts transmitting files over HTTP per each type. For instructions about -extracting and creating a local copy of these files, visit :ref:`this -` tutorial instead. +hosts transmitting files over HTTP per each type. For instructions on +extracting and creating a local copy of these files, visit :ref:`this +tutorial `. ------------------------------------------------ MIME Statistics with Sumstats @@ -30,31 +30,31 @@ Observations, where the event is observed and fed into the framework. (ii) Reducers, where observations are collected and measured. (iii) Sumstats, where the main functionality is implemented. -So, we start by defining our observation along with a record to store -all statistics values and an observation interval. We are conducting our -observation on the :bro:see:`HTTP::log_http` event and we are interested -in the MIME type, size of the file ("response_body_len") and the +We start by defining our observation along with a record to store +all statistical values and an observation interval. We are conducting our +observation on the :bro:see:`HTTP::log_http` event and are interested +in the MIME type, size of the file ("response_body_len"), and the originator host ("orig_h"). We use the MIME type as our key and create observers for the other two values. .. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro :lines: 6-29, 54-64 -Next, we create the reducers. The first one will accumulate file sizes -and the second one will make sure we only store a host ID once. Below is +Next, we create the reducers. The first will accumulate file sizes +and the second will make sure we only store a host ID once. Below is the partial code from a :bro:see:`bro_init` handler. .. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro :lines: 34-37 In our final step, we create the SumStats where we check for the -observation interval and once it expires, we populate the record +observation interval. Once it expires, we populate the record (defined above) with all the relevant data and write it to a log. .. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro :lines: 38-51 -Putting everything together we end up with the following final code for +After putting the three pieces together we end up with the following final code for our script. .. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro From c61dfb19630a163ca306be275638b546cac5d6ad Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 30 Jan 2014 17:21:01 -0600 Subject: [PATCH 18/47] Rewrite DNS state tracking which matches queries and replies. The previous method of matching queries with replies was still unreliable in cases where the reply contains no answers. 
The new code also takes extra measures to avoid pending state growing too large in cases where the condition to match a query with a corresponding reply is never met, but yet DNS messages continue to be exchanged over the same connection 5-tuple (preventing cleanup of the pending state). --- scripts/base/protocols/dns/main.bro | 248 +++++++++++++-------- scripts/policy/protocols/dns/auth-addl.bro | 19 +- 2 files changed, 166 insertions(+), 101 deletions(-) diff --git a/scripts/base/protocols/dns/main.bro b/scripts/base/protocols/dns/main.bro index 0651e23ada..21a0711159 100644 --- a/scripts/base/protocols/dns/main.bro +++ b/scripts/base/protocols/dns/main.bro @@ -63,15 +63,17 @@ export { ## The DNS query was rejected by the server. rejected: bool &log &default=F; - ## This value indicates if this request/response pair is ready - ## to be logged. - ready: bool &default=F; ## The total number of resource records in a reply message's ## answer section. total_answers: count &optional; ## The total number of resource records in a reply message's ## answer, authority, and additional sections. total_replies: count &optional; + + ## Whether the full DNS query has been seen. + saw_query: bool &default=F; + ## Whether the full DNS reply has been seen. + saw_reply: bool &default=F; }; ## An event that can be handled to access the :bro:type:`DNS::Info` @@ -90,7 +92,7 @@ export { ## ans: The general information of a RR response. ## ## reply: The specific response information according to RR type/class. - global do_reply: event(c: connection, msg: dns_msg, ans: dns_answer, reply: string); + global do_reply: hook(c: connection, msg: dns_msg, ans: dns_answer, reply: string); ## A hook that is called whenever a session is being set. ## This can be used if additional initialization logic needs to happen @@ -103,17 +105,42 @@ export { ## is_query: Indicator for if this is being called for a query or a response. global set_session: hook(c: connection, msg: dns_msg, is_query: bool); + ## Yields a queue of :bro:see:`DNS::Info` objects for a given + ## DNS message query/transaction ID. + type PendingMessages: table[count] of Queue::Queue; + + ## Called when a pending DNS query has not been matched with a reply (or + ## vice versa) in a sufficent amount of time. + ## + ## pending: table of pending messages, indexed by transaction ID. + ## + ## id: the index of he element being expired. + ## + ## Returns: amount of time to delay expiration of the element. + global expire_pending_msg: function(pending: PendingMessages, id: count): interval; + + ## The amount of time that DNS queries or replies for a given + ## query/transaction ID are allowed to be queued while waiting for + ## a matching reply or query. + const pending_msg_expiry_interval = 2min &redef; + + ## Give up trying to match pending DNS queries or replies for a given + ## query/transaction ID once this number of unmatched queries or replies + ## is reached (this shouldn't happen unless either the DNS server/resolver + ## is broken, Bro is not seeing all the DNS traffic, or an AXFR query + ## response is ongoing). + const max_pending_msgs = 50 &redef; + ## A record type which tracks the status of DNS queries for a given ## :bro:type:`connection`. type State: record { ## Indexed by query id, returns Info record corresponding to - ## query/response which haven't completed yet. - pending: table[count] of Queue::Queue; + ## queries that haven't been matched with a response yet. 
+ pending_queries: PendingMessages &read_expire=pending_msg_expiry_interval &expire_func=expire_pending_msg; - ## This is the list of DNS responses that have completed based - ## on the number of responses declared and the number received. - ## The contents of the set are transaction IDs. - finished_answers: set[count]; + ## Indexed by query id, returns Info record corresponding to + ## replies that haven't been matched with a query yet. + pending_replies: PendingMessages &read_expire=pending_msg_expiry_interval &expire_func=expire_pending_msg; }; } @@ -143,6 +170,51 @@ function new_session(c: connection, trans_id: count): Info return info; } +function log_unmatched_msgs_queue(q: Queue::Queue) + { + local infos: vector of Info; + Queue::get_vector(q, infos); + + for ( i in infos ) + Log::write(DNS::LOG, infos[i]); + } + +function log_unmatched_msgs(msgs: PendingMessages) + { + for ( trans_id in msgs ) + { + log_unmatched_msgs_queue(msgs[trans_id]); + delete msgs[trans_id]; + } + } + +function enqueue_new_msg(msgs: PendingMessages, id: count, msg: Info) + { + if ( id !in msgs ) + msgs[id] = Queue::init(); + else if ( Queue::len(msgs[id]) > max_pending_msgs ) + { + local info: Info = Queue::peek(msgs[id]); + event flow_weird("dns_unmatched_msg_quantity", info$id$orig_h, + info$id$resp_h); + log_unmatched_msgs_queue(msgs[id]); + # Throw away all unmatched on assumption they'll never be matched. + msgs[id] = Queue::init(); + } + + Queue::put(msgs[id], msg); + } + +function pop_msg(msgs: PendingMessages, id: count): Info + { + local rval: Info = Queue::get(msgs[id]); + + if ( Queue::len(msgs[id]) == 0 ) + delete msgs[id]; + + return rval; + } + hook set_session(c: connection, msg: dns_msg, is_query: bool) &priority=5 { if ( ! c?$dns_state ) @@ -151,34 +223,39 @@ hook set_session(c: connection, msg: dns_msg, is_query: bool) &priority=5 c$dns_state = state; } - if ( msg$id !in c$dns_state$pending ) - c$dns_state$pending[msg$id] = Queue::init(); - - local info: Info; - # If this is either a query or this is the reply but - # no Info records are in the queue (we missed the query?) - # we need to create an Info record and put it in the queue. if ( is_query ) { - info = new_session(c, msg$id); - Queue::put(c$dns_state$pending[msg$id], info); + if ( msg$id in c$dns_state$pending_replies && + Queue::len(c$dns_state$pending_replies[msg$id]) > 0 ) + { + # Match this DNS query w/ what's at head of pending reply queue. + c$dns = pop_msg(c$dns_state$pending_replies, msg$id); + } + else + { + # Create a new DNS session and put it in the query queue so + # we can wait for a matching reply. + c$dns = new_session(c, msg$id); + enqueue_new_msg(c$dns_state$pending_queries, msg$id, c$dns); + } } - else if ( Queue::len(c$dns_state$pending[msg$id]) == 0 ) - { - info = new_session(c, msg$id); - Queue::put(c$dns_state$pending[msg$id], info); - event conn_weird("dns_unmatched_reply", c, ""); - } - - if ( is_query ) - # If this is a query, assign the newly created info variable - # so that the world looks correct to anything else handling - # this query. - c$dns = info; else - # Peek at the next item in the queue for this trans_id and - # assign it to c$dns since this is a response. - c$dns = Queue::peek(c$dns_state$pending[msg$id]); + { + if ( msg$id in c$dns_state$pending_queries && + Queue::len(c$dns_state$pending_queries[msg$id]) > 0 ) + { + # Match this DNS reply w/ what's at head of pending query queue. 
+ c$dns = pop_msg(c$dns_state$pending_queries, msg$id); + } + else + { + # Create a new DNS session and put it in the reply queue so + # we can wait for a matching query. + c$dns = new_session(c, msg$id); + event conn_weird("dns_unmatched_reply", c, ""); + enqueue_new_msg(c$dns_state$pending_replies, msg$id, c$dns); + } + } if ( ! is_query ) { @@ -188,19 +265,11 @@ hook set_session(c: connection, msg: dns_msg, is_query: bool) &priority=5 if ( ! c$dns?$total_answers ) c$dns$total_answers = msg$num_answers; - if ( c$dns?$total_replies && - c$dns$total_replies != msg$num_answers + msg$num_addl + msg$num_auth ) - { - event conn_weird("dns_changed_number_of_responses", c, - fmt("The declared number of responses changed from %d to %d", - c$dns$total_replies, - msg$num_answers + msg$num_addl + msg$num_auth)); - } - else - { - # Store the total number of responses expected from the first reply. + if ( ! c$dns?$total_replies ) c$dns$total_replies = msg$num_answers + msg$num_addl + msg$num_auth; - } + + if ( msg$rcode != 0 && msg$num_queries == 0 ) + c$dns$rejected = T; } } @@ -210,13 +279,10 @@ event dns_message(c: connection, is_orig: bool, msg: dns_msg, len: count) &prior # Currently only standard queries are tracked. return; - hook set_session(c, msg, is_orig); - - if ( msg$QR && msg$rcode != 0 && msg$num_queries == 0 ) - c$dns$rejected = T; + hook set_session(c, msg, ! msg$QR); } -event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5 +hook DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5 { if ( msg$opcode != 0 ) # Currently only standard queries are tracked. @@ -229,9 +295,6 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) if ( ans$answer_type == DNS_ANS ) { - if ( ! c?$dns ) - hook set_session(c, msg, F); - c$dns$AA = msg$AA; c$dns$RA = msg$RA; @@ -245,23 +308,25 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) c$dns$TTLs = vector(); c$dns$TTLs[|c$dns$TTLs|] = ans$TTL; } - - if ( c$dns?$answers && c$dns?$total_answers && - |c$dns$answers| == c$dns$total_answers ) - { - # Indicate this request/reply pair is ready to be logged. - c$dns$ready = T; - } } } -event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=-5 +event dns_end(c: connection, msg: dns_msg) &priority=5 { - if ( c?$dns && c$dns$ready ) + if ( ! c?$dns ) + return; + + if ( msg$QR ) + c$dns$saw_reply = T; + else + c$dns$saw_query = T; + } + +event dns_end(c: connection, msg: dns_msg) &priority=-5 + { + if ( c?$dns && c$dns$saw_reply && c$dns$saw_query ) { Log::write(DNS::LOG, c$dns); - # This record is logged and no longer pending. 
- Queue::get(c$dns_state$pending[c$dns$trans_id]); delete c$dns; } } @@ -291,63 +356,63 @@ event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qcla event dns_unknown_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5 { - event DNS::do_reply(c, msg, ans, fmt("", ans$qtype)); + hook DNS::do_reply(c, msg, ans, fmt("", ans$qtype)); } event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 { - event DNS::do_reply(c, msg, ans, fmt("%s", a)); + hook DNS::do_reply(c, msg, ans, fmt("%s", a)); } event dns_TXT_reply(c: connection, msg: dns_msg, ans: dns_answer, str: string) &priority=5 { - event DNS::do_reply(c, msg, ans, str); + hook DNS::do_reply(c, msg, ans, str); } event dns_AAAA_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 { - event DNS::do_reply(c, msg, ans, fmt("%s", a)); + hook DNS::do_reply(c, msg, ans, fmt("%s", a)); } event dns_A6_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 { - event DNS::do_reply(c, msg, ans, fmt("%s", a)); + hook DNS::do_reply(c, msg, ans, fmt("%s", a)); } event dns_NS_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5 { - event DNS::do_reply(c, msg, ans, name); + hook DNS::do_reply(c, msg, ans, name); } event dns_CNAME_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5 { - event DNS::do_reply(c, msg, ans, name); + hook DNS::do_reply(c, msg, ans, name); } event dns_MX_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string, preference: count) &priority=5 { - event DNS::do_reply(c, msg, ans, name); + hook DNS::do_reply(c, msg, ans, name); } event dns_PTR_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5 { - event DNS::do_reply(c, msg, ans, name); + hook DNS::do_reply(c, msg, ans, name); } event dns_SOA_reply(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa) &priority=5 { - event DNS::do_reply(c, msg, ans, soa$mname); + hook DNS::do_reply(c, msg, ans, soa$mname); } event dns_WKS_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5 { - event DNS::do_reply(c, msg, ans, ""); + hook DNS::do_reply(c, msg, ans, ""); } event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5 { - event DNS::do_reply(c, msg, ans, ""); + hook DNS::do_reply(c, msg, ans, ""); } # TODO: figure out how to handle these @@ -377,16 +442,23 @@ event connection_state_remove(c: connection) &priority=-5 if ( ! c?$dns_state ) return; - # If Bro is expiring state, we should go ahead and log all unlogged - # request/response pairs now. - for ( trans_id in c$dns_state$pending ) - { - local infos: vector of Info; - Queue::get_vector(c$dns_state$pending[trans_id], infos); - for ( i in infos ) - { - Log::write(DNS::LOG, infos[i]); - } - } + # If Bro is expiring state, we should go ahead and log all unmatched + # queries and replies now. 
+ log_unmatched_msgs(c$dns_state$pending_queries); + log_unmatched_msgs(c$dns_state$pending_replies); } +function expire_pending_msg(pending: PendingMessages, id: count): interval + { + local infos: vector of Info; + Queue::get_vector(pending[id], infos); + + for ( i in infos ) + { + Log::write(DNS::LOG, infos[i]); + event flow_weird("dns_unmatched_msg", infos[i]$id$orig_h, + infos[i]$id$resp_h); + } + + return 0sec; + } diff --git a/scripts/policy/protocols/dns/auth-addl.bro b/scripts/policy/protocols/dns/auth-addl.bro index bc97d529cd..a04cca37ab 100644 --- a/scripts/policy/protocols/dns/auth-addl.bro +++ b/scripts/policy/protocols/dns/auth-addl.bro @@ -19,17 +19,17 @@ export { }; } -event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=4 +hook DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5 { if ( msg$opcode != 0 ) # Currently only standard queries are tracked. return; - # The "ready" flag will be set here. This causes the setting from the - # base script to be overridden since the base script will log immediately - # after all of the ANS replies have been seen. - c$dns$ready=F; - + if ( ! msg$QR ) + # This is weird: the inquirer must also be providing answers in + # the request, which is not what we want to track. + return; + if ( ans$answer_type == DNS_AUTH ) { if ( ! c$dns?$auth ) @@ -42,11 +42,4 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) c$dns$addl = set(); add c$dns$addl[reply]; } - - if ( c$dns?$answers && c$dns?$auth && c$dns?$addl && - c$dns$total_replies == |c$dns$answers| + |c$dns$auth| + |c$dns$addl| ) - { - # *Now* all replies desired have been seen. - c$dns$ready = T; - } } From cdf09b4acef5c4c0e03d8170f6ac12bc514b582d Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 31 Jan 2014 09:56:20 -0800 Subject: [PATCH 19/47] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index 36b96eb9c1..23ff11bf0e 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 36b96eb9c13d1011bbc8be3581fd0f1c0bd8de44 +Subproject commit 23ff11bf0edbad2c6f1acbeb3f9a029ff4b61785 From 0cb2a90da4aa1de49e3881b9471e69f862d62038 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Fri, 31 Jan 2014 17:04:58 -0600 Subject: [PATCH 20/47] Add script to detect filtered TCP traces, addresses BIT-1119. If reading a trace file w/ only TCP control packets, a warning is emitted to suggest the 'detect_filtered_traces' option if the user doesn't desire Bro to report missing TCP segments for such a trace file. 
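For sites that do not want the new gap reporting for such pre-filtered traces, a minimal sketch of a local policy script (a hypothetical local.bro, not part of this patch) could toggle the options named above:

    # Hypothetical local.bro snippet; the option names are taken from this
    # patch series, everything else here is illustrative only.

    # Suppress reporting of missing segments for SYN/FIN/RST-filtered traces.
    redef detect_filtered_trace = T;

    # Alternatively, only silence the warning emitted by the new
    # find-filtered-trace.bro script.
    redef FilteredTraceDetection::enable = F;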
--- scripts/base/init-default.bro | 1 + scripts/base/misc/find-filtered-trace.bro | 49 ++++++++++++++++++ .../canonified_loaded_scripts.log | 5 +- .../out1 | 1 + .../out2 | 0 .../btest/Traces/http/bro.org-filtered.pcap | Bin 0 -> 3934 bytes .../base/misc/find-filtered-trace.test | 4 ++ 7 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 scripts/base/misc/find-filtered-trace.bro create mode 100644 testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out1 create mode 100644 testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out2 create mode 100644 testing/btest/Traces/http/bro.org-filtered.pcap create mode 100644 testing/btest/scripts/base/misc/find-filtered-trace.test diff --git a/scripts/base/init-default.bro b/scripts/base/init-default.bro index d0120d930b..d87574f4e5 100644 --- a/scripts/base/init-default.bro +++ b/scripts/base/init-default.bro @@ -60,3 +60,4 @@ @load base/misc/find-checksum-offloading +@load base/misc/find-filtered-trace diff --git a/scripts/base/misc/find-filtered-trace.bro b/scripts/base/misc/find-filtered-trace.bro new file mode 100644 index 0000000000..a723b656a7 --- /dev/null +++ b/scripts/base/misc/find-filtered-trace.bro @@ -0,0 +1,49 @@ +##! Discovers trace files that contain TCP traffic consisting only of +##! control packets (e.g. it's been filtered to contain only SYN/FIN/RST +##! packets and no content). On finding such a trace, a warning is +##! emitted that suggests toggling the :bro:see:`detect_filtered_trace` +##! option may be desired if the user does not want Bro to report +##! missing TCP segments. + +module FilteredTraceDetection; + +export { + + ## Flag to enable filtered trace file detection and warning message. + global enable: bool = T &redef; +} + +global saw_tcp_conn_with_data: bool = F; +global saw_a_tcp_conn: bool = F; + +event connection_state_remove(c: connection) + { + if ( ! reading_traces() ) + return; + + if ( ! enable ) + return; + + if ( saw_tcp_conn_with_data ) + return; + + if ( ! is_tcp_port(c$id$orig_p) ) + return; + + saw_a_tcp_conn = T; + + if ( /[Dd]/ in c$history ) + saw_tcp_conn_with_data = T; + } + +event bro_done() + { + if ( ! enable ) + return; + + if ( ! saw_a_tcp_conn ) + return; + + if ( ! saw_tcp_conn_with_data ) + Reporter::warning("The analyzed trace file was determined to contain only TCP control packets, which may indicate it's been pre-filtered. 
By default, Bro reports the missing segments for this type of trace, but the 'detect_filtered_trace' option may be toggled if that's not desired."); + } diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index 90145d94fb..76b3f3a596 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2013-10-30-16-52-28 +#open 2014-01-31-22-54-38 #fields name #types string scripts/base/init-bare.bro @@ -220,5 +220,6 @@ scripts/base/init-default.bro scripts/base/files/unified2/__load__.bro scripts/base/files/unified2/main.bro scripts/base/misc/find-checksum-offloading.bro + scripts/base/misc/find-filtered-trace.bro scripts/policy/misc/loaded-scripts.bro -#close 2013-10-30-16-52-28 +#close 2014-01-31-22-54-38 diff --git a/testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out1 b/testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out1 new file mode 100644 index 0000000000..c2f791ba82 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out1 @@ -0,0 +1 @@ +1389719059.311687 warning in /Users/jsiwek/Projects/bro/bro/scripts/base/misc/find-filtered-trace.bro, line 48: The analyzed trace file was determined to contain only TCP control packets, which may indicate it's been pre-filtered. By default, Bro reports the missing segments for this type of trace, but the 'detect_filtered_trace' option may be toggled if that's not desired. diff --git a/testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out2 b/testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out2 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/btest/Traces/http/bro.org-filtered.pcap b/testing/btest/Traces/http/bro.org-filtered.pcap new file mode 100644 index 0000000000000000000000000000000000000000..b25905079eec524262b750a53c96cf96364a9f72 GIT binary patch literal 3934 zcmaLae@vBC7zgn4z91k!#<*%KBwB%7`LR$EBWbd?G}Dpqzzy*-b89U_Cz?RTDsH%G zh-q$2Oq)YV1*J?-DKVq50e`s65rYuatZ>-GG}=rgdcNoMo(^00-0d=W|M=YRdCqg5 zbNQjQ;gHGvne!SOW6YpGjE!Ac=id)y%jkE!vsNRhwx+k&75J{U@CEH-m!N%%5M!bHmRs5c2+pFqTe#<1I|b zCpj@^n2~%S5Gr(LcgIVhUW{Jev;K1Ark(7%IMulS(ztOx|!X%adaBe zAmvEGxRh0Rj+Bt^P1earmO&0XB;`5GZa0?nJMx98zU5f@eBYH_`XuE1^HPpyc3sPf zCPyxQXPmrG%{(|=C#MWRE}bCd1ZKCCeE*&!Kl8x1d|-x74p;}deyfy|m|fxe^>;^J zJ>*-?U0`UsVHJIDM!^27N;f1~jq?oB@1h&B!-Y&gY0tX#Ry4Qz6)V9yooG-EN@1l% zsuoMr=9 za3+Ff=khbr*+$v6+Y!}$>ykvGXD92FrO!jjZcDE(TkmqK zOtIpulY9LJM=uq_y6}o>ZNF}4DjnU6)m6sI)nOJ>Cd#&g=ZY7rvwzYlG4)V(WlM$M zJC}+n747{gZ|Du7JPh{Jb+(tUt5jJRow)lW?M7w=vheZDmxdbEh4^XKb!0sfg;mN# z{3gh(Anze|?d1(3(eZ`q zK;LUk^px?nzOAg)wz}3Dq!(f$Bp-ApLKs1bNIc9l+mGT z$|0|l5^~vPog5kq`DmMzS24TQb>kQW$Wf1vlW9$2{g5wr>g0|skZ-*yZOE}VVW zk=-;+{`;mfG)}JX(#d6YkdrMbuV(gh)f@QvLgt8n?_1OoEtiS4eFVBu%`4mRyft(yD!A|KDs@haVtx)XkO$;d7E#Vn2ie?(Gz&V+WT>c zW;TX*Wl&Gl#LI5uQ|+cn-lKLzX6+DtgP26h2>lo0`;P881Eu#xsfg1sk5)*D(t?I} z3uXUdol%LU zMNGR|>T#~zZxYJIPj$+`LMRtor6SI9Gj$PHUOOw4nifqTx;;INl&UyoztM*#_2GH! 
zcQwCju;17z<6EuuBe!NN7W&YnK1_j?R0RhwSjl>;ms_!lg|lYTSxbVIa$HEl^60HA o+=@{w%oCI5Ngb@Y->BAhy+vn<>h*+TVV;;YPyT?l*yULN0OWAI4*&oF literal 0 HcmV?d00001 diff --git a/testing/btest/scripts/base/misc/find-filtered-trace.test b/testing/btest/scripts/base/misc/find-filtered-trace.test new file mode 100644 index 0000000000..05b603ac92 --- /dev/null +++ b/testing/btest/scripts/base/misc/find-filtered-trace.test @@ -0,0 +1,4 @@ +# @TEST-EXEC: bro -r $TRACES/http/bro.org-filtered.pcap >out1 2>&1 +# @TEST-EXEC: bro -r $TRACES/http/bro.org-filtered.pcap "FilteredTraceDetection::enable=F" >out2 2>&1 +# @TEST-EXEC: TEST_DIFF_CANOIFIER=$SCRIPTS/diff-remove-abspath btest-diff out1 +# @TEST-EXEC: btest-diff out2 From ab4508486e2337ba9eccf123343f6133530d6813 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 3 Feb 2014 16:54:48 -0600 Subject: [PATCH 21/47] Minor unified2 script documentation fix. --- scripts/base/files/unified2/main.bro | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/base/files/unified2/main.bro b/scripts/base/files/unified2/main.bro index 870f9335ae..2f6ae79f4f 100644 --- a/scripts/base/files/unified2/main.bro +++ b/scripts/base/files/unified2/main.bro @@ -7,10 +7,10 @@ module Unified2; export { redef enum Log::ID += { LOG }; - ## Directory to watch for Unified2 files. + ## File to watch for Unified2 files. const watch_file = "" &redef; - ## File to watch for Unified2 records. + ## Directory to watch for Unified2 records. const watch_dir = "" &redef; ## The sid-msg.map file you would like to use for your alerts. From 4b63b3090165b59863356a9e4ee228c2f1ac5809 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Wed, 5 Feb 2014 10:01:51 -0800 Subject: [PATCH 22/47] Fix x509-extension test sometimes failing. For some fields, the format apparently is not consistens over OpenSSL versions. For the test, we simply skip those. 
--- .../scripts.base.protocols.ssl.x509_extensions/.stdout | 2 -- .../btest/scripts/base/protocols/ssl/x509_extensions.test | 5 ++++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.protocols.ssl.x509_extensions/.stdout b/testing/btest/Baseline/scripts.base.protocols.ssl.x509_extensions/.stdout index 3f9c8661bf..c33135d3f8 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ssl.x509_extensions/.stdout +++ b/testing/btest/Baseline/scripts.base.protocols.ssl.x509_extensions/.stdout @@ -4,7 +4,6 @@ [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE] [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication] [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.3.6.1.4.1.6449.1.2.1.3.4^J CPS: https://secure.comodo.com/CPS^J] -[name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=^JFull Name:^J URI:http://crl.comodoca.com/COMODOHigh-AssuranceSecureServerCA.crl^J] [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=CA Issuers - URI:http://crt.comodoca.com/COMODOHigh-AssuranceSecureServerCA.crt^JOCSP - URI:http://ocsp.comodoca.com^J] [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.taleo.net, DNS:taleo.net] [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:AD:BD:98:7A:34:B4:26:F7:FA:C4:26:54:EF:03:BD:E0:24:CB:54:1A^J] @@ -12,7 +11,6 @@ [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign] [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0] [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: X509v3 Any Policy^J] -[name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=^JFull Name:^J URI:http://crl.usertrust.com/AddTrustExternalCARoot.crl^J] [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=CA Issuers - URI:http://crt.usertrust.com/AddTrustExternalCARoot.p7c^JCA Issuers - URI:http://crt.usertrust.com/AddTrustUTNSGCCA.crt^JOCSP - URI:http://ocsp.usertrust.com^J] [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=AD:BD:98:7A:34:B4:26:F7:FA:C4:26:54:EF:03:BD:E0:24:CB:54:1A] [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=F, value=Certificate Sign, CRL Sign] diff --git a/testing/btest/scripts/base/protocols/ssl/x509_extensions.test b/testing/btest/scripts/base/protocols/ssl/x509_extensions.test index 4db3233b27..c5e0b1b407 100644 --- a/testing/btest/scripts/base/protocols/ssl/x509_extensions.test +++ b/testing/btest/scripts/base/protocols/ssl/x509_extensions.test @@ -3,5 +3,8 @@ event x509_extension(c: connection, is_orig: bool, cert:X509, extension: X509_extension_info) { - print extension; + # The formatting of CRL Distribution Points varies between OpenSSL versions. Skip it + # for the test. 
+ if ( extension$short_name != "crlDistributionPoints" ) + print extension; } From d81bfed45da04d0e77496ea4487832d5aa1f95ba Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 6 Feb 2014 17:52:41 -0800 Subject: [PATCH 23/47] Fixing memory leaks in input framework. --- CHANGES | 7 +++ VERSION | 2 +- aux/btest | 2 +- src/input/Manager.cc | 18 ++++-- .../btest/core/leaks/input-with-remove.bro | 63 +++++++++++++++++++ 5 files changed, 84 insertions(+), 8 deletions(-) create mode 100644 testing/btest/core/leaks/input-with-remove.bro diff --git a/CHANGES b/CHANGES index ce57bfc99e..345e945207 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,11 @@ +2.2-140 | 2014-02-06 17:58:04 -0800 + + * Fixing memory leaks in input framework. (Robin Sommer) + + * Add script to detect filtered TCP traces. Addresses BIT-1119. (Jon + Siwek) + 2.2-137 | 2014-02-04 09:09:55 -0800 * Minor unified2 script documentation fix. (Jon Siwek) diff --git a/VERSION b/VERSION index c869973493..8611c50ec0 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2-137 +2.2-140 diff --git a/aux/btest b/aux/btest index 23ff11bf0e..808fd764b6 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 23ff11bf0edbad2c6f1acbeb3f9a029ff4b61785 +Subproject commit 808fd764b6f5198264177822db3f902f747c21cc diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 7af80892c6..95983faf26 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -397,7 +397,9 @@ bool Manager::CreateEventStream(RecordVal* fval) string stream_name = name_val->AsString()->CheckString(); Unref(name_val); - RecordType *fields = fval->Lookup("fields", true)->AsType()->AsTypeType()->Type()->AsRecordType(); + Val* fields_val = fval->Lookup("fields", true); + RecordType *fields = fields_val->AsType()->AsTypeType()->Type()->AsRecordType(); + Unref(fields_val); Val *want_record = fval->Lookup("want_record", true); @@ -548,13 +550,17 @@ bool Manager::CreateTableStream(RecordVal* fval) Val* pred = fval->Lookup("pred", true); - RecordType *idx = fval->Lookup("idx", true)->AsType()->AsTypeType()->Type()->AsRecordType(); + Val* idx_val = fval->Lookup("idx", true); + RecordType *idx = idx_val->AsType()->AsTypeType()->Type()->AsRecordType(); + Unref(idx_val); + RecordType *val = 0; - if ( fval->Lookup("val", true) != 0 ) + Val* val_val = fval->Lookup("val", true); + if ( val_val ) { - val = fval->Lookup("val", true)->AsType()->AsTypeType()->Type()->AsRecordType(); - Unref(val); // The lookupwithdefault in the if-clause ref'ed val. + val = val_val->AsType()->AsTypeType()->Type()->AsRecordType(); + Unref(val_val); } TableVal *dst = fval->Lookup("destination", true)->AsTableVal(); @@ -729,7 +735,7 @@ bool Manager::CreateTableStream(RecordVal* fval) stream->pred = pred ? pred->AsFunc() : 0; stream->num_idx_fields = idxfields; stream->num_val_fields = valfields; - stream->tab = dst->AsTableVal(); + stream->tab = dst->AsTableVal(); // ref'd by lookupwithdefault stream->rtype = val ? val->AsRecordType() : 0; stream->itype = idx->AsRecordType(); stream->event = event ? event_registry->Lookup(event->Name()) : 0; diff --git a/testing/btest/core/leaks/input-with-remove.bro b/testing/btest/core/leaks/input-with-remove.bro new file mode 100644 index 0000000000..62fcfa0a4e --- /dev/null +++ b/testing/btest/core/leaks/input-with-remove.bro @@ -0,0 +1,63 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 15 + +@load base/frameworks/input + +redef exit_only_after_terminate = T; + +global c: count = 0; + + +type OneLine: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print "1", "Line"; + } + +event InputRaw::process_finished(name: string, source:string, exit_code:count, signal_exit:bool) + { + Input::remove(name); + print "2", name; + } + +function run(): count + { + Input::add_event([$name=unique_id(""), + $source=fmt("%s |", "date"), + $reader=Input::READER_RAW, + $mode=Input::STREAM, + $fields=OneLine, + $ev=line, + $want_record=F]); + + return 1; + } + + +event do() + { + run(); + } + +event do_term() { + terminate(); +} + +event bro_init() { + schedule 1sec { + do() + }; + schedule 3sec { + do_term() + }; +} + From b64137761ee73ed10498645124575bf17f5630c9 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 6 Feb 2014 18:20:46 -0800 Subject: [PATCH 24/47] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index 808fd764b6..14d1f23fff 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 808fd764b6f5198264177822db3f902f747c21cc +Subproject commit 14d1f23fffff5bcc19d305992dac78cbff83b7be From a048082e688ef9ed3b3d0649dec2a79c1519847a Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 6 Feb 2014 20:23:34 -0800 Subject: [PATCH 25/47] Fixing bug in POP3 analyzer. With certain input the analyzer could end up trying to write to non-writable memory. --- CHANGES | 5 +++++ VERSION | 2 +- src/analyzer/protocol/pop3/POP3.cc | 7 +++---- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/CHANGES b/CHANGES index 345e945207..801b7fcd10 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,9 @@ +2.2-142 | 2014-02-06 20:23:34 -0800 + + * Fixing bug in POP3 analyzer. With certain input the analyzer could + end up trying to write to non-writable memory. (Robin Sommer) + 2.2-140 | 2014-02-06 17:58:04 -0800 * Fixing memory leaks in input framework. (Robin Sommer) diff --git a/VERSION b/VERSION index 8611c50ec0..56b1a615cd 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2-140 +2.2-142 diff --git a/src/analyzer/protocol/pop3/POP3.cc b/src/analyzer/protocol/pop3/POP3.cc index 388a055ee2..1b6b4c53b6 100644 --- a/src/analyzer/protocol/pop3/POP3.cc +++ b/src/analyzer/protocol/pop3/POP3.cc @@ -192,14 +192,13 @@ void POP3_Analyzer::ProcessRequest(int length, const char* line) case AUTH_CRAM_MD5: { // Format: "userpassword-hash" - char* s; - char* str = (char*) decoded->CheckString(); + const char* s; + const char* str = (char*) decoded->CheckString(); for ( s = str; *s && *s != '\t' && *s != ' '; ++s ) ; - *s = '\0'; - user = str; + user = std::string(str, s); password = ""; break; From c1f626d4ced9312ce132919daa1c92346252e687 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 6 Feb 2014 20:31:02 -0800 Subject: [PATCH 26/47] Updating submodule(s). [nomail] --- aux/broctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broctl b/aux/broctl index 437333e799..a3f9cc59af 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 437333e79964b5582f40ae213f69fa96ad590778 +Subproject commit a3f9cc59af51bf51a5a2d6e89b059059345a1d0f From 71df27f9d54b848d606a3bd5ba3755ecf2beb97e Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 6 Feb 2014 20:31:18 -0800 Subject: [PATCH 27/47] Updating submodule(s). 
[nomail] --- CHANGES | 2 +- VERSION | 2 +- aux/broctl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index 801b7fcd10..16d1bb69f4 100644 --- a/CHANGES +++ b/CHANGES @@ -1,5 +1,5 @@ -2.2-142 | 2014-02-06 20:23:34 -0800 +2.2-144 | 2014-02-06 20:31:18 -0800 * Fixing bug in POP3 analyzer. With certain input the analyzer could end up trying to write to non-writable memory. (Robin Sommer) diff --git a/VERSION b/VERSION index 56b1a615cd..d92d849fbb 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2-142 +2.2-144 diff --git a/aux/broctl b/aux/broctl index a3f9cc59af..66793ec3c6 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit a3f9cc59af51bf51a5a2d6e89b059059345a1d0f +Subproject commit 66793ec3c602439e235bee705b654aefb7ac8dec From 2bbf29681ea7151745fc06546e767bde95507bab Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 6 Feb 2014 21:07:46 -0800 Subject: [PATCH 28/47] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index 14d1f23fff..9d23ca5b7c 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 14d1f23fffff5bcc19d305992dac78cbff83b7be +Subproject commit 9d23ca5b7ced7fd5ec208d1a074bc40babde9a30 From f11373505deac2bd69ace4f71440c15e94671e13 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 7 Feb 2014 10:44:31 -0800 Subject: [PATCH 29/47] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index 9d23ca5b7c..73a736bddb 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 9d23ca5b7ced7fd5ec208d1a074bc40babde9a30 +Subproject commit 73a736bddb8931afc54bc52b567f639515b19f26 From 741ae7a368f4e58cde2d1db26695db92811224e3 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 7 Feb 2014 12:51:48 -0800 Subject: [PATCH 30/47] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index 73a736bddb..9a7b36a44a 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 73a736bddb8931afc54bc52b567f639515b19f26 +Subproject commit 9a7b36a44a7abeaa1dc2754d3072ab08995c6af8 From adfe3a0754d936de1dbe702848b5bf36fe8c19ed Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Mon, 10 Feb 2014 23:56:23 -0800 Subject: [PATCH 31/47] add channel_id tls extension number. This number is not IANA defined, but we see it being actively used. --- scripts/base/protocols/ssl/consts.bro | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/base/protocols/ssl/consts.bro b/scripts/base/protocols/ssl/consts.bro index 55289a7419..b81aebfbbb 100644 --- a/scripts/base/protocols/ssl/consts.bro +++ b/scripts/base/protocols/ssl/consts.bro @@ -86,6 +86,7 @@ export { [13172] = "next_protocol_negotiation", [13175] = "origin_bound_certificates", [13180] = "encrypted_client_certificates", + [30031] = "channel_id", [65281] = "renegotiation_info" } &default=function(i: count):string { return fmt("unknown-%d", i); }; From 506b26e5ff137f61e087292c0b62453086f5fca9 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Tue, 11 Feb 2014 15:30:22 -0500 Subject: [PATCH 32/47] Expanding the HTTP methods used in the signature to detect HTTP traffic. 
--- scripts/base/protocols/http/dpd.sig | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/base/protocols/http/dpd.sig b/scripts/base/protocols/http/dpd.sig index 13470f4e95..3e264f0bb3 100644 --- a/scripts/base/protocols/http/dpd.sig +++ b/scripts/base/protocols/http/dpd.sig @@ -1,6 +1,8 @@ +# List of HTTP headers pulled from: +# http://annevankesteren.nl/2007/10/http-methods signature dpd_http_client { ip-proto == tcp - payload /^[[:space:]]*(GET|HEAD|POST)[[:space:]]*/ + payload /^[[:space:]]*(OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT|PROPFIND|PROPPATCH|MKCOL|COPY|MOVE|LOCK|UNLOCK|VERSION-CONTROL|REPORT|CHECKOUT|CHECKIN|UNCHECKOUT|MKWORKSPACE|UPDATE|LABEL|MERGE|BASELINE-CONTROL|MKACTIVITY|ORDERPATCH|ACL|PATCH|SEARCH|BCOPY|BDELETE|BMOVE|BPROPFIND|BPROPPATCH|NOTIFY|POLL|SUBSCRIBE|UNSUBSCRIBE|X-MS-ENUMATTS|RPC_OUT_DATA|RPC_IN_DATA)[[:space:]]*/ tcp-state originator } @@ -11,3 +13,5 @@ signature dpd_http_server { requires-reverse-signature dpd_http_client enable "http" } + + From 64d73d5a2b76e84b472ee361535f91f2b00a4802 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 11 Feb 2014 15:41:16 -0800 Subject: [PATCH 33/47] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index 9a7b36a44a..acdf251b08 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 9a7b36a44a7abeaa1dc2754d3072ab08995c6af8 +Subproject commit acdf251b08980447b20c72ce0a5e6035665cbc22 From 39be3828fdb7632477cd8d4ccb0e060ca237e16f Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 11 Feb 2014 16:16:09 -0800 Subject: [PATCH 34/47] Baseline updates for DNS change. I assume these are expected, and in any case it's DS that's being tested not DNS. :) --- .../conn.ds.txt | 4 ++-- .../conn.ds.txt | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt index 5a48439a9f..3bf8a78707 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt @@ -54,8 +54,8 @@ # Extent, type='conn' ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents 1300475167096535 CXWv6p3arKYeMETxOg 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 -1300475167097012 CjhGID4nQcgTWjvg4c fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 -1300475167099816 CCvvfg3TEfuqmmG4bh 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 +1300475167097012 CjhGID4nQcgTWjvg4c fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp dns 0 0 0 S0 F 0 D 1 199 0 0 +1300475167099816 CCvvfg3TEfuqmmG4bh 141.142.220.50 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 179 0 0 1300475168853899 CPbrpk1qSsw6ESzHV4 141.142.220.118 43927 141.142.2.2 53 udp dns 435 38 89 SF F 0 Dd 1 66 1 117 1300475168854378 C6pKV8GSxOnSLghOa 141.142.220.118 37676 141.142.2.2 53 udp dns 420 52 99 SF F 0 Dd 1 80 1 127 1300475168854837 CIPOse170MGiRM1Qf4 141.142.220.118 40526 141.142.2.2 53 udp dns 391 38 183 SF F 0 Dd 1 66 1 211 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt 
b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt index 8f486c30e0..a82439c1e0 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt @@ -54,8 +54,8 @@ # Extent, type='conn' ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents 1300475167.096535 CXWv6p3arKYeMETxOg 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 -1300475167.097012 CjhGID4nQcgTWjvg4c fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 -1300475167.099816 CCvvfg3TEfuqmmG4bh 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0 +1300475167.097012 CjhGID4nQcgTWjvg4c fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp dns 0.000000 0 0 S0 F 0 D 1 199 0 0 +1300475167.099816 CCvvfg3TEfuqmmG4bh 141.142.220.50 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 179 0 0 1300475168.853899 CPbrpk1qSsw6ESzHV4 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF F 0 Dd 1 66 1 117 1300475168.854378 C6pKV8GSxOnSLghOa 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF F 0 Dd 1 80 1 127 1300475168.854837 CIPOse170MGiRM1Qf4 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211 From f45bd84f4ca65b235d17963dc97b4c152fd8ad57 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Tue, 11 Feb 2014 16:16:49 -0800 Subject: [PATCH 35/47] Updating submodule(s). [nomail] --- aux/btest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/btest b/aux/btest index acdf251b08..c3a65f1306 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit acdf251b08980447b20c72ce0a5e6035665cbc22 +Subproject commit c3a65f13063291ffcfd6d05c09d7724c02e9a40d From 6563b544d8b5e532006682acc3313c5989ce0fe5 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 12 Feb 2014 17:00:12 -0600 Subject: [PATCH 36/47] Fix memory leak in modbus analyzer. Would happen if there's a 'modbus_read_fifo_queue_response' event handler. 
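For reference, a minimal sketch of a script-land handler that would exercise the leaking code path (the event name comes from this commit; the parameter list is an assumption based on the analyzer's vector-of-count register data and is not part of this patch):

    # Hypothetical reproducer; having any handler for this event makes the
    # analyzer build the register_data vector that previously leaked.
    # The parameter types below are assumed, not taken from events.bif.
    event modbus_read_fifo_queue_response(c: connection, headers: ModbusHeaders, fifos: vector of count)
        {
        print fmt("FIFO queue response %s -> %s with %d registers",
                  c$id$orig_h, c$id$resp_h, |fifos|);
        }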
--- .../protocol/modbus/modbus-analyzer.pac | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/analyzer/protocol/modbus/modbus-analyzer.pac b/src/analyzer/protocol/modbus/modbus-analyzer.pac index a9c773b9e9..c2d009c961 100644 --- a/src/analyzer/protocol/modbus/modbus-analyzer.pac +++ b/src/analyzer/protocol/modbus/modbus-analyzer.pac @@ -10,6 +10,7 @@ %header{ VectorVal* bytestring_to_coils(bytestring coils, uint quantity); RecordVal* HeaderToBro(ModbusTCP_TransportHeader *header); + VectorVal* create_vector_of_count(); %} %code{ @@ -30,6 +31,14 @@ return modbus_header; } + VectorVal* create_vector_of_count() + { + VectorType* vt = new VectorType(base_type(TYPE_COUNT)); + VectorVal* vv = new VectorVal(vt); + Unref(vt); + return vv; + } + %} refine flow ModbusTCP_Flow += { @@ -367,7 +376,7 @@ refine flow ModbusTCP_Flow += { if ( ::modbus_read_file_record_request ) { //TODO: this need to be a vector of some Reference Request record type - //VectorVal *t = new VectorVal(new VectorType(base_type(TYPE_COUNT))); + //VectorVal *t = create_vector_of_count(); //for ( unsigned int i = 0; i < (${message.references}->size()); ++i ) // { // Val* r = new Val((${message.references[i].ref_type}), TYPE_COUNT); @@ -393,7 +402,7 @@ refine flow ModbusTCP_Flow += { %{ if ( ::modbus_read_file_record_response ) { - //VectorVal *t = new VectorVal(new VectorType(base_type(TYPE_COUNT))); + //VectorVal *t = create_vector_of_count(); //for ( unsigned int i = 0; i < ${message.references}->size(); ++i ) // { // //TODO: work the reference type in here somewhere @@ -414,7 +423,7 @@ refine flow ModbusTCP_Flow += { %{ if ( ::modbus_write_file_record_request ) { - //VectorVal* t = new VectorVal(new VectorType(base_type(TYPE_COUNT))); + //VectorVal* t = create_vector_of_count(); //for ( unsigned int i = 0; i < (${message.references}->size()); ++i ) // { // Val* r = new Val((${message.references[i].ref_type}), TYPE_COUNT); @@ -447,7 +456,7 @@ refine flow ModbusTCP_Flow += { %{ if ( ::modbus_write_file_record_response ) { - //VectorVal* t = new VectorVal(new VectorType(base_type(TYPE_COUNT))); + //VectorVal* t = create_vector_of_count(); //for ( unsigned int i = 0; i < (${messages.references}->size()); ++i ) // { // Val* r = new Val((${message.references[i].ref_type}), TYPE_COUNT); @@ -589,7 +598,7 @@ refine flow ModbusTCP_Flow += { if ( ::modbus_read_fifo_queue_response ) { - VectorVal* t = new VectorVal(new VectorType(base_type(TYPE_COUNT))); + VectorVal* t = create_vector_of_count(); for ( unsigned int i = 0; i < (${message.register_data})->size(); ++i ) { Val* r = new Val(${message.register_data[i]}, TYPE_COUNT); From e844727e7339a95054e05cdf8634d6fd47044e74 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 12 Feb 2014 17:03:51 -0600 Subject: [PATCH 37/47] Increase timeouts of some unit tests. --- testing/btest/core/leaks/basic-cluster.bro | 2 +- testing/btest/core/leaks/dataseries.bro | 2 +- testing/btest/core/leaks/file-analysis-http-get.bro | 2 +- testing/btest/core/leaks/hll_cluster.bro | 2 +- testing/btest/core/leaks/input-reread.bro | 2 +- testing/btest/core/leaks/test-all.bro | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/testing/btest/core/leaks/basic-cluster.bro b/testing/btest/core/leaks/basic-cluster.bro index 2c13c2315c..2d93469850 100644 --- a/testing/btest/core/leaks/basic-cluster.bro +++ b/testing/btest/core/leaks/basic-cluster.bro @@ -9,7 +9,7 @@ # @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m %INPUT # @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m %INPUT -# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-bg-wait 25 @TEST-START-FILE cluster-layout.bro redef Cluster::nodes = { diff --git a/testing/btest/core/leaks/dataseries.bro b/testing/btest/core/leaks/dataseries.bro index 61c9c030e9..fcb5782f4e 100644 --- a/testing/btest/core/leaks/dataseries.bro +++ b/testing/btest/core/leaks/dataseries.bro @@ -8,4 +8,4 @@ # # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks # @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES -# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-bg-wait 25 diff --git a/testing/btest/core/leaks/file-analysis-http-get.bro b/testing/btest/core/leaks/file-analysis-http-get.bro index 8256f3e6da..aa4708305e 100644 --- a/testing/btest/core/leaks/file-analysis-http-get.bro +++ b/testing/btest/core/leaks/file-analysis-http-get.bro @@ -5,7 +5,7 @@ # @TEST-GROUP: leaks # # @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.bro %INPUT -# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-bg-wait 25 redef test_file_analysis_source = "HTTP"; diff --git a/testing/btest/core/leaks/hll_cluster.bro b/testing/btest/core/leaks/hll_cluster.bro index a6f704a677..a843452e00 100644 --- a/testing/btest/core/leaks/hll_cluster.bro +++ b/testing/btest/core/leaks/hll_cluster.bro @@ -10,7 +10,7 @@ # @TEST-EXEC: sleep 2 # @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro runnumber=1 %INPUT # @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro runnumber=2 %INPUT -# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-bg-wait 25 # # @TEST-EXEC: btest-diff manager-1/.stdout # @TEST-EXEC: btest-diff worker-1/.stdout diff --git a/testing/btest/core/leaks/input-reread.bro b/testing/btest/core/leaks/input-reread.bro index fa37f04ede..c6ff5361be 100644 --- a/testing/btest/core/leaks/input-reread.bro +++ b/testing/btest/core/leaks/input-reread.bro @@ -14,7 +14,7 @@ # @TEST-EXEC: cp input4.log input.log # @TEST-EXEC: sleep 5 # @TEST-EXEC: cp input5.log input.log -# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-bg-wait 30 @TEST-START-FILE input1.log #separator \x09 diff --git a/testing/btest/core/leaks/test-all.bro b/testing/btest/core/leaks/test-all.bro index acba16bd6d..7cdccb202a 100644 --- a/testing/btest/core/leaks/test-all.bro +++ b/testing/btest/core/leaks/test-all.bro @@ -5,4 +5,4 @@ # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks # # @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/wikipedia.trace test-all-policy -# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-bg-wait 25 From eb744fd329a2147f94b0cd5d8b2e677fbdaa286a Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 13 Feb 2014 12:45:51 -0800 Subject: [PATCH 38/47] Revert "Expanding the HTTP methods used in the signature to detect HTTP traffic." This reverts commit 506b26e5ff137f61e087292c0b62453086f5fca9. The corresponding patch adding HTTP CONNECT support doesn't work yet so backing this out until we get that in shape. 
--- scripts/base/protocols/http/dpd.sig | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/scripts/base/protocols/http/dpd.sig b/scripts/base/protocols/http/dpd.sig index 3e264f0bb3..13470f4e95 100644 --- a/scripts/base/protocols/http/dpd.sig +++ b/scripts/base/protocols/http/dpd.sig @@ -1,8 +1,6 @@ -# List of HTTP headers pulled from: -# http://annevankesteren.nl/2007/10/http-methods signature dpd_http_client { ip-proto == tcp - payload /^[[:space:]]*(OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT|PROPFIND|PROPPATCH|MKCOL|COPY|MOVE|LOCK|UNLOCK|VERSION-CONTROL|REPORT|CHECKOUT|CHECKIN|UNCHECKOUT|MKWORKSPACE|UPDATE|LABEL|MERGE|BASELINE-CONTROL|MKACTIVITY|ORDERPATCH|ACL|PATCH|SEARCH|BCOPY|BDELETE|BMOVE|BPROPFIND|BPROPPATCH|NOTIFY|POLL|SUBSCRIBE|UNSUBSCRIBE|X-MS-ENUMATTS|RPC_OUT_DATA|RPC_IN_DATA)[[:space:]]*/ + payload /^[[:space:]]*(GET|HEAD|POST)[[:space:]]*/ tcp-state originator } @@ -13,5 +11,3 @@ signature dpd_http_server { requires-reverse-signature dpd_http_client enable "http" } - - From 3c95d1d695ed80d7796b680fd31fa8f3afaf8ad3 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Thu, 13 Feb 2014 14:55:45 -0600 Subject: [PATCH 39/47] Refactor DNS script's state management to improve performance. The amount of timers involved in DNS::PendingMessage tables' expiration attributes have a significant performance hit. Instead the script now relies solely on maximum thresholds for pending message quantities to limit amount of accumulated state. There's a new option, "DNS::max_pending_query_ids", to limit the number outstanding messages across all DNS query IDs ("DNS::max_pending_msgs" still limits number of outstanding messages for a *given* query ID). --- scripts/base/protocols/dns/main.bro | 64 +++++++++---------- .../weird.log | 5 +- 2 files changed, 33 insertions(+), 36 deletions(-) diff --git a/scripts/base/protocols/dns/main.bro b/scripts/base/protocols/dns/main.bro index 21a0711159..294220d1f2 100644 --- a/scripts/base/protocols/dns/main.bro +++ b/scripts/base/protocols/dns/main.bro @@ -109,16 +109,6 @@ export { ## DNS message query/transaction ID. type PendingMessages: table[count] of Queue::Queue; - ## Called when a pending DNS query has not been matched with a reply (or - ## vice versa) in a sufficent amount of time. - ## - ## pending: table of pending messages, indexed by transaction ID. - ## - ## id: the index of he element being expired. - ## - ## Returns: amount of time to delay expiration of the element. - global expire_pending_msg: function(pending: PendingMessages, id: count): interval; - ## The amount of time that DNS queries or replies for a given ## query/transaction ID are allowed to be queued while waiting for ## a matching reply or query. @@ -131,16 +121,21 @@ export { ## response is ongoing). const max_pending_msgs = 50 &redef; + ## Give up trying to match pending DNS queries or replies across all + ## query/transaction IDs once there is at least one unmatched query or + ## reply across this number of different query IDs. + const max_pending_query_ids = 50 &redef; + ## A record type which tracks the status of DNS queries for a given ## :bro:type:`connection`. type State: record { ## Indexed by query id, returns Info record corresponding to ## queries that haven't been matched with a response yet. - pending_queries: PendingMessages &read_expire=pending_msg_expiry_interval &expire_func=expire_pending_msg; + pending_queries: PendingMessages; ## Indexed by query id, returns Info record corresponding to ## replies that haven't been matched with a query yet. 
- pending_replies: PendingMessages &read_expire=pending_msg_expiry_interval &expire_func=expire_pending_msg; + pending_replies: PendingMessages; }; } @@ -176,7 +171,11 @@ function log_unmatched_msgs_queue(q: Queue::Queue) Queue::get_vector(q, infos); for ( i in infos ) + { + event flow_weird("dns_unmatched_msg", + infos[i]$id$orig_h, infos[i]$id$resp_h); Log::write(DNS::LOG, infos[i]); + } } function log_unmatched_msgs(msgs: PendingMessages) @@ -191,16 +190,28 @@ function log_unmatched_msgs(msgs: PendingMessages) function enqueue_new_msg(msgs: PendingMessages, id: count, msg: Info) { if ( id !in msgs ) - msgs[id] = Queue::init(); - else if ( Queue::len(msgs[id]) > max_pending_msgs ) { - local info: Info = Queue::peek(msgs[id]); - event flow_weird("dns_unmatched_msg_quantity", info$id$orig_h, - info$id$resp_h); - log_unmatched_msgs_queue(msgs[id]); - # Throw away all unmatched on assumption they'll never be matched. + if ( |msgs| > max_pending_query_ids ) + { + event flow_weird("dns_unmatched_query_id_quantity", + msg$id$orig_h, msg$id$resp_h); + # Throw away all unmatched on assumption they'll never be matched. + log_unmatched_msgs(msgs); + } + msgs[id] = Queue::init(); } + else + { + if ( Queue::len(msgs[id]) > max_pending_msgs ) + { + event flow_weird("dns_unmatched_msg_quantity", + msg$id$orig_h, msg$id$resp_h); + log_unmatched_msgs_queue(msgs[id]); + # Throw away all unmatched on assumption they'll never be matched. + msgs[id] = Queue::init(); + } + } Queue::put(msgs[id], msg); } @@ -447,18 +458,3 @@ event connection_state_remove(c: connection) &priority=-5 log_unmatched_msgs(c$dns_state$pending_queries); log_unmatched_msgs(c$dns_state$pending_replies); } - -function expire_pending_msg(pending: PendingMessages, id: count): interval - { - local infos: vector of Info; - Queue::get_vector(pending[id], infos); - - for ( i in infos ) - { - Log::write(DNS::LOG, infos[i]); - event flow_weird("dns_unmatched_msg", infos[i]$id$orig_h, - infos[i]$id$resp_h); - } - - return 0sec; - } diff --git a/testing/btest/Baseline/scripts.base.protocols.dns.duplicate-reponses/weird.log b/testing/btest/Baseline/scripts.base.protocols.dns.duplicate-reponses/weird.log index 175a474425..295de4ec2c 100644 --- a/testing/btest/Baseline/scripts.base.protocols.dns.duplicate-reponses/weird.log +++ b/testing/btest/Baseline/scripts.base.protocols.dns.duplicate-reponses/weird.log @@ -3,9 +3,10 @@ #empty_field (empty) #unset_field - #path weird -#open 2013-08-26-19-36-33 +#open 2014-02-13-20-36-35 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string 1363716396.798286 CXWv6p3arKYeMETxOg 55.247.223.174 27285 222.195.43.124 53 DNS_RR_unknown_type - F bro 1363716396.798374 CXWv6p3arKYeMETxOg 55.247.223.174 27285 222.195.43.124 53 dns_unmatched_reply - F bro -#close 2013-08-26-19-36-33 +1363716396.798374 - - - - - dns_unmatched_msg - F bro +#close 2014-02-13-20-36-35 From ba81aa438766d34e59752ae6c3dccf52a9f1353c Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 14 Feb 2014 12:06:24 -0800 Subject: [PATCH 40/47] Support for MPLS over VLAN. Patch by Chris Kanich. 
BIT-1017 #merged --- CHANGES | 4 ++++ VERSION | 2 +- src/PktSrc.cc | 22 ++++++++++++------ .../btest/Baseline/core.mpls-in-vlan/conn.log | 12 ++++++++++ testing/btest/Traces/mpls-in-vlan.trace | Bin 0 -> 2605 bytes testing/btest/core/mpls-in-vlan.bro | 2 ++ 6 files changed, 34 insertions(+), 8 deletions(-) create mode 100644 testing/btest/Baseline/core.mpls-in-vlan/conn.log create mode 100644 testing/btest/Traces/mpls-in-vlan.trace create mode 100644 testing/btest/core/mpls-in-vlan.bro diff --git a/CHANGES b/CHANGES index f00e43a271..ba9102aeeb 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,8 @@ +2.2-174 | 2014-02-14 12:07:04 -0800 + + * Support for MPLS over VLAN. (Chris Kanich) + 2.2-173 | 2014-02-14 10:50:15 -0800 * Fix misidentification of SOCKS traffic that in particiular seemed diff --git a/VERSION b/VERSION index 60dee2b058..5b847786b5 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2-173 +2.2-174 diff --git a/src/PktSrc.cc b/src/PktSrc.cc index 941c4acd83..179630cdbd 100644 --- a/src/PktSrc.cc +++ b/src/PktSrc.cc @@ -229,12 +229,21 @@ void PktSrc::Process() { // MPLS carried over the ethernet frame. case 0x8847: + // Remove the data link layer and denote a + // header size of zero before the IP header. have_mpls = true; + data += get_link_header_size(datalink); + pkt_hdr_size = 0; break; // VLAN carried over the ethernet frame. case 0x8100: data += get_link_header_size(datalink); + + // Check for MPLS in VLAN. + if ( ((data[2] << 8) + data[3]) == 0x8847 ) + have_mpls = true; + data += 4; // Skip the vlan header pkt_hdr_size = 0; @@ -274,8 +283,13 @@ void PktSrc::Process() protocol = (data[2] << 8) + data[3]; if ( protocol == 0x0281 ) - // MPLS Unicast + { + // MPLS Unicast. Remove the data link layer and + // denote a header size of zero before the IP header. have_mpls = true; + data += get_link_header_size(datalink); + pkt_hdr_size = 0; + } else if ( protocol != 0x0021 && protocol != 0x0057 ) { @@ -290,12 +304,6 @@ void PktSrc::Process() if ( have_mpls ) { - // Remove the data link layer - data += get_link_header_size(datalink); - - // Denote a header size of zero before the IP header - pkt_hdr_size = 0; - // Skip the MPLS label stack. 
bool end_of_stack = false; diff --git a/testing/btest/Baseline/core.mpls-in-vlan/conn.log b/testing/btest/Baseline/core.mpls-in-vlan/conn.log new file mode 100644 index 0000000000..e8ee793b75 --- /dev/null +++ b/testing/btest/Baseline/core.mpls-in-vlan/conn.log @@ -0,0 +1,12 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open 2014-02-14-20-04-20 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] +1371685686.536606 CXWv6p3arKYeMETxOg 65.65.65.65 19244 65.65.65.65 80 tcp - - - - OTH - 0 D 1 257 0 0 (empty) +1371686961.156859 CjhGID4nQcgTWjvg4c 65.65.65.65 32828 65.65.65.65 80 tcp - - - - OTH - 0 d 0 0 1 1500 (empty) +1371686961.479321 CCvvfg3TEfuqmmG4bh 65.65.65.65 61193 65.65.65.65 80 tcp - - - - OTH - 0 D 1 710 0 0 (empty) +#close 2014-02-14-20-04-20 diff --git a/testing/btest/Traces/mpls-in-vlan.trace b/testing/btest/Traces/mpls-in-vlan.trace new file mode 100644 index 0000000000000000000000000000000000000000..634f3fce1469f61f436ed1b79c5b1c42f8c29289 GIT binary patch literal 2605 zcmc&$O>7%Q6rKc{rYlxd;*t}LWDX_rdhM86Wl5!&#EGR8$t5We327Pcj@LuRGt2Bc zPFf)h5>ig|g2V+ODpi~~fdq$~T7E9v5Jyy1LeN{0IKY)RyIv=@TnSf2j5IrXGjHC! z@0<7L?a$wSd1Dqmj?s+wT8KmDxu-Sz%iRH=NB z(qv!%;^>*P;@M1ZiqSOAG>NwxDA^Uh#X2OQj*x=BIJ)3TE|rs8UtiY?1F2N2ER`?7 z#5<;ui)ARt-X$h>@ikCsaY=v|#e#OMx4^Q4mx|%O7Lr+~bUu|r8L@<2(u^eU#`w8dxVpAhTf;By;Ogd9y}C25lE{lT++v6M5biO=^w6W@DV%E(NCo94 z_vd=4qIOIB2RGhx+RXQ<(9kd`{iA>`*r*O-=!<9^8bu zLPz>tPSb__1Z92PApXkbGMqG}&30K^Xi5ddW!~Wu1Ovn#^Kou9X@g1Q{%@q`U9uTc)% z*Tg!dH20=t3k-4gz%w}0r(^(^G(N1qS!Qz<0U_}Lr&fFr0xekB4*9y8g_f0-C^~V|1_~h2_d+zDNPyGe&@YY9zVtMn;JN2Vi z7apouX2$==@kG5&>u|4Ep!)<~U|NtW7%jxRx)8(b|FDJlQR9T0_BA+P2&~X zqKc}qe~CwZ=znZ4!(`QjC)27KTxDV2r!8F+^O5hhhdP&t2)g?30aIaJ*~&u|c>7|p Ja6$j^`~zPp1hfDE literal 0 HcmV?d00001 diff --git a/testing/btest/core/mpls-in-vlan.bro b/testing/btest/core/mpls-in-vlan.bro new file mode 100644 index 0000000000..f57c1862ce --- /dev/null +++ b/testing/btest/core/mpls-in-vlan.bro @@ -0,0 +1,2 @@ +# @TEST-EXEC: bro -C -r $TRACES/mpls-in-vlan.trace +# @TEST-EXEC: btest-diff conn.log From b712d6436cfbfe48c0afcb0a2fd30bfd07a4687b Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Tue, 18 Feb 2014 02:54:03 -0800 Subject: [PATCH 41/47] update 3rdparty submodule (new SQLite version) --- src/3rdparty | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/3rdparty b/src/3rdparty index 42a4c9694a..92674c5745 160000 --- a/src/3rdparty +++ b/src/3rdparty @@ -1 +1 @@ -Subproject commit 42a4c9694a2b2677b050fbb7cbae26bc5ec4605a +Subproject commit 92674c57455cb71de5a2be6f482570e10be46aa6 From 90026f7196d721d4141077954f9a415791c48fc0 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 19 Feb 2014 10:32:27 -0600 Subject: [PATCH 42/47] Update to libmagic version 5.17, address BIT-1136. 
--- CMakeLists.txt | 2 +- testing/btest/bifs/identify_data.bro | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 28027d63d3..f773381ae8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -56,7 +56,7 @@ set(LIBMAGIC_LIB_DIR ${LIBMAGIC_PREFIX}/lib) set(LIBMAGIC_LIBRARY ${LIBMAGIC_LIB_DIR}/libmagic.a) ExternalProject_Add(libmagic PREFIX ${LIBMAGIC_PREFIX} - URL ${CMAKE_CURRENT_SOURCE_DIR}/src/3rdparty/file-5.16.tar.gz + URL ${CMAKE_CURRENT_SOURCE_DIR}/src/3rdparty/file-5.17.tar.gz CONFIGURE_COMMAND ./configure --enable-static --disable-shared --prefix=${LIBMAGIC_PREFIX} --includedir=${LIBMAGIC_INCLUDE_DIR} diff --git a/testing/btest/bifs/identify_data.bro b/testing/btest/bifs/identify_data.bro index 836a5a428f..d49a144b1e 100644 --- a/testing/btest/bifs/identify_data.bro +++ b/testing/btest/bifs/identify_data.bro @@ -10,7 +10,7 @@ event bro_init() print identify_data(a, T); # PNG image - local b = "\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"; + local b = "\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00"; print identify_data(b, F); print identify_data(b, T); } From 18d89d6320db4ffd15f0250b2db8a1ec11750ac4 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 20 Feb 2014 14:37:43 -0800 Subject: [PATCH 43/47] New alert from https://tools.ietf.org/html/draft-ietf-tls-applayerprotoneg-04 --- scripts/base/protocols/ssl/consts.bro | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/base/protocols/ssl/consts.bro b/scripts/base/protocols/ssl/consts.bro index b81aebfbbb..c50ad13648 100644 --- a/scripts/base/protocols/ssl/consts.bro +++ b/scripts/base/protocols/ssl/consts.bro @@ -55,6 +55,7 @@ export { [113] = "bad_certificate_status_response", [114] = "bad_certificate_hash_value", [115] = "unknown_psk_identity", + [120] = "no_application_protocol", } &default=function(i: count):string { return fmt("unknown-%d", i); }; ## Mapping between numeric codes and human readable strings for SSL/TLS From 10d89a464896f3d041985672c9910c3fb14bdcda Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 20 Feb 2014 17:27:46 -0800 Subject: [PATCH 44/47] Updating submodule(s). [nomail] --- CHANGES | 4 ++++ VERSION | 2 +- src/3rdparty | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index ba9102aeeb..1b4d3841bd 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,8 @@ +2.2-177 | 2014-02-20 17:27:46 -0800 + + * Update to libmagic version 5.17. Addresses BIT-1136. (Jon Siwek) + 2.2-174 | 2014-02-14 12:07:04 -0800 * Support for MPLS over VLAN. (Chris Kanich) diff --git a/VERSION b/VERSION index 5b847786b5..598049d62c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2-174 +2.2-177 diff --git a/src/3rdparty b/src/3rdparty index 42a4c9694a..e96d95a130 160000 --- a/src/3rdparty +++ b/src/3rdparty @@ -1 +1 @@ -Subproject commit 42a4c9694a2b2677b050fbb7cbae26bc5ec4605a +Subproject commit e96d95a130a572b611fe70b3c3ede2b4727aaa22 From 0e7d70e21924beb04cf0109e899c2b0003b55ffd Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 21 Feb 2014 06:05:12 -0800 Subject: [PATCH 45/47] Correct return type of topk_get_top, addresses BIT-1144 --- src/probabilistic/top-k.bif | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/probabilistic/top-k.bif b/src/probabilistic/top-k.bif index 5362750467..0589608d22 100644 --- a/src/probabilistic/top-k.bif +++ b/src/probabilistic/top-k.bif @@ -49,7 +49,7 @@ function topk_add%(handle: opaque of topk, value: any%): any ## ## .. 
bro:see:: topk_init topk_add topk_count topk_epsilon ## topk_size topk_sum topk_merge topk_merge_prune -function topk_get_top%(handle: opaque of topk, k: count%): any +function topk_get_top%(handle: opaque of topk, k: count%): index_vec %{ assert(handle); probabilistic::TopkVal* h = (probabilistic::TopkVal*) handle; From 81e561e5dea6a00c7d70058974964434450fe292 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 21 Feb 2014 11:18:35 -0800 Subject: [PATCH 46/47] Revert "Correct return type of topk_get_top, addresses BIT-1144" This reverts commit 0e7d70e21924beb04cf0109e899c2b0003b55ffd. Sorry, bad idea. --- src/probabilistic/top-k.bif | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/probabilistic/top-k.bif b/src/probabilistic/top-k.bif index 0589608d22..5362750467 100644 --- a/src/probabilistic/top-k.bif +++ b/src/probabilistic/top-k.bif @@ -49,7 +49,7 @@ function topk_add%(handle: opaque of topk, value: any%): any ## ## .. bro:see:: topk_init topk_add topk_count topk_epsilon ## topk_size topk_sum topk_merge topk_merge_prune -function topk_get_top%(handle: opaque of topk, k: count%): index_vec +function topk_get_top%(handle: opaque of topk, k: count%): any %{ assert(handle); probabilistic::TopkVal* h = (probabilistic::TopkVal*) handle; From ca2cdd88615584e782564d334e703883f40f6abf Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 21 Feb 2014 11:24:03 -0800 Subject: [PATCH 47/47] new TLS constants from https://tools.ietf.org/html/draft-bmoeller-tls-downgrade-scsv-01 --- scripts/base/protocols/ssl/consts.bro | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/base/protocols/ssl/consts.bro b/scripts/base/protocols/ssl/consts.bro index c50ad13648..9e9222f12c 100644 --- a/scripts/base/protocols/ssl/consts.bro +++ b/scripts/base/protocols/ssl/consts.bro @@ -47,6 +47,7 @@ export { [70] = "protocol_version", [71] = "insufficient_security", [80] = "internal_error", + [86] = "inappropriate_fallback", [90] = "user_canceled", [100] = "no_renegotiation", [110] = "unsupported_extension", @@ -264,6 +265,8 @@ export { const TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C3; const TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C4; const TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C5; + # draft-bmoeller-tls-downgrade-scsv-01 + const TLS_FALLBACK_SCSV = 0x5600; # RFC 4492 const TLS_ECDH_ECDSA_WITH_NULL_SHA = 0xC001; const TLS_ECDH_ECDSA_WITH_RC4_128_SHA = 0xC002; @@ -630,6 +633,7 @@ export { [TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256", [TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256", [TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256", + [TLS_FALLBACK_SCSV] = "TLS_FALLBACK_SCSV", [TLS_ECDH_ECDSA_WITH_NULL_SHA] = "TLS_ECDH_ECDSA_WITH_NULL_SHA", [TLS_ECDH_ECDSA_WITH_RC4_128_SHA] = "TLS_ECDH_ECDSA_WITH_RC4_128_SHA", [TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA",